VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@78404

Last change on this file since 78404 was 78371, checked in by vboxsync, 6 years ago

VMM: Move VT-x/AMD-V helpers that are based on CPU specs to CPUM in preparation of upcoming changes. It is better placed in CPUM if say NEM in future needs to implement nested VT-x/AMD-V.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 643.0 KB
1/* $Id: HMVMXR0.cpp 78371 2019-05-03 08:21:44Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_HM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/x86.h>
25#include <iprt/asm-amd64-x86.h>
26#include <iprt/thread.h>
27
28#include <VBox/vmm/pdmapi.h>
29#include <VBox/vmm/dbgf.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/iom.h>
32#include <VBox/vmm/selm.h>
33#include <VBox/vmm/tm.h>
34#include <VBox/vmm/em.h>
35#include <VBox/vmm/gim.h>
36#include <VBox/vmm/apic.h>
37#ifdef VBOX_WITH_REM
38# include <VBox/vmm/rem.h>
39#endif
40#include "HMInternal.h"
41#include <VBox/vmm/vm.h>
42#include <VBox/vmm/hmvmxinline.h>
43#include "HMVMXR0.h"
44#include "dtrace/VBoxVMM.h"
45
46#ifdef DEBUG_ramshankar
47# define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS
48# define HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE
49# define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
50# define HMVMX_ALWAYS_CHECK_GUEST_STATE
51# define HMVMX_ALWAYS_TRAP_ALL_XCPTS
52# define HMVMX_ALWAYS_TRAP_PF
53# define HMVMX_ALWAYS_FLUSH_TLB
54# define HMVMX_ALWAYS_SWAP_EFER
55#endif
56
57
58/*********************************************************************************************************************************
59* Defined Constants And Macros *
60*********************************************************************************************************************************/
61/** Use the function table. */
62#define HMVMX_USE_FUNCTION_TABLE
63
64/** Determine which tagged-TLB flush handler to use. */
65#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
66#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
67#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
68#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
69
70/** @name HMVMX_READ_XXX
71 * Flags to skip redundant reads of some common VMCS fields that are not part of
72 * the guest-CPU or VCPU state but are needed while handling VM-exits.
73 */
74#define HMVMX_READ_IDT_VECTORING_INFO RT_BIT_32(0)
75#define HMVMX_READ_IDT_VECTORING_ERROR_CODE RT_BIT_32(1)
76#define HMVMX_READ_EXIT_QUALIFICATION RT_BIT_32(2)
77#define HMVMX_READ_EXIT_INSTR_LEN RT_BIT_32(3)
78#define HMVMX_READ_EXIT_INTERRUPTION_INFO RT_BIT_32(4)
79#define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE RT_BIT_32(5)
80#define HMVMX_READ_EXIT_INSTR_INFO RT_BIT_32(6)
81#define HMVMX_READ_GUEST_LINEAR_ADDR RT_BIT_32(7)
82/** @} */
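/*
 * Illustrative sketch (mirrors the hmR0VmxRead*Vmcs helpers further below): each
 * flag guards a lazy VMCS read so the field is fetched at most once per VM-exit,
 * e.g.:
 *     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
 *     {
 *         int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
 *         AssertRCReturn(rc, rc);
 *         pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
 *     }
 */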
83
84/**
85 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
86 * guest using hardware-assisted VMX.
87 *
88 * This excludes state like GPRs (other than RSP) which are always swapped
89 * and restored across the world-switch, and also registers like the EFER
90 * MSR which cannot be modified by the guest without causing a VM-exit.
91 */
92#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
93 | CPUMCTX_EXTRN_RFLAGS \
94 | CPUMCTX_EXTRN_RSP \
95 | CPUMCTX_EXTRN_SREG_MASK \
96 | CPUMCTX_EXTRN_TABLE_MASK \
97 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
98 | CPUMCTX_EXTRN_SYSCALL_MSRS \
99 | CPUMCTX_EXTRN_SYSENTER_MSRS \
100 | CPUMCTX_EXTRN_TSC_AUX \
101 | CPUMCTX_EXTRN_OTHER_MSRS \
102 | CPUMCTX_EXTRN_CR0 \
103 | CPUMCTX_EXTRN_CR3 \
104 | CPUMCTX_EXTRN_CR4 \
105 | CPUMCTX_EXTRN_DR7 \
106 | CPUMCTX_EXTRN_HM_VMX_MASK)
107
108/**
109 * Exception bitmap mask for real-mode guests (real-on-v86).
110 *
111 * We need to intercept all exceptions manually except:
112 * - \#AC and \#DB are always intercepted to prevent the CPU from deadlocking
113 * due to bugs in Intel CPUs.
114 * - \#PF need not be intercepted even in real-mode if we have nested paging
115 * support.
116 */
117#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
118 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
119 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
120 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
121 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
122 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
123 | RT_BIT(X86_XCPT_XF))
124
125/** Maximum VM-instruction error number. */
126#define HMVMX_INSTR_ERROR_MAX 28
127
128/** Profiling macro. */
129#ifdef HM_PROFILE_EXIT_DISPATCH
130# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
131# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
132#else
133# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
134# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
135#endif
136
137/** Assert that preemption is disabled or covered by thread-context hooks. */
138#define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
139 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
140
141/** Assert that we haven't migrated CPUs when thread-context hooks are not
142 * used. */
143#define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
144 || (a_pVCpu)->hm.s.idEnteredCpu == RTMpCpuId(), \
145 ("Illegal migration! Entered on CPU %u Current %u\n", \
146 (a_pVCpu)->hm.s.idEnteredCpu, RTMpCpuId()))
147
148/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
149 * context. */
150#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
151 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
152 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
153
154/** Helper macro for VM-exit handlers called unexpectedly. */
155#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_pVmxTransient) \
156 do { \
157 (a_pVCpu)->hm.s.u32HMError = (a_pVmxTransient)->uExitReason; \
158 return VERR_VMX_UNEXPECTED_EXIT; \
159 } while (0)
160
161#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
162/** Macro that performs the necessary privilege checks and handles intercepted VM-exits for
163 * guests that attempted to execute a VMX instruction. */
164# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
165 do \
166 { \
167 VBOXSTRICTRC rcStrictTmp = hmR0VmxCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
168 if (rcStrictTmp == VINF_SUCCESS) \
169 { /* likely */ } \
170 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
171 { \
172 Assert((a_pVCpu)->hm.s.Event.fPending); \
173 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
174 return VINF_SUCCESS; \
175 } \
176 else \
177 { \
178 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
179 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
180 } \
181 } while (0)
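/* Illustrative usage: a VMX-instruction VM-exit handler would typically invoke this
   first, e.g. HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
   and only continue decoding the instruction when the macro does not return early. */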
182
183/** Macro that decodes a memory operand for an instruction VM-exit. */
184# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
185 do \
186 { \
187 VBOXSTRICTRC rcStrictTmp = hmR0VmxDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
188 (a_pGCPtrEffAddr)); \
189 if (rcStrictTmp == VINF_SUCCESS) \
190 { /* likely */ } \
191 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
192 { \
193 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
194 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
195 NOREF(uXcptTmp); \
196 return VINF_SUCCESS; \
197 } \
198 else \
199 { \
200 Log4Func(("hmR0VmxDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
201 return rcStrictTmp; \
202 } \
203 } while (0)
204
205#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
206
207
208/*********************************************************************************************************************************
209* Structures and Typedefs *
210*********************************************************************************************************************************/
211/**
212 * VMX transient state.
213 *
214 * A state structure for holding miscellaneous information across
215 * VMX non-root operation, which is restored after the transition.
216 */
217typedef struct VMXTRANSIENT
218{
219 /** The host's rflags/eflags. */
220 RTCCUINTREG fEFlags;
221#if HC_ARCH_BITS == 32
222 uint32_t u32Alignment0;
223#endif
224 /** The guest's TPR value used for TPR shadowing. */
225 uint8_t u8GuestTpr;
226 /** Alignment. */
227 uint8_t abAlignment0[7];
228
229 /** The basic VM-exit reason. */
230 uint16_t uExitReason;
231 /** Alignment. */
232 uint16_t u16Alignment0;
233 /** The VM-exit interruption error code. */
234 uint32_t uExitIntErrorCode;
235 /** The VM-exit qualification. */
236 uint64_t uExitQual;
237 /** The Guest-linear address. */
238 uint64_t uGuestLinearAddr;
239
240 /** The VM-exit interruption-information field. */
241 uint32_t uExitIntInfo;
242 /** The VM-exit instruction-length field. */
243 uint32_t cbInstr;
244 /** The VM-exit instruction-information field. */
245 VMXEXITINSTRINFO ExitInstrInfo;
246 /** Whether the VM-entry failed or not. */
247 bool fVMEntryFailed;
248 /** Whether we are currently executing a nested-guest. */
249 bool fIsNestedGuest;
250 /** Alignment. */
251 uint8_t abAlignment1[2];
252
253 /** The VM-entry interruption-information field. */
254 uint32_t uEntryIntInfo;
255 /** The VM-entry exception error code field. */
256 uint32_t uEntryXcptErrorCode;
257 /** The VM-entry instruction length field. */
258 uint32_t cbEntryInstr;
259
260 /** IDT-vectoring information field. */
261 uint32_t uIdtVectoringInfo;
262 /** IDT-vectoring error code. */
263 uint32_t uIdtVectoringErrorCode;
264
265 /** Mask of currently read VMCS fields; HMVMX_READ_XXX. */
266 uint32_t fVmcsFieldsRead;
267
268 /** Whether the guest debug state was active at the time of VM-exit. */
269 bool fWasGuestDebugStateActive;
270 /** Whether the hyper debug state was active at the time of VM-exit. */
271 bool fWasHyperDebugStateActive;
272 /** Whether TSC-offsetting and the VMX-preemption timer were updated before VM-entry. */
273 bool fUpdatedTscOffsettingAndPreemptTimer;
274 /** Whether the VM-exit was caused by a page-fault during delivery of a
275 * contributory exception or a page-fault. */
276 bool fVectoringDoublePF;
277 /** Whether the VM-exit was caused by a page-fault during delivery of an
278 * external interrupt or NMI. */
279 bool fVectoringPF;
280 bool afAlignment0[3];
281
282 /** The VMCS info. object. */
283 PVMXVMCSINFO pVmcsInfo;
284} VMXTRANSIENT;
285AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
286AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntInfo, sizeof(uint64_t));
287AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo, sizeof(uint64_t));
288AssertCompileMemberAlignment(VMXTRANSIENT, fWasGuestDebugStateActive, sizeof(uint64_t));
289AssertCompileMemberAlignment(VMXTRANSIENT, pVmcsInfo, sizeof(uint64_t));
290AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
291/** Pointer to VMX transient state. */
292typedef VMXTRANSIENT *PVMXTRANSIENT;
293
294/**
295 * Memory operand read or write access.
296 */
297typedef enum VMXMEMACCESS
298{
299 VMXMEMACCESS_READ = 0,
300 VMXMEMACCESS_WRITE = 1
301} VMXMEMACCESS;
302
303/**
304 * VMX VM-exit handler.
305 *
306 * @returns Strict VBox status code (i.e. informational status codes too).
307 * @param pVCpu The cross context virtual CPU structure.
308 * @param pVmxTransient The VMX-transient structure.
309 */
310#ifndef HMVMX_USE_FUNCTION_TABLE
311typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
312#else
313typedef DECLCALLBACK(VBOXSTRICTRC) FNVMXEXITHANDLER(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
314/** Pointer to VM-exit handler. */
315typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
316#endif
317
318/**
319 * VMX VM-exit handler, non-strict status code.
320 *
321 * This is generally the same as FNVMXEXITHANDLER, the NSRC bit is just FYI.
322 *
323 * @returns VBox status code, no informational status code returned.
324 * @param pVCpu The cross context virtual CPU structure.
325 * @param pVmxTransient The VMX-transient structure.
326 *
327 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
328 * use of that status code will be replaced with VINF_EM_SOMETHING
329 * later when switching over to IEM.
330 */
331#ifndef HMVMX_USE_FUNCTION_TABLE
332typedef int FNVMXEXITHANDLERNSRC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
333#else
334typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
335#endif
336
337
338/*********************************************************************************************************************************
339* Internal Functions *
340*********************************************************************************************************************************/
341#ifndef HMVMX_USE_FUNCTION_TABLE
342DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
343# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
344# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
345#else
346# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
347# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
348#endif
349#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
350DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
351#endif
352
353static int hmR0VmxImportGuestState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
354#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
355static void hmR0VmxInitVmcsReadCache(PVMCPU pVCpu);
356#endif
357
358/** @name VM-exit handlers.
359 * @{
360 */
361static FNVMXEXITHANDLER hmR0VmxExitXcptOrNmi;
362static FNVMXEXITHANDLER hmR0VmxExitExtInt;
363static FNVMXEXITHANDLER hmR0VmxExitTripleFault;
364static FNVMXEXITHANDLERNSRC hmR0VmxExitInitSignal;
365static FNVMXEXITHANDLERNSRC hmR0VmxExitSipi;
366static FNVMXEXITHANDLERNSRC hmR0VmxExitIoSmi;
367static FNVMXEXITHANDLERNSRC hmR0VmxExitSmi;
368static FNVMXEXITHANDLERNSRC hmR0VmxExitIntWindow;
369static FNVMXEXITHANDLERNSRC hmR0VmxExitNmiWindow;
370static FNVMXEXITHANDLER hmR0VmxExitTaskSwitch;
371static FNVMXEXITHANDLER hmR0VmxExitCpuid;
372static FNVMXEXITHANDLER hmR0VmxExitGetsec;
373static FNVMXEXITHANDLER hmR0VmxExitHlt;
374static FNVMXEXITHANDLERNSRC hmR0VmxExitInvd;
375static FNVMXEXITHANDLER hmR0VmxExitInvlpg;
376static FNVMXEXITHANDLER hmR0VmxExitRdpmc;
377static FNVMXEXITHANDLER hmR0VmxExitVmcall;
378#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
379static FNVMXEXITHANDLER hmR0VmxExitVmclear;
380static FNVMXEXITHANDLER hmR0VmxExitVmlaunch;
381static FNVMXEXITHANDLER hmR0VmxExitVmptrld;
382static FNVMXEXITHANDLER hmR0VmxExitVmptrst;
383static FNVMXEXITHANDLER hmR0VmxExitVmread;
384static FNVMXEXITHANDLER hmR0VmxExitVmresume;
385static FNVMXEXITHANDLER hmR0VmxExitVmwrite;
386static FNVMXEXITHANDLER hmR0VmxExitVmxoff;
387static FNVMXEXITHANDLER hmR0VmxExitVmxon;
388#endif
389static FNVMXEXITHANDLER hmR0VmxExitRdtsc;
390static FNVMXEXITHANDLERNSRC hmR0VmxExitRsm;
391static FNVMXEXITHANDLERNSRC hmR0VmxExitSetPendingXcptUD;
392static FNVMXEXITHANDLER hmR0VmxExitMovCRx;
393static FNVMXEXITHANDLER hmR0VmxExitMovDRx;
394static FNVMXEXITHANDLER hmR0VmxExitIoInstr;
395static FNVMXEXITHANDLER hmR0VmxExitRdmsr;
396static FNVMXEXITHANDLER hmR0VmxExitWrmsr;
397static FNVMXEXITHANDLERNSRC hmR0VmxExitErrInvalidGuestState;
398static FNVMXEXITHANDLERNSRC hmR0VmxExitErrMsrLoad;
399static FNVMXEXITHANDLERNSRC hmR0VmxExitErrUndefined;
400static FNVMXEXITHANDLER hmR0VmxExitMwait;
401static FNVMXEXITHANDLER hmR0VmxExitMtf;
402static FNVMXEXITHANDLER hmR0VmxExitMonitor;
403static FNVMXEXITHANDLER hmR0VmxExitPause;
404static FNVMXEXITHANDLERNSRC hmR0VmxExitErrMachineCheck;
405static FNVMXEXITHANDLERNSRC hmR0VmxExitTprBelowThreshold;
406static FNVMXEXITHANDLER hmR0VmxExitApicAccess;
407static FNVMXEXITHANDLER hmR0VmxExitXdtrAccess;
408static FNVMXEXITHANDLER hmR0VmxExitEptViolation;
409static FNVMXEXITHANDLER hmR0VmxExitEptMisconfig;
410static FNVMXEXITHANDLER hmR0VmxExitRdtscp;
411static FNVMXEXITHANDLER hmR0VmxExitPreemptTimer;
412static FNVMXEXITHANDLERNSRC hmR0VmxExitWbinvd;
413static FNVMXEXITHANDLER hmR0VmxExitXsetbv;
414static FNVMXEXITHANDLER hmR0VmxExitRdrand;
415static FNVMXEXITHANDLER hmR0VmxExitInvpcid;
416/** @} */
417
418/** @name Helpers for hardware exceptions VM-exit handlers.
419 * @{
420 */
421static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
422static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
423static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
424static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
425static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
426static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
427static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient);
428/** @} */
429
430
431/*********************************************************************************************************************************
432* Global Variables *
433*********************************************************************************************************************************/
434#ifdef VMX_USE_CACHED_VMCS_ACCESSES
435static const uint32_t g_aVmcsCacheSegBase[] =
436{
437 VMX_VMCS_GUEST_ES_BASE_CACHE_IDX,
438 VMX_VMCS_GUEST_CS_BASE_CACHE_IDX,
439 VMX_VMCS_GUEST_SS_BASE_CACHE_IDX,
440 VMX_VMCS_GUEST_DS_BASE_CACHE_IDX,
441 VMX_VMCS_GUEST_FS_BASE_CACHE_IDX,
442 VMX_VMCS_GUEST_GS_BASE_CACHE_IDX
443};
444AssertCompile(RT_ELEMENTS(g_aVmcsCacheSegBase) == X86_SREG_COUNT);
445#endif
446static const uint32_t g_aVmcsSegBase[] =
447{
448 VMX_VMCS_GUEST_ES_BASE,
449 VMX_VMCS_GUEST_CS_BASE,
450 VMX_VMCS_GUEST_SS_BASE,
451 VMX_VMCS_GUEST_DS_BASE,
452 VMX_VMCS_GUEST_FS_BASE,
453 VMX_VMCS_GUEST_GS_BASE
454};
455static const uint32_t g_aVmcsSegSel[] =
456{
457 VMX_VMCS16_GUEST_ES_SEL,
458 VMX_VMCS16_GUEST_CS_SEL,
459 VMX_VMCS16_GUEST_SS_SEL,
460 VMX_VMCS16_GUEST_DS_SEL,
461 VMX_VMCS16_GUEST_FS_SEL,
462 VMX_VMCS16_GUEST_GS_SEL
463};
464static const uint32_t g_aVmcsSegLimit[] =
465{
466 VMX_VMCS32_GUEST_ES_LIMIT,
467 VMX_VMCS32_GUEST_CS_LIMIT,
468 VMX_VMCS32_GUEST_SS_LIMIT,
469 VMX_VMCS32_GUEST_DS_LIMIT,
470 VMX_VMCS32_GUEST_FS_LIMIT,
471 VMX_VMCS32_GUEST_GS_LIMIT
472};
473static const uint32_t g_aVmcsSegAttr[] =
474{
475 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
476 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
477 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
478 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
479 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
480 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
481};
482AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT);
483AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
484AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT);
485AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT);
486
487#ifdef HMVMX_USE_FUNCTION_TABLE
488/**
489 * VMX_EXIT dispatch table.
490 */
491static const PFNVMXEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
492{
493 /* 00 VMX_EXIT_XCPT_OR_NMI */ hmR0VmxExitXcptOrNmi,
494 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
495 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
496 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
497 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
498 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
499 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
500 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
501 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
502 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
503 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
504 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
505 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
506 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
507 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
508 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
509 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
510 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
511 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitVmcall,
512#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
513 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitVmclear,
514 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitVmlaunch,
515 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitVmptrld,
516 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitVmptrst,
517 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitVmread,
518 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitVmresume,
519 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitVmwrite,
520 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitVmxoff,
521 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitVmxon,
522#else
523 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
524 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
525 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
526 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
527 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
528 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
529 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
530 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
531 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
532#endif
533 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
534 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
535 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
536 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
537 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
538 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
539 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
540 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
541 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
542 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
543 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
544 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
545 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
546 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrMachineCheck,
547 /* 42 UNDEFINED */ hmR0VmxExitErrUndefined,
548 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
549 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
550 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
551 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ hmR0VmxExitXdtrAccess,
552 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ hmR0VmxExitXdtrAccess,
553 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
554 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
555 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
556 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
557 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
558 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
559 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
560 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
561 /* 56 VMX_EXIT_APIC_WRITE */ hmR0VmxExitErrUndefined,
562 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
563 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
564 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitSetPendingXcptUD,
565 /* 60 VMX_EXIT_ENCLS */ hmR0VmxExitErrUndefined,
566 /* 61 VMX_EXIT_RDSEED */ hmR0VmxExitErrUndefined, /* only spurious exits, so undefined */
567 /* 62 VMX_EXIT_PML_FULL */ hmR0VmxExitErrUndefined,
568 /* 63 VMX_EXIT_XSAVES */ hmR0VmxExitSetPendingXcptUD,
569 /* 64 VMX_EXIT_XRSTORS */ hmR0VmxExitSetPendingXcptUD,
570};
571#endif /* HMVMX_USE_FUNCTION_TABLE */
572
573#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
574static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
575{
576 /* 0 */ "(Not Used)",
577 /* 1 */ "VMCALL executed in VMX root operation.",
578 /* 2 */ "VMCLEAR with invalid physical address.",
579 /* 3 */ "VMCLEAR with VMXON pointer.",
580 /* 4 */ "VMLAUNCH with non-clear VMCS.",
581 /* 5 */ "VMRESUME with non-launched VMCS.",
582 /* 6 */ "VMRESUME after VMXOFF.",
583 /* 7 */ "VM-entry with invalid control fields.",
584 /* 8 */ "VM-entry with invalid host state fields.",
585 /* 9 */ "VMPTRLD with invalid physical address.",
586 /* 10 */ "VMPTRLD with VMXON pointer.",
587 /* 11 */ "VMPTRLD with incorrect revision identifier.",
588 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
589 /* 13 */ "VMWRITE to read-only VMCS component.",
590 /* 14 */ "(Not Used)",
591 /* 15 */ "VMXON executed in VMX root operation.",
592 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
593 /* 17 */ "VM-entry with non-launched executive VMCS.",
594 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
595 /* 19 */ "VMCALL with non-clear VMCS.",
596 /* 20 */ "VMCALL with invalid VM-exit control fields.",
597 /* 21 */ "(Not Used)",
598 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
599 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
600 /* 24 */ "VMCALL with invalid SMM-monitor features.",
601 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
602 /* 26 */ "VM-entry with events blocked by MOV SS.",
603 /* 27 */ "(Not Used)",
604 /* 28 */ "Invalid operand to INVEPT/INVVPID."
605};
606#endif /* VBOX_STRICT */
607
608
609/**
610 * Gets the CR0 guest/host mask that does not change through the lifetime of a VM.
611 *
612 * Any bit set in this mask is owned by the host/hypervisor and would cause a
613 * VM-exit when modified by the guest.
614 *
615 * @returns The static CR0 guest/host mask.
616 * @param pVCpu The cross context virtual CPU structure.
617 */
618DECL_FORCE_INLINE(uint64_t) hmR0VmxGetFixedCr0Mask(PCVMCPU pVCpu)
619{
620 /*
621 * Modifications by the guest to CR0 bits that VT-x ignores saving/restoring (CD, ET, NW)
622 * and to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
623 */
624 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
625 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
626 * and @bugref{6944}. */
627 PVM pVM = pVCpu->CTX_SUFF(pVM);
628 return ( X86_CR0_PE
629 | X86_CR0_NE
630 | (pVM->hm.s.fNestedPaging ? 0 : X86_CR0_WP)
631 | X86_CR0_PG
632 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
633 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
634 | X86_CR0_NW); /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
635}
636
637
638/**
639 * Gets the CR4 guest/host mask that does not change through the lifetime of a VM.
640 *
641 * Any bit set in this mask is owned by the host/hypervisor and would cause a
642 * VM-exit when modified by the guest.
643 *
644 * @returns The static CR4 guest/host mask.
645 * @param pVCpu The cross context virtual CPU structure.
646 */
647DECL_FORCE_INLINE(uint64_t) hmR0VmxGetFixedCr4Mask(PCVMCPU pVCpu)
648{
649 /*
650 * We need to look at the host features here (e.g. OSXSAVE, PCID) because
651 * these bits are reserved on hardware that does not support them. Since the
652 * CPU cannot refer to our virtual CPUID, we need to intercept CR4 changes to
653 * these bits and handle it depending on whether we expose them to the guest.
654 */
655 PVM pVM = pVCpu->CTX_SUFF(pVM);
656 bool const fXSaveRstor = pVM->cpum.ro.HostFeatures.fXSaveRstor;
657 bool const fPcid = pVM->cpum.ro.HostFeatures.fPcid;
658 return ( X86_CR4_VMXE
659 | X86_CR4_VME
660 | X86_CR4_PAE
661 | X86_CR4_PGE
662 | X86_CR4_PSE
663 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
664 | (fPcid ? X86_CR4_PCIDE : 0));
665}
666
667
668/**
669 * Returns whether the VM-exit MSR-store area differs from the VM-exit MSR-load
670 * area.
671 *
672 * @returns @c true if it's different, @c false otherwise.
673 * @param pVmcsInfo The VMCS info. object.
674 */
675DECL_FORCE_INLINE(bool) hmR0VmxIsSeparateExitMsrStoreAreaVmcs(PCVMXVMCSINFO pVmcsInfo)
676{
677 return RT_BOOL( pVmcsInfo->pvGuestMsrStore != pVmcsInfo->pvGuestMsrLoad
678 && pVmcsInfo->pvGuestMsrStore);
679}
680
681
682/**
683 * Adds one or more exceptions to the exception bitmap and commits it to the current
684 * VMCS.
685 *
686 * @returns VBox status code.
687 * @param pVmxTransient The VMX-transient structure.
688 * @param uXcptMask The exception(s) to add.
689 */
690static int hmR0VmxAddXcptInterceptMask(PVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
691{
692 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
693 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
694 if ((uXcptBitmap & uXcptMask) != uXcptMask)
695 {
696 uXcptBitmap |= uXcptMask;
697 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
698 AssertRCReturn(rc, rc);
699 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
700 }
701 return VINF_SUCCESS;
702}
703
704
705/**
706 * Adds an exception to the exception bitmap and commits it to the current VMCS.
707 *
708 * @returns VBox status code.
709 * @param pVmxTransient The VMX-transient structure.
710 * @param uXcpt The exception to add.
711 */
712static int hmR0VmxAddXcptIntercept(PVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
713{
714 Assert(uXcpt <= X86_XCPT_LAST);
715 return hmR0VmxAddXcptInterceptMask(pVmxTransient, RT_BIT_32(uXcpt));
716}
717
718
719/**
720 * Removes one or more exceptions from the exception bitmap and commits it to the
721 * current VMCS.
722 *
723 * This takes care of not removing the exception intercept if a nested-guest
724 * requires the exception to be intercepted.
725 *
726 * @returns VBox status code.
727 * @param pVCpu The cross context virtual CPU structure.
728 * @param pVmxTransient The VMX-transient structure.
729 * @param uXcptMask The exception(s) to remove.
730 */
731static int hmR0VmxRemoveXcptInterceptMask(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
732{
733 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
734 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
735 if (u32XcptBitmap & uXcptMask)
736 {
737#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
738 if (!pVmxTransient->fIsNestedGuest)
739 { /* likely */ }
740 else
741 {
742 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
743 uXcptMask &= ~pVmcsNstGst->u32XcptBitmap;
744 }
745#endif
746#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
747 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
748 | RT_BIT(X86_XCPT_DE)
749 | RT_BIT(X86_XCPT_NM)
750 | RT_BIT(X86_XCPT_TS)
751 | RT_BIT(X86_XCPT_UD)
752 | RT_BIT(X86_XCPT_NP)
753 | RT_BIT(X86_XCPT_SS)
754 | RT_BIT(X86_XCPT_GP)
755 | RT_BIT(X86_XCPT_PF)
756 | RT_BIT(X86_XCPT_MF));
757#elif defined(HMVMX_ALWAYS_TRAP_PF)
758 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
759#endif
760 if (uXcptMask)
761 {
762 /* Validate we are not removing any essential exception intercepts. */
763 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF))); RT_NOREF(pVCpu);
764 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
765 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
766
767 /* Remove it from the exception bitmap. */
768 u32XcptBitmap &= ~uXcptMask;
769
770 /* Commit and update the cache if necessary. */
771 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
772 {
773 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
774 AssertRCReturn(rc, rc);
775 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
776 }
777 }
778 }
779 return VINF_SUCCESS;
780}
781
782
783/**
784 * Removes an exception from the exception bitmap and commits it to the current
785 * VMCS.
786 *
787 * @returns VBox status code.
788 * @param pVCpu The cross context virtual CPU structure.
789 * @param pVmxTransient The VMX-transient structure.
790 * @param uXcpt The exception to remove.
791 */
792static int hmR0VmxRemoveXcptIntercept(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
793{
794 return hmR0VmxRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
795}
796
797
798/**
799 * Loads the VMCS specified by the VMCS info. object.
800 *
801 * @returns VBox status code.
802 * @param pVmcsInfo The VMCS info. object.
803 */
804static int hmR0VmxLoadVmcs(PVMXVMCSINFO pVmcsInfo)
805{
806 Assert(pVmcsInfo);
807 Assert(pVmcsInfo->HCPhysVmcs);
808 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
809
810 if (pVmcsInfo->fVmcsState & VMX_V_VMCS_LAUNCH_STATE_CLEAR)
811 {
812 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysVmcs);
813 if (RT_SUCCESS(rc))
814 {
815 pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
816 return VINF_SUCCESS;
817 }
818 return rc;
819 }
820 return VERR_VMX_INVALID_VMCS_LAUNCH_STATE;
821}
822
823
824/**
825 * Clears the VMCS specified by the VMCS info. object.
826 *
827 * @returns VBox status code.
828 * @param pVmcsInfo The VMCS info. object.
829 */
830static int hmR0VmxClearVmcs(PVMXVMCSINFO pVmcsInfo)
831{
832 Assert(pVmcsInfo);
833 Assert(pVmcsInfo->HCPhysVmcs);
834 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
835
836 int rc = VMXClearVmcs(pVmcsInfo->HCPhysVmcs);
837 if (RT_SUCCESS(rc))
838 pVmcsInfo->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
839 return rc;
840}
841
842
843#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
844/**
845 * Switches the current VMCS to the one specified.
846 *
847 * @returns VBox status code.
848 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
849 * @param pVmcsInfoTo The VMCS info. object we are switching to.
850 *
851 * @remarks Called with interrupts disabled.
852 */
853static int hmR0VmxSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
854{
855 Assert(pVmcsInfoFrom);
856 Assert(pVmcsInfoTo);
857
858 /*
859 * Clear the VMCS we are switching out if it has not already been cleared.
860 * This will sync any CPU internal data back to the VMCS.
861 */
862 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
863 {
864 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
865 if (RT_SUCCESS(rc))
866 { /* likely */ }
867 else
868 return rc;
869 }
870
871 /*
872 * Clear the VMCS we are switching to if it has not already been cleared.
873 * This will initialize the VMCS launch state to "clear" required for loading it.
874 *
875 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
876 */
877 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
878 {
879 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
880 if (RT_SUCCESS(rc))
881 { /* likely */ }
882 else
883 return rc;
884 }
885
886 /*
887 * Finally, load the VMCS we are switching to.
888 */
889 return hmR0VmxLoadVmcs(pVmcsInfoTo);
890}
891#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
892
893
894/**
895 * Updates the VM's last error record.
896 *
897 * If there was a VMX instruction error, reads the error data from the VMCS and
898 * updates the VCPU's last error record as well.
899 *
900 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
901 * Can be NULL if @a rc is not VERR_VMX_UNABLE_TO_START_VM or
902 * VERR_VMX_INVALID_VMCS_FIELD.
903 * @param rc The error code.
904 */
905static void hmR0VmxUpdateErrorRecord(PVMCPU pVCpu, int rc)
906{
907 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
908 || rc == VERR_VMX_UNABLE_TO_START_VM)
909 {
910 AssertPtrReturnVoid(pVCpu);
911 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
912 }
913 pVCpu->CTX_SUFF(pVM)->hm.s.rcInit = rc;
914}
915
916
917#ifdef VBOX_STRICT
918/**
919 * Reads the VM-entry interruption-information field from the VMCS into the VMX
920 * transient structure.
921 *
922 * @returns VBox status code.
923 * @param pVmxTransient The VMX-transient structure.
924 *
925 * @remarks No-long-jump zone!!!
926 */
927DECLINLINE(int) hmR0VmxReadEntryIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
928{
929 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
930 AssertRCReturn(rc, rc);
931 return VINF_SUCCESS;
932}
933
934
935/**
936 * Reads the VM-entry exception error code field from the VMCS into
937 * the VMX transient structure.
938 *
939 * @returns VBox status code.
940 * @param pVmxTransient The VMX-transient structure.
941 *
942 * @remarks No-long-jump zone!!!
943 */
944DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
945{
946 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
947 AssertRCReturn(rc, rc);
948 return VINF_SUCCESS;
949}
950
951
952/**
953 * Reads the VM-entry instruction length field from the VMCS into
954 * the VMX transient structure.
955 *
956 * @returns VBox status code.
957 * @param pVmxTransient The VMX-transient structure.
958 *
959 * @remarks No-long-jump zone!!!
960 */
961DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
962{
963 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
964 AssertRCReturn(rc, rc);
965 return VINF_SUCCESS;
966}
967#endif /* VBOX_STRICT */
968
969
970/**
971 * Reads the VM-exit interruption-information field from the VMCS into the VMX
972 * transient structure.
973 *
974 * @returns VBox status code.
975 * @param pVmxTransient The VMX-transient structure.
976 */
977DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
978{
979 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
980 {
981 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
982 AssertRCReturn(rc,rc);
983 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
984 }
985 return VINF_SUCCESS;
986}
987
988
989/**
990 * Reads the VM-exit interruption error code from the VMCS into the VMX
991 * transient structure.
992 *
993 * @returns VBox status code.
994 * @param pVmxTransient The VMX-transient structure.
995 */
996DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
997{
998 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
999 {
1000 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1001 AssertRCReturn(rc, rc);
1002 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
1003 }
1004 return VINF_SUCCESS;
1005}
1006
1007
1008/**
1009 * Reads the VM-exit instruction length field from the VMCS into the VMX
1010 * transient structure.
1011 *
1012 * @returns VBox status code.
1013 * @param pVmxTransient The VMX-transient structure.
1014 */
1015DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
1016{
1017 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1018 {
1019 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
1020 AssertRCReturn(rc, rc);
1021 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
1022 }
1023 return VINF_SUCCESS;
1024}
1025
1026
1027/**
1028 * Reads the VM-exit instruction-information field from the VMCS into
1029 * the VMX transient structure.
1030 *
1031 * @returns VBox status code.
1032 * @param pVmxTransient The VMX-transient structure.
1033 */
1034DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient)
1035{
1036 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1037 {
1038 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1039 AssertRCReturn(rc, rc);
1040 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
1041 }
1042 return VINF_SUCCESS;
1043}
1044
1045
1046/**
1047 * Reads the VM-exit Qualification from the VMCS into the VMX transient structure.
1048 *
1049 * @returns VBox status code.
1050 * @param pVCpu The cross context virtual CPU structure of the
1051 * calling EMT. (Required for the VMCS cache case.)
1052 * @param pVmxTransient The VMX-transient structure.
1053 */
1054DECLINLINE(int) hmR0VmxReadExitQualVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
1055{
1056 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1057 {
1058 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual); NOREF(pVCpu);
1059 AssertRCReturn(rc, rc);
1060 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
1061 }
1062 return VINF_SUCCESS;
1063}
1064
1065
1066/**
1067 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
1068 *
1069 * @returns VBox status code.
1070 * @param pVCpu The cross context virtual CPU structure of the
1071 * calling EMT. (Required for the VMCS cache case.)
1072 * @param pVmxTransient The VMX-transient structure.
1073 */
1074DECLINLINE(int) hmR0VmxReadGuestLinearAddrVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
1075{
1076 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1077 {
1078 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr); NOREF(pVCpu);
1079 AssertRCReturn(rc, rc);
1080 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
1081 }
1082 return VINF_SUCCESS;
1083}
1084
1085
1086/**
1087 * Reads the IDT-vectoring information field from the VMCS into the VMX
1088 * transient structure.
1089 *
1090 * @returns VBox status code.
1091 * @param pVmxTransient The VMX-transient structure.
1092 *
1093 * @remarks No-long-jump zone!!!
1094 */
1095DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
1096{
1097 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1098 {
1099 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1100 AssertRCReturn(rc, rc);
1101 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
1102 }
1103 return VINF_SUCCESS;
1104}
1105
1106
1107/**
1108 * Reads the IDT-vectoring error code from the VMCS into the VMX
1109 * transient structure.
1110 *
1111 * @returns VBox status code.
1112 * @param pVmxTransient The VMX-transient structure.
1113 */
1114DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
1115{
1116 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1117 {
1118 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1119 AssertRCReturn(rc, rc);
1120 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
1121 }
1122 return VINF_SUCCESS;
1123}
1124
1125
1126/**
1127 * Enters VMX root mode operation on the current CPU.
1128 *
1129 * @returns VBox status code.
1130 * @param pVM The cross context VM structure. Can be
1131 * NULL after a resume.
1132 * @param HCPhysCpuPage Physical address of the VMXON region.
1133 * @param pvCpuPage Pointer to the VMXON region.
1134 */
1135static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
1136{
1137 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
1138 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
1139 Assert(pvCpuPage);
1140 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1141
1142 if (pVM)
1143 {
1144 /* Write the VMCS revision identifier to the VMXON region. */
1145 *(uint32_t *)pvCpuPage = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_ID);
1146 }
1147
1148 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
1149 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1150
1151 /* Enable the VMX bit in CR4 if necessary. */
1152 RTCCUINTREG const uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);
1153
1154 /* Enter VMX root mode. */
1155 int rc = VMXEnable(HCPhysCpuPage);
1156 if (RT_FAILURE(rc))
1157 {
1158 if (!(uOldCr4 & X86_CR4_VMXE))
1159 SUPR0ChangeCR4(0 /* fOrMask */, ~X86_CR4_VMXE);
1160
1161 if (pVM)
1162 pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
1163 }
1164
1165 /* Restore interrupts. */
1166 ASMSetFlags(fEFlags);
1167 return rc;
1168}
1169
1170
1171/**
1172 * Exits VMX root mode operation on the current CPU.
1173 *
1174 * @returns VBox status code.
1175 */
1176static int hmR0VmxLeaveRootMode(void)
1177{
1178 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1179
1180 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
1181 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1182
1183 /* If we're for some reason not in VMX root mode, then don't leave it. */
1184 RTCCUINTREG const uHostCR4 = ASMGetCR4();
1185
1186 int rc;
1187 if (uHostCR4 & X86_CR4_VMXE)
1188 {
1189 /* Exit VMX root mode and clear the VMX bit in CR4. */
1190 VMXDisable();
1191 SUPR0ChangeCR4(0 /* fOrMask */, ~X86_CR4_VMXE);
1192 rc = VINF_SUCCESS;
1193 }
1194 else
1195 rc = VERR_VMX_NOT_IN_VMX_ROOT_MODE;
1196
1197 /* Restore interrupts. */
1198 ASMSetFlags(fEFlags);
1199 return rc;
1200}
1201
1202
1203/**
1204 * Allocates and maps a physically contiguous page. The allocated page is
1205 * zero'd out (used by various VT-x structures).
1206 *
1207 * @returns IPRT status code.
1208 * @param pMemObj Pointer to the ring-0 memory object.
1209 * @param ppVirt Where to store the virtual address of the
1210 * allocation.
1211 * @param pHCPhys Where to store the physical address of the
1212 * allocation.
1213 */
1214static int hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
1215{
1216 AssertPtr(pMemObj);
1217 AssertPtr(ppVirt);
1218 AssertPtr(pHCPhys);
1219 int rc = RTR0MemObjAllocCont(pMemObj, X86_PAGE_4K_SIZE, false /* fExecutable */);
1220 if (RT_FAILURE(rc))
1221 return rc;
1222 *ppVirt = RTR0MemObjAddress(*pMemObj);
1223 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
1224 ASMMemZero32(*ppVirt, X86_PAGE_4K_SIZE);
1225 return VINF_SUCCESS;
1226}
1227
1228
1229/**
1230 * Frees and unmaps an allocated, physical page.
1231 *
1232 * @param pMemObj Pointer to the ring-0 memory object.
1233 * @param ppVirt Where to re-initialize the virtual address of the
1234 * allocation as 0.
1235 * @param pHCPhys Where to re-initialize the physical address of the
1236 * allocation as 0.
1237 */
1238static void hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
1239{
1240 AssertPtr(pMemObj);
1241 AssertPtr(ppVirt);
1242 AssertPtr(pHCPhys);
1243 /* NULL is valid, accepted and ignored by the free function below. */
1244 RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
1245 *pMemObj = NIL_RTR0MEMOBJ;
1246 *ppVirt = NULL;
1247 *pHCPhys = NIL_RTHCPHYS;
1248}
1249
1250
1251/**
1252 * Initializes a VMCS info. object.
1253 *
1254 * @param pVmcsInfo The VMCS info. object.
1255 */
1256static void hmR0VmxInitVmcsInfo(PVMXVMCSINFO pVmcsInfo)
1257{
1258 memset(pVmcsInfo, 0, sizeof(*pVmcsInfo));
1259
1260 Assert(pVmcsInfo->hMemObjVmcs == NIL_RTR0MEMOBJ);
1261 Assert(pVmcsInfo->hMemObjMsrBitmap == NIL_RTR0MEMOBJ);
1262 Assert(pVmcsInfo->hMemObjGuestMsrLoad == NIL_RTR0MEMOBJ);
1263 Assert(pVmcsInfo->hMemObjGuestMsrStore == NIL_RTR0MEMOBJ);
1264 Assert(pVmcsInfo->hMemObjHostMsrLoad == NIL_RTR0MEMOBJ);
1265 pVmcsInfo->HCPhysVmcs = NIL_RTHCPHYS;
1266 pVmcsInfo->HCPhysMsrBitmap = NIL_RTHCPHYS;
1267 pVmcsInfo->HCPhysGuestMsrLoad = NIL_RTHCPHYS;
1268 pVmcsInfo->HCPhysGuestMsrStore = NIL_RTHCPHYS;
1269 pVmcsInfo->HCPhysHostMsrLoad = NIL_RTHCPHYS;
1270 pVmcsInfo->HCPhysVirtApic = NIL_RTHCPHYS;
1271 pVmcsInfo->HCPhysEPTP = NIL_RTHCPHYS;
1272 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
1273}
1274
1275
1276/**
1277 * Frees the VT-x structures for a VMCS info. object.
1278 *
1279 * @param pVM The cross context VM structure.
1280 * @param pVmcsInfo The VMCS info. object.
1281 */
1282static void hmR0VmxFreeVmcsInfo(PVM pVM, PVMXVMCSINFO pVmcsInfo)
1283{
1284 hmR0VmxPageFree(&pVmcsInfo->hMemObjVmcs, &pVmcsInfo->pvVmcs, &pVmcsInfo->HCPhysVmcs);
1285
1286 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
1287 hmR0VmxPageFree(&pVmcsInfo->hMemObjMsrBitmap, &pVmcsInfo->pvMsrBitmap, &pVmcsInfo->HCPhysMsrBitmap);
1288
1289 hmR0VmxPageFree(&pVmcsInfo->hMemObjHostMsrLoad, &pVmcsInfo->pvHostMsrLoad, &pVmcsInfo->HCPhysHostMsrLoad);
1290 hmR0VmxPageFree(&pVmcsInfo->hMemObjGuestMsrLoad, &pVmcsInfo->pvGuestMsrLoad, &pVmcsInfo->HCPhysGuestMsrLoad);
1291 hmR0VmxPageFree(&pVmcsInfo->hMemObjGuestMsrStore, &pVmcsInfo->pvGuestMsrStore, &pVmcsInfo->HCPhysGuestMsrStore);
1292
1293 hmR0VmxInitVmcsInfo(pVmcsInfo);
1294}
1295
1296
1297/**
1298 * Allocates the VT-x structures for a VMCS info. object.
1299 *
1300 * @returns VBox status code.
1301 * @param pVCpu The cross context virtual CPU structure.
1302 * @param pVmcsInfo The VMCS info. object.
1303 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1304 */
1305static int hmR0VmxAllocVmcsInfo(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1306{
1307 PVM pVM = pVCpu->CTX_SUFF(pVM);
1308
1309 /* Allocate the guest VM control structure (VMCS). */
1310 int rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjVmcs, &pVmcsInfo->pvVmcs, &pVmcsInfo->HCPhysVmcs);
1311 if (RT_SUCCESS(rc))
1312 {
1313 if (!fIsNstGstVmcs)
1314 {
1315 /* Get the allocated virtual-APIC page from the virtual APIC device. */
1316 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
1317 && (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW))
1318 {
1319 rc = APICGetApicPageForCpu(pVCpu, &pVmcsInfo->HCPhysVirtApic, (PRTR0PTR)&pVmcsInfo->pbVirtApic,
1320 NULL /* pR3Ptr */, NULL /* pRCPtr */);
1321 }
1322 }
1323 else
1324 {
1325 Assert(pVmcsInfo->HCPhysVirtApic == NIL_RTHCPHYS);
1326 Assert(!pVmcsInfo->pbVirtApic);
1327 }
1328
1329 if (RT_SUCCESS(rc))
1330 {
1331 /*
1332 * Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for
1333 * transparent accesses of specific MSRs.
1334 *
1335 * If the condition for enabling MSR bitmaps changes here, don't forget to
1336 * update HMIsMsrBitmapActive().
1337 *
1338 * We don't share MSR bitmaps between the guest and nested-guest so that we
1339 * don't need to care about carefully restoring the guest MSR bitmap.
1340 * The guest visible nested-guest MSR bitmap needs to remain unchanged.
1341 * Hence, allocate a separate MSR bitmap for the guest and nested-guest.
1342 */
1343 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
1344 {
1345 rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjMsrBitmap, &pVmcsInfo->pvMsrBitmap, &pVmcsInfo->HCPhysMsrBitmap);
1346 if (RT_SUCCESS(rc))
1347 ASMMemFill32(pVmcsInfo->pvMsrBitmap, X86_PAGE_4K_SIZE, UINT32_C(0xffffffff));
1348 }
1349
1350 if (RT_SUCCESS(rc))
1351 {
1352 /*
1353 * Allocate the VM-entry MSR-load area for the guest MSRs.
1354 *
1355 * Similar to MSR-bitmaps, we do not share the auto MSR-load/store areas between
1356 * the guest and nested-guest.
1357 */
1358 rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjGuestMsrLoad, &pVmcsInfo->pvGuestMsrLoad,
1359 &pVmcsInfo->HCPhysGuestMsrLoad);
1360 if (RT_SUCCESS(rc))
1361 {
1362 /*
1363 * We use the same page for VM-entry MSR-load and VM-exit MSR store areas.
1364 * These contain the guest MSRs to load on VM-entry and store on VM-exit.
1365 */
1366 Assert(pVmcsInfo->hMemObjGuestMsrStore == NIL_RTR0MEMOBJ);
1367 pVmcsInfo->pvGuestMsrStore = pVmcsInfo->pvGuestMsrLoad;
1368 pVmcsInfo->HCPhysGuestMsrStore = pVmcsInfo->HCPhysGuestMsrLoad;
1369
1370 /* Allocate the VM-exit MSR-load page for the host MSRs. */
1371 rc = hmR0VmxPageAllocZ(&pVmcsInfo->hMemObjHostMsrLoad, &pVmcsInfo->pvHostMsrLoad,
1372 &pVmcsInfo->HCPhysHostMsrLoad);
1373 }
1374 }
1375 }
1376 }
1377
1378 return rc;
1379}
1380
1381
1382/**
1383 * Free all VT-x structures for the VM.
1384 *
1385 * @returns IPRT status code.
1386 * @param pVM The cross context VM structure.
1387 */
1388static void hmR0VmxStructsFree(PVM pVM)
1389{
1390#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1391 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
1392#endif
1393 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
1394
1395 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1396 {
1397 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1398 PVMXVMCSINFO pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfo;
1399 hmR0VmxFreeVmcsInfo(pVM, pVmcsInfo);
1400#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1401 if (pVM->cpum.ro.GuestFeatures.fVmx)
1402 {
1403 pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfoNstGst;
1404 hmR0VmxFreeVmcsInfo(pVM, pVmcsInfo);
1405 }
1406#endif
1407 }
1408}
1409
1410
1411/**
1412 * Allocate all VT-x structures for the VM.
1413 *
1414 * @returns IPRT status code.
1415 * @param pVM The cross context VM structure.
1416 */
1417static int hmR0VmxStructsAlloc(PVM pVM)
1418{
1419 /*
1420 * Sanity check the VMCS size reported by the CPU as we assume 4KB allocations.
1421 * The VMCS size cannot be more than 4096 bytes.
1422 *
1423 * See Intel spec. Appendix A.1 "Basic VMX Information".
1424 */
1425 uint32_t const cbVmcs = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_SIZE);
1426 if (cbVmcs <= X86_PAGE_4K_SIZE)
1427 { /* likely */ }
1428 else
1429 {
1430 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE;
1431 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1432 }
1433
1434 /*
1435 * Initialize/check members up-front so we can cleanup en masse on allocation failures.
1436 */
1437#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1438 Assert(pVM->hm.s.vmx.hMemObjScratch == NIL_RTR0MEMOBJ);
1439 Assert(pVM->hm.s.vmx.pbScratch == NULL);
1440 pVM->hm.s.vmx.HCPhysScratch = NIL_RTHCPHYS;
1441#endif
1442
1443 Assert(pVM->hm.s.vmx.hMemObjApicAccess == NIL_RTR0MEMOBJ);
1444 Assert(pVM->hm.s.vmx.pbApicAccess == NULL);
1445 pVM->hm.s.vmx.HCPhysApicAccess = NIL_RTHCPHYS;
1446
1447 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1448 {
1449 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1450 hmR0VmxInitVmcsInfo(&pVCpu->hm.s.vmx.VmcsInfo);
1451 hmR0VmxInitVmcsInfo(&pVCpu->hm.s.vmx.VmcsInfoNstGst);
1452 }
1453
1454 /*
1455 * Allocate per-VM VT-x structures.
1456 */
1457 int rc = VINF_SUCCESS;
1458#ifdef VBOX_WITH_CRASHDUMP_MAGIC
1459 /* Allocate crash-dump magic scratch page. */
1460 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
1461 if (RT_FAILURE(rc))
1462 {
1463 hmR0VmxStructsFree(pVM);
1464 return rc;
1465 }
1466 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
1467 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
1468#endif
1469
1470 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
1471 if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
1472 {
1473 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
1474 &pVM->hm.s.vmx.HCPhysApicAccess);
1475 if (RT_FAILURE(rc))
1476 {
1477 hmR0VmxStructsFree(pVM);
1478 return rc;
1479 }
1480 }
1481
1482 /*
1483 * Initialize per-VCPU VT-x structures.
1484 */
1485 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1486 {
1487 /* Allocate the guest VMCS structures. */
1488 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1489 rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hm.s.vmx.VmcsInfo, false /* fIsNstGstVmcs */);
1490 if (RT_SUCCESS(rc))
1491 {
1492#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1493 /* Allocate the nested-guest VMCS structures, when the VMX feature is exposed to the guest. */
1494 if (pVM->cpum.ro.GuestFeatures.fVmx)
1495 {
1496 rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hm.s.vmx.VmcsInfoNstGst, true /* fIsNstGstVmcs */);
1497 if (RT_SUCCESS(rc))
1498 { /* likely */ }
1499 else
1500 break;
1501 }
1502#endif
1503 }
1504 else
1505 break;
1506 }
1507
1508 if (RT_FAILURE(rc))
1509 {
1510 hmR0VmxStructsFree(pVM);
1511 return rc;
1512 }
1513
1514 return VINF_SUCCESS;
1515}
1516
1517
1518#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1519/**
1520 * Returns whether an MSR at the given MSR-bitmap offset is intercepted or not.
1521 *
1522 * @returns @c true if the MSR is intercepted, @c false otherwise.
1523 * @param pvMsrBitmap The MSR bitmap.
1524 * @param offMsr The MSR byte offset.
1525 * @param iBit The bit offset from the byte offset.
1526 */
1527DECLINLINE(bool) hmR0VmxIsMsrBitSet(const void *pvMsrBitmap, uint16_t offMsr, int32_t iBit)
1528{
1529 uint8_t const * const pbMsrBitmap = (uint8_t const * const)pvMsrBitmap;
1530 Assert(pbMsrBitmap);
1531 Assert(offMsr + (iBit >> 3) <= X86_PAGE_4K_SIZE);
1532 return ASMBitTest(pbMsrBitmap + offMsr, iBit);
1533}
1534#endif
1535
1536
1537/**
1538 * Sets the permission bits for the specified MSR in the given MSR bitmap.
1539 *
1540 * If the passed VMCS is a nested-guest VMCS, this function ensures that the
1541 * read/write intercept is cleared from the MSR bitmap used for hardware-assisted
1542 * VMX execution of the nested-guest, only if the nested-guest is also not intercepting
1543 * the read/write access of this MSR.
1544 *
1545 * @param pVCpu The cross context virtual CPU structure.
1546 * @param pVmcsInfo The VMCS info. object.
1547 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1548 * @param idMsr The MSR value.
1549 * @param fMsrpm The MSR permissions (see VMXMSRPM_XXX). This must
1550 * include both a read -and- a write permission!
1551 *
1552 * @sa CPUMGetVmxMsrPermission.
1553 * @remarks Can be called with interrupts disabled.
1554 */
1555static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs, uint32_t idMsr, uint32_t fMsrpm)
1556{
1557 uint8_t *pbMsrBitmap = (uint8_t *)pVmcsInfo->pvMsrBitmap;
1558 Assert(pbMsrBitmap);
1559 Assert(VMXMSRPM_IS_FLAG_VALID(fMsrpm));
1560
1561 /*
1562 * MSR-bitmap Layout:
1563 * Byte index MSR range Interpreted as
1564 * 0x000 - 0x3ff 0x00000000 - 0x00001fff Low MSR read bits.
1565 * 0x400 - 0x7ff 0xc0000000 - 0xc0001fff High MSR read bits.
1566 * 0x800 - 0xbff 0x00000000 - 0x00001fff Low MSR write bits.
1567 * 0xc00 - 0xfff 0xc0000000 - 0xc0001fff High MSR write bits.
1568 *
1569 * A bit corresponding to an MSR within the above range causes a VM-exit
1570 * if the bit is 1 on executions of RDMSR/WRMSR. If an MSR falls outside
1571 * the above ranges, it always causes a VM-exit.
1572 *
1573 * See Intel spec. 24.6.9 "MSR-Bitmap Address".
1574 */
1575 uint16_t const offBitmapRead = 0;
1576 uint16_t const offBitmapWrite = 0x800;
1577 uint16_t offMsr;
1578 int32_t iBit;
1579 if (idMsr <= UINT32_C(0x00001fff))
1580 {
1581 offMsr = 0;
1582 iBit = idMsr;
1583 }
1584 else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
1585 {
1586 offMsr = 0x400;
1587 iBit = idMsr - UINT32_C(0xc0000000);
1588 }
1589 else
1590 AssertMsgFailedReturnVoid(("Invalid MSR %#RX32\n", idMsr));
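    /*
     * As an illustration of the layout above: MSR_K8_LSTAR (0xc0000082) yields offMsr = 0x400
     * and iBit = 0x82, so its read intercept lives at byte 0x410, bit 2, and its write
     * intercept at byte 0xc10, bit 2 of the bitmap.
     */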
1591
1592 /*
1593 * Set the MSR read permission.
1594 */
1595 uint16_t const offMsrRead = offBitmapRead + offMsr;
1596 Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
1597 if (fMsrpm & VMXMSRPM_ALLOW_RD)
1598 {
1599#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1600 bool const fClear = !fIsNstGstVmcs ? true
1601 : !hmR0VmxIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), offMsrRead, iBit);
1602#else
1603 RT_NOREF2(pVCpu, fIsNstGstVmcs);
1604 bool const fClear = true;
1605#endif
1606 if (fClear)
1607 ASMBitClear(pbMsrBitmap + offMsrRead, iBit);
1608 }
1609 else
1610 ASMBitSet(pbMsrBitmap + offMsrRead, iBit);
1611
1612 /*
1613 * Set the MSR write permission.
1614 */
1615 uint16_t const offMsrWrite = offBitmapWrite + offMsr;
1616 Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
1617 if (fMsrpm & VMXMSRPM_ALLOW_WR)
1618 {
1619#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1620 bool const fClear = !fIsNstGstVmcs ? true
1621 : !hmR0VmxIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), offMsrWrite, iBit);
1622#else
1623 RT_NOREF2(pVCpu, fIsNstGstVmcs);
1624 bool const fClear = true;
1625#endif
1626 if (fClear)
1627 ASMBitClear(pbMsrBitmap + offMsrWrite, iBit);
1628 }
1629 else
1630 ASMBitSet(pbMsrBitmap + offMsrWrite, iBit);
1631}
1632
1633
1634/**
1635 * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
1636 * area.
1637 *
1638 * @returns VBox status code.
1639 * @param pVCpu The cross context virtual CPU structure.
1640 * @param pVmcsInfo The VMCS info. object.
1641 * @param cMsrs The number of MSRs.
1642 */
1643static int hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t cMsrs)
1644{
1645 /* Shouldn't ever happen, but the hardware does advertise an upper limit. We're well within the recommended 512. */
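    /* The limit comes from IA32_VMX_MISC: the Intel spec recommends at most 512 * (N + 1)
       MSRs in each list, where N is bits 27:25 of that MSR. */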
1646 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc);
1647 if (RT_LIKELY(cMsrs < cMaxSupportedMsrs))
1648 {
1649 /* Commit the MSR counts to the VMCS and update the cache. */
1650 if (pVmcsInfo->cEntryMsrLoad != cMsrs)
1651 {
1652 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);
1653 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);
1654 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs);
1655 AssertRCReturn(rc, rc);
1656
1657 pVmcsInfo->cEntryMsrLoad = cMsrs;
1658 pVmcsInfo->cExitMsrStore = cMsrs;
1659 pVmcsInfo->cExitMsrLoad = cMsrs;
1660 }
1661 return VINF_SUCCESS;
1662 }
1663
1664 LogRel(("Auto-load/store MSR count exceeded! cMsrs=%u MaxSupported=%u\n", cMsrs, cMaxSupportedMsrs));
1665 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
1666 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1667}
1668
1669
1670/**
1671 * Adds a new (or updates the value of an existing) guest/host MSR
1672 * pair to be swapped during the world-switch as part of the
1673 * auto-load/store MSR area in the VMCS.
1674 *
1675 * @returns VBox status code.
1676 * @param pVCpu The cross context virtual CPU structure.
1677 * @param pVmxTransient The VMX-transient structure.
1678 * @param idMsr The MSR.
1679 * @param uGuestMsrValue Value of the guest MSR.
1680 * @param fSetReadWrite Whether to set the guest read/write access of this
1681 * MSR (thus not causing a VM-exit).
1682 * @param fUpdateHostMsr Whether to update the value of the host MSR if
1683 * necessary.
1684 */
1685static int hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t idMsr, uint64_t uGuestMsrValue,
1686 bool fSetReadWrite, bool fUpdateHostMsr)
1687{
1688 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1689 bool const fIsNstGstVmcs = pVmxTransient->fIsNestedGuest;
1690 PVMXAUTOMSR pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
1691 uint32_t cMsrs = pVmcsInfo->cEntryMsrLoad;
1692 uint32_t i;
1693
1694 /* Paranoia. */
1695 Assert(pGuestMsrLoad);
1696
1697 LogFlowFunc(("pVCpu=%p idMsr=%#RX32 uGuestMsrValue=%#RX64\n", pVCpu, idMsr, uGuestMsrValue));
1698
1699 /* Check if the MSR already exists in the VM-entry MSR-load area. */
1700 for (i = 0; i < cMsrs; i++)
1701 {
1702 if (pGuestMsrLoad[i].u32Msr == idMsr)
1703 break;
1704 }
1705
1706 bool fAdded = false;
1707 if (i == cMsrs)
1708 {
1709 /* The MSR does not exist, bump the MSR count to make room for the new MSR. */
1710 ++cMsrs;
1711 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
1712 AssertMsgRCReturn(rc, ("Insufficient space to add MSR to VM-entry MSR-load/store area %u\n", idMsr), rc);
1713
1714 /* Set the guest to read/write this MSR without causing VM-exits. */
1715 if ( fSetReadWrite
1716 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
1717 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_ALLOW_RD_WR);
1718
1719 LogFlowFunc(("MSR added, cMsrs now %u\n", cMsrs));
1720 fAdded = true;
1721 }
1722
1723 /* Update the MSR value for the newly added or already existing MSR. */
1724 pGuestMsrLoad[i].u32Msr = idMsr;
1725 pGuestMsrLoad[i].u64Value = uGuestMsrValue;
1726
1727 /* Create the corresponding slot in the VM-exit MSR-store area if we use a different page. */
1728 if (hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
1729 {
1730 PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
1731 pGuestMsrStore[i].u32Msr = idMsr;
1732 pGuestMsrStore[i].u64Value = uGuestMsrValue;
1733 }
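    /* Note: the guest-load, guest-store and host-load areas are kept index-aligned,
       so slot i always refers to the same MSR in all three. */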
1734
1735 /* Update the corresponding slot in the host MSR area. */
1736 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
1737 Assert(pHostMsr != pVmcsInfo->pvGuestMsrLoad);
1738 Assert(pHostMsr != pVmcsInfo->pvGuestMsrStore);
1739 pHostMsr[i].u32Msr = idMsr;
1740
1741 /*
1742 * Only if the caller requests to update the host MSR value AND we've newly added the
1743 * MSR to the host MSR area do we actually update the value. Otherwise, it will be
1744 * updated by hmR0VmxUpdateAutoLoadHostMsrs().
1745 *
1746 * We do this for performance reasons since reading MSRs may be quite expensive.
1747 */
1748 if (fAdded)
1749 {
1750 if (fUpdateHostMsr)
1751 {
1752 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1753 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1754 pHostMsr[i].u64Value = ASMRdMsr(idMsr);
1755 }
1756 else
1757 {
1758 /* Someone else can do the work. */
1759 pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = false;
1760 }
1761 }
1762 return VINF_SUCCESS;
1763}
1764
1765
1766/**
1767 * Removes a guest/host MSR pair to be swapped during the world-switch from the
1768 * auto-load/store MSR area in the VMCS.
1769 *
1770 * @returns VBox status code.
1771 * @param pVCpu The cross context virtual CPU structure.
1772 * @param pVmxTransient The VMX-transient structure.
1773 * @param idMsr The MSR.
1774 */
1775static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t idMsr)
1776{
1777 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1778 bool const fIsNstGstVmcs = pVmxTransient->fIsNestedGuest;
1779 PVMXAUTOMSR pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
1780 uint32_t cMsrs = pVmcsInfo->cEntryMsrLoad;
1781
1782 LogFlowFunc(("pVCpu=%p idMsr=%#RX32\n", pVCpu, idMsr));
1783
1784 for (uint32_t i = 0; i < cMsrs; i++)
1785 {
1786 /* Find the MSR. */
1787 if (pGuestMsrLoad[i].u32Msr == idMsr)
1788 {
1789 /*
1790 * If it's the last MSR, we only need to reduce the MSR count.
1791 * If it's -not- the last MSR, copy the last MSR in place of it and reduce the MSR count.
1792 */
1793 if (i < cMsrs - 1)
1794 {
1795 /* Remove it from the VM-entry MSR-load area. */
1796 pGuestMsrLoad[i].u32Msr = pGuestMsrLoad[cMsrs - 1].u32Msr;
1797 pGuestMsrLoad[i].u64Value = pGuestMsrLoad[cMsrs - 1].u64Value;
1798
1799 /* Remove it from the VM-exit MSR-store area if it's in a different page. */
1800 if (hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
1801 {
1802 PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
1803 Assert(pGuestMsrStore[i].u32Msr == idMsr);
1804 pGuestMsrStore[i].u32Msr = pGuestMsrStore[cMsrs - 1].u32Msr;
1805 pGuestMsrStore[i].u64Value = pGuestMsrStore[cMsrs - 1].u64Value;
1806 }
1807
1808 /* Remove it from the VM-exit MSR-load area. */
1809 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
1810 Assert(pHostMsr[i].u32Msr == idMsr);
1811 pHostMsr[i].u32Msr = pHostMsr[cMsrs - 1].u32Msr;
1812 pHostMsr[i].u64Value = pHostMsr[cMsrs - 1].u64Value;
1813 }
1814
1815 /* Reduce the count to reflect the removed MSR and bail. */
1816 --cMsrs;
1817 break;
1818 }
1819 }
1820
1821 /* Update the VMCS if the count changed (meaning the MSR was found and removed). */
1822 if (cMsrs != pVmcsInfo->cEntryMsrLoad)
1823 {
1824 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
1825 AssertRCReturn(rc, rc);
1826
1827 /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */
1828 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
1829 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
1830
1831 Log4Func(("Removed MSR %#RX32, cMsrs=%u\n", idMsr, cMsrs));
1832 return VINF_SUCCESS;
1833 }
1834
1835 return VERR_NOT_FOUND;
1836}
1837
1838
1839/**
1840 * Checks if the specified guest MSR is part of the VM-entry MSR-load area.
1841 *
1842 * @returns @c true if found, @c false otherwise.
1843 * @param pVmcsInfo The VMCS info. object.
1844 * @param idMsr The MSR to find.
1845 */
1846static bool hmR0VmxIsAutoLoadGuestMsr(PCVMXVMCSINFO pVmcsInfo, uint32_t idMsr)
1847{
1848 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
1849 uint32_t const cMsrs = pVmcsInfo->cEntryMsrLoad;
1850 Assert(pMsrs);
1851 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
1852 for (uint32_t i = 0; i < cMsrs; i++)
1853 {
1854 if (pMsrs[i].u32Msr == idMsr)
1855 return true;
1856 }
1857 return false;
1858}
1859
1860
1861/**
1862 * Updates the value of all host MSRs in the VM-exit MSR-load area.
1863 *
1864 * @param pVCpu The cross context virtual CPU structure.
1865 * @param pVmcsInfo The VMCS info. object.
1866 *
1867 * @remarks No-long-jump zone!!!
1868 */
1869static void hmR0VmxUpdateAutoLoadHostMsrs(PCVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
1870{
1871 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1872
1873 PVMXAUTOMSR pHostMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
1874 uint32_t const cMsrs = pVmcsInfo->cExitMsrLoad;
1875 Assert(pHostMsrLoad);
1876 Assert(sizeof(*pHostMsrLoad) * cMsrs <= X86_PAGE_4K_SIZE);
1877 LogFlowFunc(("pVCpu=%p cMsrs=%u\n", pVCpu, cMsrs));
1878 for (uint32_t i = 0; i < cMsrs; i++)
1879 {
1880 /*
1881 * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.
1882 * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
1883 */
1884 if (pHostMsrLoad[i].u32Msr == MSR_K6_EFER)
1885 pHostMsrLoad[i].u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostMsrEfer;
1886 else
1887 pHostMsrLoad[i].u64Value = ASMRdMsr(pHostMsrLoad[i].u32Msr);
1888 }
1889}
1890
1891
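/*
 * Overview of the "lazy" MSR handling below: hmR0VmxLazySaveHostMsrs() snapshots the host
 * values and sets VMX_LAZY_MSRS_SAVED_HOST, hmR0VmxLazyLoadGuestMsrs() writes the guest
 * values to the CPU and sets VMX_LAZY_MSRS_LOADED_GUEST, and hmR0VmxLazyRestoreHostMsrs()
 * puts the host values back and clears both flags. The MSRs involved (LSTAR, STAR, SF_MASK,
 * KERNEL_GS_BASE) are only touched when 64-bit guests are allowed.
 */
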
1892/**
1893 * Saves a set of host MSRs to allow read/write passthru access to the guest and
1894 * perform lazy restoration of the host MSRs while leaving VT-x.
1895 *
1896 * @param pVCpu The cross context virtual CPU structure.
1897 *
1898 * @remarks No-long-jump zone!!!
1899 */
1900static void hmR0VmxLazySaveHostMsrs(PVMCPU pVCpu)
1901{
1902 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1903
1904 /*
1905 * Note: If you're adding MSRs here, make sure to update the MSR-bitmap accesses in hmR0VmxSetupVmcsProcCtls().
1906 */
1907 if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST))
1908 {
1909 Assert(!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)); /* Guest MSRs better not be loaded now. */
1910#if HC_ARCH_BITS == 64
1911 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
1912 {
1913 pVCpu->hm.s.vmx.u64HostMsrLStar = ASMRdMsr(MSR_K8_LSTAR);
1914 pVCpu->hm.s.vmx.u64HostMsrStar = ASMRdMsr(MSR_K6_STAR);
1915 pVCpu->hm.s.vmx.u64HostMsrSfMask = ASMRdMsr(MSR_K8_SF_MASK);
1916 pVCpu->hm.s.vmx.u64HostMsrKernelGsBase = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
1917 }
1918#endif
1919 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
1920 }
1921}
1922
1923
1924/**
1925 * Checks whether the MSR belongs to the set of guest MSRs that we restore
1926 * lazily while leaving VT-x.
1927 *
1928 * @returns true if it does, false otherwise.
1929 * @param pVCpu The cross context virtual CPU structure.
1930 * @param idMsr The MSR to check.
1931 */
1932static bool hmR0VmxIsLazyGuestMsr(PCVMCPU pVCpu, uint32_t idMsr)
1933{
1934 NOREF(pVCpu);
1935#if HC_ARCH_BITS == 64
1936 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
1937 {
1938 switch (idMsr)
1939 {
1940 case MSR_K8_LSTAR:
1941 case MSR_K6_STAR:
1942 case MSR_K8_SF_MASK:
1943 case MSR_K8_KERNEL_GS_BASE:
1944 return true;
1945 }
1946 }
1947#else
1948 RT_NOREF(pVCpu, idMsr);
1949#endif
1950 return false;
1951}
1952
1953
1954/**
1955 * Loads a set of guest MSRs to allow read/write passthru access to the guest.
1956 *
1957 * The name of this function is slightly confusing. This function does NOT
1958 * postpone loading, but loads the MSR right now. "hmR0VmxLazy" is simply a
1959 * common prefix for functions dealing with "lazy restoration" of the shared
1960 * MSRs.
1961 *
1962 * @param pVCpu The cross context virtual CPU structure.
1963 *
1964 * @remarks No-long-jump zone!!!
1965 */
1966static void hmR0VmxLazyLoadGuestMsrs(PVMCPU pVCpu)
1967{
1968 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1969 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
1970
1971 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
1972#if HC_ARCH_BITS == 64
1973 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
1974 {
1975 /*
1976 * If the guest MSRs are not loaded -and- if all the guest MSRs are identical
1977 * to the MSRs on the CPU (which are the saved host MSRs, see assertion above) then
1978 * we can skip a few MSR writes.
1979 *
1980 * Otherwise, it implies either 1. they're not loaded, or 2. they're loaded but the
1981 * guest MSR values in the guest-CPU context might be different to what's currently
1982 * loaded in the CPU. In either case, we need to write the new guest MSR values to the
1983 * CPU, see @bugref{8728}.
1984 */
1985 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
1986 if ( !(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
1987 && pCtx->msrKERNELGSBASE == pVCpu->hm.s.vmx.u64HostMsrKernelGsBase
1988 && pCtx->msrLSTAR == pVCpu->hm.s.vmx.u64HostMsrLStar
1989 && pCtx->msrSTAR == pVCpu->hm.s.vmx.u64HostMsrStar
1990 && pCtx->msrSFMASK == pVCpu->hm.s.vmx.u64HostMsrSfMask)
1991 {
1992#ifdef VBOX_STRICT
1993 Assert(ASMRdMsr(MSR_K8_KERNEL_GS_BASE) == pCtx->msrKERNELGSBASE);
1994 Assert(ASMRdMsr(MSR_K8_LSTAR) == pCtx->msrLSTAR);
1995 Assert(ASMRdMsr(MSR_K6_STAR) == pCtx->msrSTAR);
1996 Assert(ASMRdMsr(MSR_K8_SF_MASK) == pCtx->msrSFMASK);
1997#endif
1998 }
1999 else
2000 {
2001 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE);
2002 ASMWrMsr(MSR_K8_LSTAR, pCtx->msrLSTAR);
2003 ASMWrMsr(MSR_K6_STAR, pCtx->msrSTAR);
2004 ASMWrMsr(MSR_K8_SF_MASK, pCtx->msrSFMASK);
2005 }
2006 }
2007#endif
2008 pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
2009}
2010
2011
2012/**
2013 * Performs lazy restoration of the set of host MSRs if they were previously
2014 * loaded with guest MSR values.
2015 *
2016 * @param pVCpu The cross context virtual CPU structure.
2017 *
2018 * @remarks No-long-jump zone!!!
2019 * @remarks The guest MSRs should have been saved back into the guest-CPU
2020 * context by hmR0VmxImportGuestState()!!!
2021 */
2022static void hmR0VmxLazyRestoreHostMsrs(PVMCPU pVCpu)
2023{
2024 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2025 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2026
2027 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
2028 {
2029 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
2030#if HC_ARCH_BITS == 64
2031 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
2032 {
2033 ASMWrMsr(MSR_K8_LSTAR, pVCpu->hm.s.vmx.u64HostMsrLStar);
2034 ASMWrMsr(MSR_K6_STAR, pVCpu->hm.s.vmx.u64HostMsrStar);
2035 ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hm.s.vmx.u64HostMsrSfMask);
2036 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostMsrKernelGsBase);
2037 }
2038#endif
2039 }
2040 pVCpu->hm.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);
2041}
2042
2043
2044/**
2045 * Verifies that our cached values of the VMCS fields are all consistent with
2046 * what's actually present in the VMCS.
2047 *
2048 * @returns VBox status code.
2049 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
2050 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
2051 * VMCS content. HMCPU error-field is
2052 * updated, see VMX_VCI_XXX.
2053 * @param pVCpu The cross context virtual CPU structure.
2054 * @param pVmcsInfo The VMCS info. object.
2055 */
2056static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
2057{
2058 uint32_t u32Val;
2059 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
2060 AssertRCReturn(rc, rc);
2061 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
2062 ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32EntryCtls, u32Val),
2063 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_ENTRY,
2064 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2065
2066 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
2067 AssertRCReturn(rc, rc);
2068 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
2069 ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32ExitCtls, u32Val),
2070 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_EXIT,
2071 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2072
2073 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
2074 AssertRCReturn(rc, rc);
2075 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
2076 ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32PinCtls, u32Val),
2077 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PIN_EXEC,
2078 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2079
2080 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
2081 AssertRCReturn(rc, rc);
2082 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
2083 ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32ProcCtls, u32Val),
2084 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC,
2085 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2086
2087 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
2088 {
2089 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
2090 AssertRCReturn(rc, rc);
2091 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
2092 ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32ProcCtls2, u32Val),
2093 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
2094 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2095 }
2096
2097 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
2098 AssertRCReturn(rc, rc);
2099 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
2100 ("Cache=%#RX32 VMCS=%#RX32\n", pVmcsInfo->u32XcptBitmap, u32Val),
2101 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
2102 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2103
2104 uint64_t u64Val;
2105 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
2106 AssertRCReturn(rc, rc);
2107 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
2108 ("Cache=%#RX64 VMCS=%#RX64\n", pVmcsInfo->u64TscOffset, u64Val),
2109 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
2110 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2111
2112 return VINF_SUCCESS;
2113}
2114
2115
2116#ifdef VBOX_STRICT
2117/**
2118 * Verifies that our cached host EFER MSR value has not changed since we cached it.
2119 *
2120 * @param pVCpu The cross context virtual CPU structure.
2121 * @param pVmcsInfo The VMCS info. object.
2122 */
2123static void hmR0VmxCheckHostEferMsr(PCVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
2124{
2125 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2126
2127 if (pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
2128 {
2129 uint64_t const uHostEferMsr = ASMRdMsr(MSR_K6_EFER);
2130 uint64_t const uHostEferMsrCache = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostMsrEfer;
2131 uint64_t uVmcsEferMsrVmcs;
2132 int rc = VMXReadVmcs64(VMX_VMCS64_HOST_EFER_FULL, &uVmcsEferMsrVmcs);
2133 AssertRC(rc);
2134
2135 AssertMsgReturnVoid(uHostEferMsr == uVmcsEferMsrVmcs,
2136 ("EFER Host/VMCS mismatch! host=%#RX64 vmcs=%#RX64\n", uHostEferMsr, uVmcsEferMsrVmcs));
2137 AssertMsgReturnVoid(uHostEferMsr == uHostEferMsrCache,
2138 ("EFER Host/Cache mismatch! host=%#RX64 cache=%#RX64\n", uHostEferMsr, uHostEferMsrCache));
2139 }
2140}
2141
2142
2143/**
2144 * Verifies whether the guest/host MSR pairs in the auto-load/store area in the
2145 * VMCS are correct.
2146 *
2147 * @param pVCpu The cross context virtual CPU structure.
2148 * @param pVmcsInfo The VMCS info. object.
2149 */
2150static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
2151{
2152 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2153
2154 /* Read the various MSR-area counts from the VMCS. */
2155 uint32_t cEntryLoadMsrs;
2156 uint32_t cExitStoreMsrs;
2157 uint32_t cExitLoadMsrs;
2158 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cEntryLoadMsrs); AssertRC(rc);
2159 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cExitStoreMsrs); AssertRC(rc);
2160 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cExitLoadMsrs); AssertRC(rc);
2161
2162 /* Verify all the MSR counts are the same. */
2163 Assert(cEntryLoadMsrs == cExitStoreMsrs);
2164 Assert(cExitStoreMsrs == cExitLoadMsrs);
2165 uint32_t const cMsrs = cExitLoadMsrs;
2166
2167 /* Verify the MSR counts do not exceed the maximum count supported by the hardware. */
2168 Assert(cMsrs < VMX_MISC_MAX_MSRS(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc));
2169
2170 /* Verify the MSR counts are within the allocated page size. */
2171 Assert(sizeof(VMXAUTOMSR) * cMsrs <= X86_PAGE_4K_SIZE);
2172
2173 /* Verify the relevant contents of the MSR areas match. */
2174 PCVMXAUTOMSR pGuestMsrLoad = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
2175 PCVMXAUTOMSR pGuestMsrStore = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
2176 PCVMXAUTOMSR pHostMsrLoad = (PCVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
2177 bool const fSeparateExitMsrStorePage = hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo);
2178 for (uint32_t i = 0; i < cMsrs; i++)
2179 {
2180 /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */
2181 if (fSeparateExitMsrStorePage)
2182 {
2183 AssertMsgReturnVoid(pGuestMsrLoad->u32Msr == pGuestMsrStore->u32Msr,
2184 ("GuestMsrLoad=%#RX32 GuestMsrStore=%#RX32 cMsrs=%u\n",
2185 pGuestMsrLoad->u32Msr, pGuestMsrStore->u32Msr, cMsrs));
2186 }
2187
2188 AssertMsgReturnVoid(pHostMsrLoad->u32Msr == pGuestMsrLoad->u32Msr,
2189 ("HostMsrLoad=%#RX32 GuestMsrLoad=%#RX32 cMsrs=%u\n",
2190 pHostMsrLoad->u32Msr, pGuestMsrLoad->u32Msr, cMsrs));
2191
2192 uint64_t const u64Msr = ASMRdMsr(pHostMsrLoad->u32Msr);
2193 AssertMsgReturnVoid(pHostMsrLoad->u64Value == u64Msr,
2194 ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
2195 pHostMsrLoad->u32Msr, pHostMsrLoad->u64Value, u64Msr, cMsrs));
2196
2197 /* Verify that the cached host EFER MSR matches what's loaded on the CPU. */
2198 bool const fIsEferMsr = RT_BOOL(pHostMsrLoad->u32Msr == MSR_K6_EFER);
2199 if (fIsEferMsr)
2200 {
2201 AssertMsgReturnVoid(u64Msr == pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostMsrEfer,
2202 ("Cached=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
2203 pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostMsrEfer, u64Msr, cMsrs));
2204 }
2205
2206 /* Verify that the accesses are as expected in the MSR bitmap for auto-load/store MSRs. */
2207 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
2208 {
2209 uint32_t const fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, pGuestMsrLoad->u32Msr);
2210 if (fIsEferMsr)
2211 {
2212 AssertMsgReturnVoid((fMsrpm & VMXMSRPM_EXIT_RD), ("Passthru read for EFER MSR!?\n"));
2213 AssertMsgReturnVoid((fMsrpm & VMXMSRPM_EXIT_WR), ("Passthru write for EFER MSR!?\n"));
2214 }
2215 else
2216 {
2217 AssertMsgReturnVoid((fMsrpm & VMXMSRPM_ALLOW_RD_WR) == VMXMSRPM_ALLOW_RD_WR,
2218 ("u32Msr=%#RX32 cMsrs=%u No passthru read/write!\n", pGuestMsrLoad->u32Msr, cMsrs));
2219 }
2220 }
2221
2222 /* Move to the next MSR. */
2223 pHostMsrLoad++;
2224 pGuestMsrLoad++;
2225 pGuestMsrStore++;
2226 }
2227}
2228#endif /* VBOX_STRICT */
2229
2230
2231/**
2232 * Flushes the TLB using EPT.
2233 *
2234 * @returns VBox status code.
2235 * @param pVCpu The cross context virtual CPU structure of the calling
2236 * EMT. Can be NULL depending on @a enmTlbFlush.
2237 * @param pVmcsInfo The VMCS info. object. Can be NULL depending on @a
2238 * enmTlbFlush.
2239 * @param enmTlbFlush Type of flush.
2240 *
2241 * @remarks Caller is responsible for making sure this function is called only
2242 * when NestedPaging is supported and providing @a enmTlbFlush that is
2243 * supported by the CPU.
2244 * @remarks Can be called with interrupts disabled.
2245 */
2246static void hmR0VmxFlushEpt(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, VMXTLBFLUSHEPT enmTlbFlush)
2247{
2248 uint64_t au64Descriptor[2];
2249 if (enmTlbFlush == VMXTLBFLUSHEPT_ALL_CONTEXTS)
2250 au64Descriptor[0] = 0;
2251 else
2252 {
2253 Assert(pVCpu);
2254 Assert(pVmcsInfo);
2255 au64Descriptor[0] = pVmcsInfo->HCPhysEPTP;
2256 }
2257 au64Descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
2258
2259 int rc = VMXR0InvEPT(enmTlbFlush, &au64Descriptor[0]);
2260 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %#RHp failed. rc=%Rrc\n", enmTlbFlush, au64Descriptor[0], rc));
2261
2262 if ( RT_SUCCESS(rc)
2263 && pVCpu)
2264 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
2265}
2266
2267
2268/**
2269 * Flushes the TLB using VPID.
2270 *
2271 * @returns VBox status code.
2272 * @param pVCpu The cross context virtual CPU structure of the calling
2273 * EMT. Can be NULL depending on @a enmTlbFlush.
2274 * @param enmTlbFlush Type of flush.
2275 * @param GCPtr Virtual address of the page to flush (can be 0 depending
2276 * on @a enmTlbFlush).
2277 *
2278 * @remarks Can be called with interrupts disabled.
2279 */
2280static void hmR0VmxFlushVpid(PVMCPU pVCpu, VMXTLBFLUSHVPID enmTlbFlush, RTGCPTR GCPtr)
2281{
2282 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid);
2283
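    /* The INVVPID descriptor: the VPID goes in bits 15:0 of the first quadword (bits 63:16
       must be zero) and the linear address, used only for individual-address flushes, goes in
       the second quadword. See Intel spec. "INVVPID - Invalidate Translations Based on VPID". */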
2284 uint64_t au64Descriptor[2];
2285 if (enmTlbFlush == VMXTLBFLUSHVPID_ALL_CONTEXTS)
2286 {
2287 au64Descriptor[0] = 0;
2288 au64Descriptor[1] = 0;
2289 }
2290 else
2291 {
2292 AssertPtr(pVCpu);
2293 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
2294 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
2295 au64Descriptor[0] = pVCpu->hm.s.uCurrentAsid;
2296 au64Descriptor[1] = GCPtr;
2297 }
2298
2299 int rc = VMXR0InvVPID(enmTlbFlush, &au64Descriptor[0]);
2300 AssertMsg(rc == VINF_SUCCESS,
2301 ("VMXR0InvVPID %#x %u %RGv failed with %Rrc\n", enmTlbFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
2302
2303 if ( RT_SUCCESS(rc)
2304 && pVCpu)
2305 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
2306 NOREF(rc);
2307}
2308
2309
2310/**
2311 * Invalidates a guest page by guest virtual address. Only relevant for EPT/VPID,
2312 * otherwise there is nothing really to invalidate.
2313 *
2314 * @returns VBox status code.
2315 * @param pVCpu The cross context virtual CPU structure.
2316 * @param GCVirt Guest virtual address of the page to invalidate.
2317 */
2318VMMR0DECL(int) VMXR0InvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
2319{
2320 AssertPtr(pVCpu);
2321 LogFlowFunc(("pVCpu=%p GCVirt=%RGv\n", pVCpu, GCVirt));
2322
2323 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
2324 {
2325 /*
2326 * We must invalidate the guest TLB entry in either case; we cannot ignore it even for
2327 * the EPT case. See @bugref{6043} and @bugref{6177}.
2328 *
2329 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*()
2330 * as this function may be called in a loop with individual addresses.
2331 */
2332 PVM pVM = pVCpu->CTX_SUFF(pVM);
2333 if (pVM->hm.s.vmx.fVpid)
2334 {
2335 bool fVpidFlush = RT_BOOL(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
2336
2337#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
2338 /*
2339 * Workaround Erratum BV75, AAJ159 and others that affect several Intel CPUs
2340 * where executing INVVPID outside 64-bit mode does not flush translations of
2341 * 64-bit linear addresses, see @bugref{6208#c72}.
2342 */
2343 if (RT_HI_U32(GCVirt))
2344 fVpidFlush = false;
2345#endif
2346
2347 if (fVpidFlush)
2348 {
2349 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_INDIV_ADDR, GCVirt);
2350 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
2351 }
2352 else
2353 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2354 }
2355 else if (pVM->hm.s.fNestedPaging)
2356 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2357 }
2358
2359 return VINF_SUCCESS;
2360}
2361
2362
2363/**
2364 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
2365 * case where neither EPT nor VPID is supported by the CPU.
2366 *
2367 * @param pHostCpu The HM physical-CPU structure.
2368 * @param pVCpu The cross context virtual CPU structure.
2369 *
2370 * @remarks Called with interrupts disabled.
2371 */
2372static void hmR0VmxFlushTaggedTlbNone(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
2373{
2374 AssertPtr(pVCpu);
2375 AssertPtr(pHostCpu);
2376
2377 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
2378
2379 Assert(pHostCpu->idCpu != NIL_RTCPUID);
2380 pVCpu->hm.s.idLastCpu = pHostCpu->idCpu;
2381 pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes;
2382 pVCpu->hm.s.fForceTLBFlush = false;
2383 return;
2384}
2385
2386
2387/**
2388 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
2389 *
2390 * @param pHostCpu The HM physical-CPU structure.
2391 * @param pVCpu The cross context virtual CPU structure.
2392 * @param pVmcsInfo The VMCS info. object.
2393 *
2394 * @remarks All references to "ASID" in this function pertain to "VPID" in Intel's
2395 * nomenclature. The reason is to avoid confusion in compare statements,
2396 * since the host-CPU copies are named "ASID".
2397 *
2398 * @remarks Called with interrupts disabled.
2399 */
2400static void hmR0VmxFlushTaggedTlbBoth(PHMPHYSCPU pHostCpu, PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
2401{
2402#ifdef VBOX_WITH_STATISTICS
2403 bool fTlbFlushed = false;
2404# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { fTlbFlushed = true; } while (0)
2405# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { \
2406 if (!fTlbFlushed) \
2407 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
2408 } while (0)
2409#else
2410# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { } while (0)
2411# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { } while (0)
2412#endif
2413
2414 AssertPtr(pVCpu);
2415 AssertPtr(pHostCpu);
2416 Assert(pHostCpu->idCpu != NIL_RTCPUID);
2417
2418 PVM pVM = pVCpu->CTX_SUFF(pVM);
2419 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
2420 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
2421 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
2422
2423 /*
2424 * Force a TLB flush for the first world-switch if the current CPU differs from the one we
2425 * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID
2426 * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we
2427 * cannot reuse the current ASID anymore.
2428 */
2429 if ( pVCpu->hm.s.idLastCpu != pHostCpu->idCpu
2430 || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes)
2431 {
2432 ++pHostCpu->uCurrentAsid;
2433 if (pHostCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
2434 {
2435 pHostCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */
2436 pHostCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
2437 pHostCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
2438 }
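        /* For example, with uMaxAsid = 8 this hands out VPIDs 1..7 per host CPU; incrementing
           to 8 wraps back to 1 and bumps cTlbFlushes, so every vCPU subsequently scheduled on
           this host CPU picks up a fresh VPID and flushes it before use. */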
2439
2440 pVCpu->hm.s.uCurrentAsid = pHostCpu->uCurrentAsid;
2441 pVCpu->hm.s.idLastCpu = pHostCpu->idCpu;
2442 pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes;
2443
2444 /*
2445 * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
2446 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
2447 */
2448 hmR0VmxFlushEpt(pVCpu, pVmcsInfo, pVM->hm.s.vmx.enmTlbFlushEpt);
2449 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2450 HMVMX_SET_TAGGED_TLB_FLUSHED();
2451 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
2452 }
2453 else if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH)) /* Check for explicit TLB flushes. */
2454 {
2455 /*
2456 * Changes to the EPT paging structure by the VMM require flushing-by-EPT as the CPU
2457 * creates guest-physical (i.e. only EPT-tagged) mappings while traversing the EPT
2458 * tables when EPT is in use. Flushing-by-VPID will only flush linear (only
2459 * VPID-tagged) and combined (EPT+VPID tagged) mappings but not guest-physical
2460 * mappings, see @bugref{6568}.
2461 *
2462 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information".
2463 */
2464 hmR0VmxFlushEpt(pVCpu, pVmcsInfo, pVM->hm.s.vmx.enmTlbFlushEpt);
2465 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2466 HMVMX_SET_TAGGED_TLB_FLUSHED();
2467 }
2468
2469 pVCpu->hm.s.fForceTLBFlush = false;
2470 HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
2471
2472 Assert(pVCpu->hm.s.idLastCpu == pHostCpu->idCpu);
2473 Assert(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes);
2474 AssertMsg(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes,
2475 ("Flush count mismatch for cpu %d (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pHostCpu->cTlbFlushes));
2476 AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2477 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu,
2478 pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2479 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2480 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2481
2482 /* Update VMCS with the VPID. */
2483 int rc = VMXWriteVmcs32(VMX_VMCS16_VPID, pVCpu->hm.s.uCurrentAsid);
2484 AssertRC(rc);
2485
2486#undef HMVMX_SET_TAGGED_TLB_FLUSHED
2487}
2488
2489
2490/**
2491 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
2492 *
2493 * @param pHostCpu The HM physical-CPU structure.
2494 * @param pVCpu The cross context virtual CPU structure.
2495 * @param pVmcsInfo The VMCS info. object.
2496 *
2497 * @remarks Called with interrupts disabled.
2498 */
2499static void hmR0VmxFlushTaggedTlbEpt(PHMPHYSCPU pHostCpu, PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
2500{
2501 AssertPtr(pVCpu);
2502 AssertPtr(pHostCpu);
2503 Assert(pHostCpu->idCpu != NIL_RTCPUID);
2504 AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked without NestedPaging."));
2505 AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID."));
2506
2507 /*
2508 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
2509 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
2510 */
2511 if ( pVCpu->hm.s.idLastCpu != pHostCpu->idCpu
2512 || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes)
2513 {
2514 pVCpu->hm.s.fForceTLBFlush = true;
2515 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2516 }
2517
2518 /* Check for explicit TLB flushes. */
2519 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2520 {
2521 pVCpu->hm.s.fForceTLBFlush = true;
2522 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2523 }
2524
2525 pVCpu->hm.s.idLastCpu = pHostCpu->idCpu;
2526 pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes;
2527
2528 if (pVCpu->hm.s.fForceTLBFlush)
2529 {
2530 hmR0VmxFlushEpt(pVCpu, pVmcsInfo, pVCpu->CTX_SUFF(pVM)->hm.s.vmx.enmTlbFlushEpt);
2531 pVCpu->hm.s.fForceTLBFlush = false;
2532 }
2533}
2534
2535
2536/**
2537 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
2538 *
2539 * @param pHostCpu The HM physical-CPU structure.
2540 * @param pVCpu The cross context virtual CPU structure.
2541 *
2542 * @remarks Called with interrupts disabled.
2543 */
2544static void hmR0VmxFlushTaggedTlbVpid(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
2545{
2546 AssertPtr(pVCpu);
2547 AssertPtr(pHostCpu);
2548 Assert(pHostCpu->idCpu != NIL_RTCPUID);
2549 AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked without VPID."));
2550 AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging"));
2551
2552 /*
2553 * Force a TLB flush for the first world switch if the current CPU differs from the one we
2554 * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID
2555 * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we
2556 * cannot reuse the current ASID anymore.
2557 */
2558 if ( pVCpu->hm.s.idLastCpu != pHostCpu->idCpu
2559 || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes)
2560 {
2561 pVCpu->hm.s.fForceTLBFlush = true;
2562 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2563 }
2564
2565 /* Check for explicit TLB flushes. */
2566 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2567 {
2568 /*
2569 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see
2570 * hmR0VmxSetupTaggedTlb()) we would need to explicitly flush in this case (add an
2571 * fExplicitFlush = true here and change the pHostCpu->fFlushAsidBeforeUse check below to
2572 * include fExplicitFlush's too) - an obscure corner case.
2573 */
2574 pVCpu->hm.s.fForceTLBFlush = true;
2575 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
2576 }
2577
2578 PVM pVM = pVCpu->CTX_SUFF(pVM);
2579 pVCpu->hm.s.idLastCpu = pHostCpu->idCpu;
2580 if (pVCpu->hm.s.fForceTLBFlush)
2581 {
2582 ++pHostCpu->uCurrentAsid;
2583 if (pHostCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
2584 {
2585 pHostCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */
2586 pHostCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
2587 pHostCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
2588 }
2589
2590 pVCpu->hm.s.fForceTLBFlush = false;
2591 pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes;
2592 pVCpu->hm.s.uCurrentAsid = pHostCpu->uCurrentAsid;
2593 if (pHostCpu->fFlushAsidBeforeUse)
2594 {
2595 if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
2596 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
2597 else if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_ALL_CONTEXTS)
2598 {
2599 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
2600 pHostCpu->fFlushAsidBeforeUse = false;
2601 }
2602 else
2603 {
2604 /* hmR0VmxSetupTaggedTlb() ensures we never get here. Paranoia. */
2605 AssertMsgFailed(("Unsupported VPID-flush context type.\n"));
2606 }
2607 }
2608 }
2609
2610 AssertMsg(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes,
2611 ("Flush count mismatch for cpu %d (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pHostCpu->cTlbFlushes));
2612 AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2613 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu,
2614 pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));
2615 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2616 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2617
2618 int rc = VMXWriteVmcs32(VMX_VMCS16_VPID, pVCpu->hm.s.uCurrentAsid);
2619 AssertRC(rc);
2620}
2621
2622
2623/**
2624 * Flushes the guest TLB entry based on CPU capabilities.
2625 *
2626 * @param pHostCpu The HM physical-CPU structure.
2627 * @param pVCpu The cross context virtual CPU structure.
2628 * @param pVmcsInfo The VMCS info. object.
2629 *
2630 * @remarks Called with interrupts disabled.
2631 */
2632static void hmR0VmxFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
2633{
2634#ifdef HMVMX_ALWAYS_FLUSH_TLB
2635 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2636#endif
2637 PVM pVM = pVCpu->CTX_SUFF(pVM);
2638 switch (pVM->hm.s.vmx.enmTlbFlushType)
2639 {
2640 case VMXTLBFLUSHTYPE_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pHostCpu, pVCpu, pVmcsInfo); break;
2641 case VMXTLBFLUSHTYPE_EPT: hmR0VmxFlushTaggedTlbEpt(pHostCpu, pVCpu, pVmcsInfo); break;
2642 case VMXTLBFLUSHTYPE_VPID: hmR0VmxFlushTaggedTlbVpid(pHostCpu, pVCpu); break;
2643 case VMXTLBFLUSHTYPE_NONE: hmR0VmxFlushTaggedTlbNone(pHostCpu, pVCpu); break;
2644 default:
2645 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
2646 break;
2647 }
2648 /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. */
2649}
2650
2651
2652/**
2653 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
2654 * TLB entries from the host TLB before VM-entry.
2655 *
2656 * @returns VBox status code.
2657 * @param pVM The cross context VM structure.
2658 */
2659static int hmR0VmxSetupTaggedTlb(PVM pVM)
2660{
2661 /*
2662 * Determine optimal flush type for nested paging.
2663 * We cannot ignore EPT if no suitable flush type is supported by the CPU, as we've already set up
2664 * unrestricted guest execution (see hmR3InitFinalizeR0()).
2665 */
2666 if (pVM->hm.s.fNestedPaging)
2667 {
2668 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
2669 {
2670 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
2671 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_SINGLE_CONTEXT;
2672 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
2673 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_ALL_CONTEXTS;
2674 else
2675 {
2676 /* Shouldn't happen. EPT is supported but no suitable flush type is supported. */
2677 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2678 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_FLUSH_TYPE_UNSUPPORTED;
2679 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2680 }
2681
2682 /* Make sure the write-back cacheable memory type for EPT is supported. */
2683 if (RT_UNLIKELY(!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB)))
2684 {
2685 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2686 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_MEM_TYPE_NOT_WB;
2687 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2688 }
2689
2690 /* EPT requires a page-walk length of 4. */
2691 if (RT_UNLIKELY(!(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4)))
2692 {
2693 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2694 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_PAGE_WALK_LENGTH_UNSUPPORTED;
2695 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2696 }
2697 }
2698 else
2699 {
2700 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
2701 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
2702 pVM->aCpus[0].hm.s.u32HMError = VMX_UFC_EPT_INVEPT_UNAVAILABLE;
2703 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2704 }
2705 }
2706
2707 /*
2708 * Determine optimal flush type for VPID.
2709 */
2710 if (pVM->hm.s.vmx.fVpid)
2711 {
2712 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
2713 {
2714 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
2715 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_SINGLE_CONTEXT;
2716 else if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
2717 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_ALL_CONTEXTS;
2718 else
2719 {
2720 /* Neither SINGLE nor ALL-context flush types for VPID are supported by the CPU. Ignore VPID capability. */
2721 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2722 LogRelFunc(("Only INDIV_ADDR supported. Ignoring VPID.\n"));
2723 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
2724 LogRelFunc(("Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
2725 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED;
2726 pVM->hm.s.vmx.fVpid = false;
2727 }
2728 }
2729 else
2730 {
2731 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
2732 Log4Func(("VPID supported without INVVPID support. Ignoring VPID.\n"));
2733 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED;
2734 pVM->hm.s.vmx.fVpid = false;
2735 }
2736 }
2737
2738 /*
2739 * Set up the handler for flushing tagged-TLBs.
2740 */
2741 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
2742 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_EPT_VPID;
2743 else if (pVM->hm.s.fNestedPaging)
2744 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_EPT;
2745 else if (pVM->hm.s.vmx.fVpid)
2746 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_VPID;
2747 else
2748 pVM->hm.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_NONE;
2749 return VINF_SUCCESS;
2750}
2751
2752
2753/**
2754 * Sets up the virtual-APIC page address for the VMCS.
2755 *
2756 * @returns VBox status code.
2757 * @param pVCpu The cross context virtual CPU structure.
2758 * @param pVmcsInfo The VMCS info. object.
2759 */
2760DECLINLINE(int) hmR0VmxSetupVmcsVirtApicAddr(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
2761{
2762 NOREF(pVCpu); /* Used implicitly by VMXWriteVmcs64 on 32-bit hosts. */
2763 RTHCPHYS const HCPhysVirtApic = pVmcsInfo->HCPhysVirtApic;
2764 Assert(HCPhysVirtApic != NIL_RTHCPHYS);
2765 Assert(!(HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
2766 return VMXWriteVmcs64(VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, HCPhysVirtApic);
2767}
2768
2769
2770/**
2771 * Sets up the MSR-bitmap address for the VMCS.
2772 *
2773 * @returns VBox status code.
2774 * @param pVCpu The cross context virtual CPU structure.
2775 * @param pVmcsInfo The VMCS info. object.
2776 */
2777DECLINLINE(int) hmR0VmxSetupVmcsMsrBitmapAddr(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
2778{
2779 NOREF(pVCpu); /* Used implicitly by VMXWriteVmcs64 on 32-bit hosts. */
2780 RTHCPHYS const HCPhysMsrBitmap = pVmcsInfo->HCPhysMsrBitmap;
2781 Assert(HCPhysMsrBitmap != NIL_RTHCPHYS);
2782 Assert(!(HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
2783 return VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, HCPhysMsrBitmap);
2784}
2785
2786
2787/**
2788 * Sets up the APIC-access page address for the VMCS.
2789 *
2790 * @returns VBox status code.
2791 * @param pVCpu The cross context virtual CPU structure.
2792 */
2793DECLINLINE(int) hmR0VmxSetupVmcsApicAccessAddr(PVMCPU pVCpu)
2794{
2795 RTHCPHYS const HCPhysApicAccess = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.HCPhysApicAccess;
2796 Assert(HCPhysApicAccess != NIL_RTHCPHYS);
2797 Assert(!(HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
2798 return VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, HCPhysApicAccess);
2799}
2800
2801
2802/**
2803 * Sets up the VMCS link pointer for the VMCS.
2804 *
2805 * @returns VBox status code.
2806 * @param pVCpu The cross context virtual CPU structure.
2807 * @param pVmcsInfo The VMCS info. object.
2808 */
2809DECLINLINE(int) hmR0VmxSetupVmcsLinkPtr(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
2810{
2811 NOREF(pVCpu); /* Used implicitly by VMXWriteVmcs64 on 32-bit hosts. */
2812 uint64_t const u64VmcsLinkPtr = pVmcsInfo->u64VmcsLinkPtr;
2813 Assert(u64VmcsLinkPtr == UINT64_C(0xffffffffffffffff)); /* Bits 63:0 MB1. */
2814 return VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, u64VmcsLinkPtr);
2815}
2816
2817
2818/**
2819 * Sets up the VM-entry MSR load, VM-exit MSR-store and VM-exit MSR-load addresses
2820 * in the VMCS.
2821 *
2822 * @returns VBox status code.
2823 * @param pVCpu The cross context virtual CPU structure.
2824 * @param pVmcsInfo The VMCS info. object.
2825 */
2826DECLINLINE(int) hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
2827{
2828 NOREF(pVCpu); /* Used implicitly by VMXWriteVmcs64 on 32-bit hosts. */
2829
2830 RTHCPHYS const HCPhysGuestMsrLoad = pVmcsInfo->HCPhysGuestMsrLoad;
2831 Assert(HCPhysGuestMsrLoad != NIL_RTHCPHYS);
2832 Assert(!(HCPhysGuestMsrLoad & 0xf)); /* Bits 3:0 MBZ. */
2833
2834 RTHCPHYS const HCPhysGuestMsrStore = pVmcsInfo->HCPhysGuestMsrStore;
2835 Assert(HCPhysGuestMsrStore != NIL_RTHCPHYS);
2836 Assert(!(HCPhysGuestMsrStore & 0xf)); /* Bits 3:0 MBZ. */
2837
2838 RTHCPHYS const HCPhysHostMsrLoad = pVmcsInfo->HCPhysHostMsrLoad;
2839 Assert(HCPhysHostMsrLoad != NIL_RTHCPHYS);
2840 Assert(!(HCPhysHostMsrLoad & 0xf)); /* Bits 3:0 MBZ. */
2841
2842 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, HCPhysGuestMsrLoad);
2843 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, HCPhysGuestMsrStore);
2844 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, HCPhysHostMsrLoad);
2845 AssertRCReturn(rc, rc);
2846 return VINF_SUCCESS;
2847}
2848
2849
2850/**
2851 * Sets up MSR permissions in the MSR bitmap of a VMCS info. object.
2852 *
2853 * @param pVCpu The cross context virtual CPU structure.
2854 * @param pVmcsInfo The VMCS info. object.
2855 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
2856 */
2857static void hmR0VmxSetupVmcsMsrPermissions(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
2858{
2859 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS);
2860
2861 /*
2862 * The guest can access the following MSRs (read, write) without causing
2863 * VM-exits; they are loaded/stored automatically using fields in the VMCS.
2864 */
2865 PVM pVM = pVCpu->CTX_SUFF(pVM);
2866 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_SYSENTER_CS, VMXMSRPM_ALLOW_RD_WR);
2867 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_SYSENTER_ESP, VMXMSRPM_ALLOW_RD_WR);
2868 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_SYSENTER_EIP, VMXMSRPM_ALLOW_RD_WR);
2869 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_GS_BASE, VMXMSRPM_ALLOW_RD_WR);
2870 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_FS_BASE, VMXMSRPM_ALLOW_RD_WR);
2871
2872 /*
2873 * The IA32_PRED_CMD and IA32_FLUSH_CMD MSRs are write-only and have no state
2874 * associated with them. We never need to intercept access (writes need to be
2875 * executed without causing a VM-exit, reads will #GP fault anyway).
2876 *
2877 * The IA32_SPEC_CTRL MSR is read/write and has state. We allow the guest to
2878 * read/write it. We swap the guest/host MSR value using the
2879 * auto-load/store MSR area.
2880 */
2881 if (pVM->cpum.ro.GuestFeatures.fIbpb)
2882 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_PRED_CMD, VMXMSRPM_ALLOW_RD_WR);
2883 if (pVM->cpum.ro.GuestFeatures.fFlushCmd)
2884 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_FLUSH_CMD, VMXMSRPM_ALLOW_RD_WR);
2885 if (pVM->cpum.ro.GuestFeatures.fIbrs)
2886 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_IA32_SPEC_CTRL, VMXMSRPM_ALLOW_RD_WR);
2887
2888#if HC_ARCH_BITS == 64
2889 /*
2890 * Allow full read/write access for the following MSRs (mandatory for VT-x)
2891 * required for 64-bit guests.
2892 */
2893 if (pVM->hm.s.fAllow64BitGuests)
2894 {
2895 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_LSTAR, VMXMSRPM_ALLOW_RD_WR);
2896 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K6_STAR, VMXMSRPM_ALLOW_RD_WR);
2897 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_SF_MASK, VMXMSRPM_ALLOW_RD_WR);
2898 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, MSR_K8_KERNEL_GS_BASE, VMXMSRPM_ALLOW_RD_WR);
2899 }
2900#endif
2901
2902 /*
2903 * IA32_EFER MSR is always intercepted, see @bugref{9180#c37}.
2904 */
2905#ifdef VBOX_STRICT
2906 Assert(pVmcsInfo->pvMsrBitmap);
2907 uint32_t const fMsrpmEfer = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, MSR_K6_EFER);
2908 Assert(fMsrpmEfer == VMXMSRPM_EXIT_RD_WR);
2909#endif
2910}
2911
2912
2913/**
2914 * Sets up pin-based VM-execution controls in the VMCS.
2915 *
2916 * @returns VBox status code.
2917 * @param pVCpu The cross context virtual CPU structure.
2918 * @param pVmcsInfo The VMCS info. object.
2919 */
2920static int hmR0VmxSetupVmcsPinCtls(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
2921{
2922 PVM pVM = pVCpu->CTX_SUFF(pVM);
2923 uint32_t fVal = pVM->hm.s.vmx.Msrs.PinCtls.n.allowed0; /* Bits set here must always be set. */
2924 uint32_t const fZap = pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
2925
2926 fVal |= VMX_PIN_CTLS_EXT_INT_EXIT /* External interrupts cause a VM-exit. */
2927 | VMX_PIN_CTLS_NMI_EXIT; /* Non-maskable interrupts (NMIs) cause a VM-exit. */
2928
2929 if (pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_PIN_CTLS_VIRT_NMI)
2930 fVal |= VMX_PIN_CTLS_VIRT_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */
2931
2932 /* Enable the VMX-preemption timer. */
2933 if (pVM->hm.s.vmx.fUsePreemptTimer)
2934 {
2935 Assert(pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_PIN_CTLS_PREEMPT_TIMER);
2936 fVal |= VMX_PIN_CTLS_PREEMPT_TIMER;
2937 }
2938
2939#if 0
2940 /* Enable posted-interrupt processing. */
2941 if (pVM->hm.s.fPostedIntrs)
2942 {
2943 Assert(pVM->hm.s.vmx.Msrs.PinCtls.n.allowed1 & VMX_PIN_CTLS_POSTED_INT);
2944 Assert(pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_ACK_EXT_INT);
2945 fVal |= VMX_PIN_CTL_POSTED_INT;
2946 }
2947#endif
2948
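    /* Sanity check: every bit we want to set must also be settable on this CPU, i.e. present
       in the allowed-1 mask (fZap). If not, the requested feature combination cannot be used. */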
2949 if ((fVal & fZap) != fVal)
2950 {
2951 LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
2952 pVM->hm.s.vmx.Msrs.PinCtls.n.allowed0, fVal, fZap));
2953 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
2954 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2955 }
2956
2957 /* Commit it to the VMCS and update our cache. */
2958 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, fVal);
2959 AssertRCReturn(rc, rc);
2960 pVmcsInfo->u32PinCtls = fVal;
2961
2962 return VINF_SUCCESS;
2963}
2964
2965
2966/**
2967 * Sets up secondary processor-based VM-execution controls in the VMCS.
2968 *
2969 * @returns VBox status code.
2970 * @param pVCpu The cross context virtual CPU structure.
2971 * @param pVmcsInfo The VMCS info. object.
2972 */
2973static int hmR0VmxSetupVmcsProcCtls2(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
2974{
2975 PVM pVM = pVCpu->CTX_SUFF(pVM);
2976 uint32_t fVal = pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed0; /* Bits set here must be set in the VMCS. */
2977 uint32_t const fZap = pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2978
2979 /* WBINVD causes a VM-exit. */
2980 if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_WBINVD_EXIT)
2981 fVal |= VMX_PROC_CTLS2_WBINVD_EXIT;
2982
2983 /* Enable EPT (aka nested-paging). */
2984 if (pVM->hm.s.fNestedPaging)
2985 fVal |= VMX_PROC_CTLS2_EPT;
2986
2987 /* Enable the INVPCID instruction if supported by the hardware and we expose
2988 it to the guest. Without this, a guest executing INVPCID would cause a #UD. */
2989 if ( (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_INVPCID)
2990 && pVM->cpum.ro.GuestFeatures.fInvpcid)
2991 fVal |= VMX_PROC_CTLS2_INVPCID;
2992
2993 /* Enable VPID. */
2994 if (pVM->hm.s.vmx.fVpid)
2995 fVal |= VMX_PROC_CTLS2_VPID;
2996
2997 /* Enable unrestricted guest execution. */
2998 if (pVM->hm.s.vmx.fUnrestrictedGuest)
2999 fVal |= VMX_PROC_CTLS2_UNRESTRICTED_GUEST;
3000
3001#if 0
3002 if (pVM->hm.s.fVirtApicRegs)
3003 {
3004 /* Enable APIC-register virtualization. */
3005 Assert(pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_APIC_REG_VIRT);
3006 fVal |= VMX_PROC_CTLS2_APIC_REG_VIRT;
3007
3008 /* Enable virtual-interrupt delivery. */
3009 Assert(pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_INTR_DELIVERY);
3010 fVal |= VMX_PROC_CTLS2_VIRT_INTR_DELIVERY;
3011 }
3012#endif
3013
3014 /* Virtualize-APIC accesses if supported by the CPU. The virtual-APIC page is where the TPR shadow resides. */
3015 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
3016 * done dynamically. */
3017 if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
3018 {
3019 fVal |= VMX_PROC_CTLS2_VIRT_APIC_ACCESS;
3020 int rc = hmR0VmxSetupVmcsApicAccessAddr(pVCpu);
3021 AssertRCReturn(rc, rc);
3022 }
3023
3024 /* Enable the RDTSCP instruction if supported by the hardware and we expose
3025 it to the guest. Without this, a guest executing RDTSCP would cause a #UD. */
3026 if ( (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_RDTSCP)
3027 && pVM->cpum.ro.GuestFeatures.fRdTscP)
3028 fVal |= VMX_PROC_CTLS2_RDTSCP;
3029
3030 /* Enable Pause-Loop exiting. */
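    /* Roughly: PLE_GAP is the maximum number of TSC ticks allowed between two successive PAUSEs
       for them to be treated as part of the same spin loop, while PLE_WINDOW caps how long such
       a loop may spin before the CPU forces a VM-exit (see Intel spec. "PAUSE-Loop Exiting"). */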
3031 if ( pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT
3032 && pVM->hm.s.vmx.cPleGapTicks
3033 && pVM->hm.s.vmx.cPleWindowTicks)
3034 {
3035 fVal |= VMX_PROC_CTLS2_PAUSE_LOOP_EXIT;
3036
3037 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);
3038 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks);
3039 AssertRCReturn(rc, rc);
3040 }
3041
3042 if ((fVal & fZap) != fVal)
3043 {
3044 LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
3045 pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed0, fVal, fZap));
3046 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
3047 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3048 }
3049
3050 /* Commit it to the VMCS and update our cache. */
3051 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
3052 AssertRCReturn(rc, rc);
3053 pVmcsInfo->u32ProcCtls2 = fVal;
3054
3055 return VINF_SUCCESS;
3056}
3057
3058
3059/**
3060 * Sets up processor-based VM-execution controls in the VMCS.
3061 *
3062 * @returns VBox status code.
3063 * @param pVCpu The cross context virtual CPU structure.
3064 * @param pVmcsInfo The VMCS info. object.
3065 */
3066static int hmR0VmxSetupVmcsProcCtls(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
3067{
3068 PVM pVM = pVCpu->CTX_SUFF(pVM);
3069
3070 uint32_t fVal = pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
3071 uint32_t const fZap = pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3072
3073 fVal |= VMX_PROC_CTLS_HLT_EXIT /* HLT causes a VM-exit. */
3074 | VMX_PROC_CTLS_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
3075 | VMX_PROC_CTLS_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
3076 | VMX_PROC_CTLS_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
3077 | VMX_PROC_CTLS_RDPMC_EXIT /* RDPMC causes a VM-exit. */
3078 | VMX_PROC_CTLS_MONITOR_EXIT /* MONITOR causes a VM-exit. */
3079 | VMX_PROC_CTLS_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
3080
3081 /* We toggle VMX_PROC_CTLS_MOV_DR_EXIT later; verify that the CPU doesn't force it to be always set or always cleared. */
3082 if ( !(pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MOV_DR_EXIT)
3083 || (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed0 & VMX_PROC_CTLS_MOV_DR_EXIT))
3084 {
3085 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
3086 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3087 }
3088
3089 /* Without nested paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
3090 if (!pVM->hm.s.fNestedPaging)
3091 {
3092 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);
3093 fVal |= VMX_PROC_CTLS_INVLPG_EXIT
3094 | VMX_PROC_CTLS_CR3_LOAD_EXIT
3095 | VMX_PROC_CTLS_CR3_STORE_EXIT;
3096 }
3097
3098 /* Use TPR shadowing if supported by the CPU. */
3099 if ( PDMHasApic(pVM)
3100 && pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW)
3101 {
3102 fVal |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
3103 /* CR8 writes cause a VM-exit based on TPR threshold. */
3104 Assert(!(fVal & VMX_PROC_CTLS_CR8_STORE_EXIT));
3105 Assert(!(fVal & VMX_PROC_CTLS_CR8_LOAD_EXIT));
3106 int rc = hmR0VmxSetupVmcsVirtApicAddr(pVCpu, pVmcsInfo);
3107 AssertRCReturn(rc, rc);
3108 }
3109 else
3110 {
3111 /* Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is
3112 invalid on 32-bit Intel CPUs. Set this control only for 64-bit guests. */
3113 if (pVM->hm.s.fAllow64BitGuests)
3114 {
3115 fVal |= VMX_PROC_CTLS_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
3116 | VMX_PROC_CTLS_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
3117 }
3118 }
3119
3120 /* Use MSR-bitmaps if supported by the CPU. */
3121 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
3122 {
3123 fVal |= VMX_PROC_CTLS_USE_MSR_BITMAPS;
3124 int rc = hmR0VmxSetupVmcsMsrBitmapAddr(pVCpu, pVmcsInfo);
3125 AssertRCReturn(rc, rc);
3126 }
3127
3128 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
3129 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
3130 fVal |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
3131
3132 if ((fVal & fZap) != fVal)
3133 {
3134 LogRelFunc(("Invalid processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
3135 pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed0, fVal, fZap));
3136 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
3137 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3138 }
3139
3140 /* Commit it to the VMCS and update our cache. */
3141 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, fVal);
3142 AssertRCReturn(rc, rc);
3143 pVmcsInfo->u32ProcCtls = fVal;
3144
3145 /* Set up MSR permissions that don't change through the lifetime of the VM. */
3146 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
3147 hmR0VmxSetupVmcsMsrPermissions(pVCpu, pVmcsInfo, false /* fIsNstGstVmcs */);
3148
3149 /* Set up secondary processor-based VM-execution controls if the CPU supports it. */
3150 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
3151 return hmR0VmxSetupVmcsProcCtls2(pVCpu, pVmcsInfo);
3152
3153 /* Sanity check, should not really happen. */
3154 if (RT_LIKELY(!pVM->hm.s.vmx.fUnrestrictedGuest))
3155 { /* likely */ }
3156 else
3157 {
3158 pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;
3159 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3160 }
3161
3162 /* Old CPUs without secondary processor-based VM-execution controls would end up here. */
3163 return VINF_SUCCESS;
3164}
3165
3166
3167/**
3168 * Sets up miscellaneous (everything other than Pin, Processor and secondary
3169 * Processor-based VM-execution) control fields in the VMCS.
3170 *
3171 * @returns VBox status code.
3172 * @param pVCpu The cross context virtual CPU structure.
3173 * @param pVmcsInfo The VMCS info. object.
3174 */
3175static int hmR0VmxSetupVmcsMiscCtls(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
3176{
3177 /* Set the auto-load/store MSR area addresses in the VMCS. */
3178 int rc = hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(pVCpu, pVmcsInfo);
3179 if (RT_SUCCESS(rc))
3180 {
3181 /* Set the VMCS link pointer in the VMCS. */
3182 rc = hmR0VmxSetupVmcsLinkPtr(pVCpu, pVmcsInfo);
3183 if (RT_SUCCESS(rc))
3184 {
3185 /* Set the CR0/CR4 guest/host mask. */
3186 uint64_t const u64Cr0Mask = hmR0VmxGetFixedCr0Mask(pVCpu);
3187 uint64_t const u64Cr4Mask = hmR0VmxGetFixedCr4Mask(pVCpu);
3188 rc = VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask);
3189 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask);
3190 if (RT_SUCCESS(rc))
3191 {
3192 pVmcsInfo->u64Cr0Mask = u64Cr0Mask;
3193 pVmcsInfo->u64Cr4Mask = u64Cr4Mask;
3194 return VINF_SUCCESS;
3195 }
3196 LogRelFunc(("Failed to initialize VMCS CR0/CR4 guest/host mask. rc=%Rrc\n", rc));
3197 }
3198 else
3199 LogRelFunc(("Failed to initialize VMCS link pointer. rc=%Rrc\n", rc));
3200 }
3201 else
3202 LogRelFunc(("Failed to initialize VMCS auto-load/store MSR addresses. rc=%Rrc\n", rc));
3203 return rc;
3204}
3205
3206
3207/**
3208 * Sets up the initial exception bitmap in the VMCS based on static conditions.
3209 *
3210 * We shall set up those exception intercepts that don't change during the
3211 * lifetime of the VM here. The rest are done dynamically while loading the
3212 * guest state.
3213 *
3214 * @returns VBox status code.
3215 * @param pVCpu The cross context virtual CPU structure.
3216 * @param pVmcsInfo The VMCS info. object.
3217 */
3218static int hmR0VmxSetupVmcsXcptBitmap(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
3219{
3220 /*
3221 * The following exceptions are always intercepted:
3222 *
3223 * #AC - To prevent the guest from hanging the CPU.
3224 * #DB - To maintain the DR6 state even when intercepting DRx reads/writes, and
3225 * because recursive #DBs can cause a CPU hang.
3226 * #PF - To sync our shadow page tables when nested-paging is not used.
3227 */
3228 bool const fNestedPaging = pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging;
3229 uint32_t const uXcptBitmap = RT_BIT(X86_XCPT_AC)
3230 | RT_BIT(X86_XCPT_DB)
3231 | (fNestedPaging ? 0 : RT_BIT(X86_XCPT_PF));
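    /* The exception bitmap is indexed by vector number; a set bit makes the corresponding guest
       exception trigger a VM-exit (e.g. bit 14 above covers #PF when shadow paging is used). */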
3232
3233 /* Commit it to the VMCS. */
3234 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
3235 AssertRCReturn(rc, rc);
3236
3237 /* Update our cache of the exception bitmap. */
3238 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
3239 return VINF_SUCCESS;
3240}
3241
3242
3243#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3244/**
3245 * Sets up the VMCS for executing a nested-guest using hardware-assisted VMX.
3246 *
3247 * @returns VBox status code.
3248 * @param pVCpu The cross context virtual CPU structure.
3249 * @param pVmcsInfo The VMCS info. object.
3250 */
3251static int hmR0VmxSetupVmcsCtlsNested(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
3252{
3253 PVM pVM = pVCpu->CTX_SUFF(pVM);
3254 int rc = hmR0VmxSetupVmcsLinkPtr(pVCpu, pVmcsInfo);
3255 if (RT_SUCCESS(rc))
3256 {
3257 rc = hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(pVCpu, pVmcsInfo);
3258 if (RT_SUCCESS(rc))
3259 {
3260 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
3261 rc = hmR0VmxSetupVmcsMsrBitmapAddr(pVCpu, pVmcsInfo);
3262 if (RT_SUCCESS(rc))
3263 {
3264 if (pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
3265 rc = hmR0VmxSetupVmcsApicAccessAddr(pVCpu);
3266 if (RT_SUCCESS(rc))
3267 return VINF_SUCCESS;
3268
3269 LogRelFunc(("Failed to set up the APIC-access address in the nested-guest VMCS. rc=%Rrc\n", rc));
3270 }
3271 else
3272 LogRelFunc(("Failed to set up the MSR-bitmap address in the nested-guest VMCS. rc=%Rrc\n", rc));
3273 }
3274 else
3275 LogRelFunc(("Failed to set up the VMCS link pointer in the nested-guest VMCS. rc=%Rrc\n", rc));
3276 }
3277 else
3278 LogRelFunc(("Failed to set up the auto-load/store MSR addresses in the nested-guest VMCS. rc=%Rrc\n", rc));
3279
3280 return rc;
3281}
3282#endif
3283
3284
3285/**
3286 * Sets up the VMCS for executing a guest (or nested-guest) using hardware-assisted
3287 * VMX.
3288 *
3289 * @returns VBox status code.
3290 * @param pVCpu The cross context virtual CPU structure.
3291 * @param pVmcsInfo The VMCS info. object.
3292 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
3293 */
3294static int hmR0VmxSetupVmcs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
3295{
3296 Assert(pVmcsInfo);
3297 Assert(pVmcsInfo->pvVmcs);
3298 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3299
3300 /* Set the CPU specified revision identifier at the beginning of the VMCS structure. */
3301 PVM pVM = pVCpu->CTX_SUFF(pVM);
3302 *(uint32_t *)pVmcsInfo->pvVmcs = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_ID);
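    /* The first 32 bits of a VMCS region must contain the VMCS revision identifier reported by
       IA32_VMX_BASIC, otherwise the VMPTRLD of this region further below would fail. */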
3303 const char * const pszVmcs = fIsNstGstVmcs ? "nested-guest VMCS" : "guest VMCS";
3304
3305 LogFlowFunc(("\n"));
3306
3307 /*
3308 * Initialize the VMCS using VMCLEAR before loading the VMCS.
3309 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
3310 */
3311 int rc = hmR0VmxClearVmcs(pVmcsInfo);
3312 if (RT_SUCCESS(rc))
3313 {
3314 rc = hmR0VmxLoadVmcs(pVmcsInfo);
3315 if (RT_SUCCESS(rc))
3316 {
3317 if (!fIsNstGstVmcs)
3318 {
3319 rc = hmR0VmxSetupVmcsPinCtls(pVCpu, pVmcsInfo);
3320 if (RT_SUCCESS(rc))
3321 {
3322 rc = hmR0VmxSetupVmcsProcCtls(pVCpu, pVmcsInfo);
3323 if (RT_SUCCESS(rc))
3324 {
3325 rc = hmR0VmxSetupVmcsMiscCtls(pVCpu, pVmcsInfo);
3326 if (RT_SUCCESS(rc))
3327 {
3328 rc = hmR0VmxSetupVmcsXcptBitmap(pVCpu, pVmcsInfo);
3329 if (RT_SUCCESS(rc))
3330 { /* likely */ }
3331 else
3332 LogRelFunc(("Failed to initialize exception bitmap. rc=%Rrc\n", rc));
3333 }
3334 else
3335 LogRelFunc(("Failed to setup miscellaneous controls. rc=%Rrc\n", rc));
3336 }
3337 else
3338 LogRelFunc(("Failed to setup processor-based VM-execution controls. rc=%Rrc\n", rc));
3339 }
3340 else
3341 LogRelFunc(("Failed to setup pin-based controls. rc=%Rrc\n", rc));
3342 }
3343 else
3344 {
3345#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3346 rc = hmR0VmxSetupVmcsCtlsNested(pVCpu, pVmcsInfo);
3347 if (RT_SUCCESS(rc))
3348 { /* likely */ }
3349 else
3350 LogRelFunc(("Failed to initialize nested-guest VMCS. rc=%Rrc\n", rc));
3351#else
3352 AssertFailed();
3353#endif
3354 }
3355 }
3356 else
3357 LogRelFunc(("Failed to load the %s. rc=%Rrc\n", rc, pszVmcs));
3358 }
3359 else
3360 LogRelFunc(("Failed to clear the %s. rc=%Rrc\n", rc, pszVmcs));
3361
3362 /* Sync any CPU internal VMCS data back into our VMCS in memory. */
3363 if (RT_SUCCESS(rc))
3364 {
3365 rc = hmR0VmxClearVmcs(pVmcsInfo);
3366 if (RT_SUCCESS(rc))
3367 { /* likely */ }
3368 else
3369 LogRelFunc(("Failed to clear the %s post setup. rc=%Rrc\n", rc, pszVmcs));
3370 }
3371
3372 /*
3373 * Update the last-error record both for failures and success, so we
3374 * can propagate the status code back to ring-3 for diagnostics.
3375 */
3376 hmR0VmxUpdateErrorRecord(pVCpu, rc);
3377 NOREF(pszVmcs);
3378 return rc;
3379}
3380
3381
3382/**
3383 * Does global VT-x initialization (called during module initialization).
3384 *
3385 * @returns VBox status code.
3386 */
3387VMMR0DECL(int) VMXR0GlobalInit(void)
3388{
3389#ifdef HMVMX_USE_FUNCTION_TABLE
3390 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
3391# ifdef VBOX_STRICT
3392 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
3393 Assert(g_apfnVMExitHandlers[i]);
3394# endif
3395#endif
3396 return VINF_SUCCESS;
3397}
3398
3399
3400/**
3401 * Does global VT-x termination (called during module termination).
3402 */
3403VMMR0DECL(void) VMXR0GlobalTerm()
3404{
3405 /* Nothing to do currently. */
3406}
3407
3408
3409/**
3410 * Sets up and activates VT-x on the current CPU.
3411 *
3412 * @returns VBox status code.
3413 * @param pHostCpu The HM physical-CPU structure.
3414 * @param pVM The cross context VM structure. Can be
3415 * NULL after a host resume operation.
3416 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
3417 * fEnabledByHost is @c true).
3418 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
3419 * @a fEnabledByHost is @c true).
3420 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
3421 * enable VT-x on the host.
3422 * @param pHwvirtMsrs Pointer to the hardware-virtualization MSRs.
3423 */
3424VMMR0DECL(int) VMXR0EnableCpu(PHMPHYSCPU pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
3425 PCSUPHWVIRTMSRS pHwvirtMsrs)
3426{
3427 Assert(pHostCpu);
3428 Assert(pHwvirtMsrs);
3429 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3430
3431 /* Enable VT-x if it's not already enabled by the host. */
3432 if (!fEnabledByHost)
3433 {
3434 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
3435 if (RT_FAILURE(rc))
3436 return rc;
3437 }
3438
3439 /*
3440 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor have been
3441 * using EPTPs) so we don't retain any stale guest-physical mappings which won't get
3442 * invalidated when flushing by VPID.
3443 */
3444 if (pHwvirtMsrs->u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
3445 {
3446 hmR0VmxFlushEpt(NULL /* pVCpu */, NULL /* pVmcsInfo */, VMXTLBFLUSHEPT_ALL_CONTEXTS);
3447 pHostCpu->fFlushAsidBeforeUse = false;
3448 }
3449 else
3450 pHostCpu->fFlushAsidBeforeUse = true;
3451
3452 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
3453 ++pHostCpu->cTlbFlushes;
3454
3455 return VINF_SUCCESS;
3456}
3457
3458
3459/**
3460 * Deactivates VT-x on the current CPU.
3461 *
3462 * @returns VBox status code.
3463 * @param pvCpuPage Pointer to the VMXON region.
3464 * @param HCPhysCpuPage Physical address of the VMXON region.
3465 *
3466 * @remarks This function should never be called when SUPR0EnableVTx() or
3467 * similar was used to enable VT-x on the host.
3468 */
3469VMMR0DECL(int) VMXR0DisableCpu(void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
3470{
3471 RT_NOREF2(pvCpuPage, HCPhysCpuPage);
3472
3473 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3474 return hmR0VmxLeaveRootMode();
3475}
3476
3477
3478/**
3479 * Does per-VM VT-x initialization.
3480 *
3481 * @returns VBox status code.
3482 * @param pVM The cross context VM structure.
3483 */
3484VMMR0DECL(int) VMXR0InitVM(PVM pVM)
3485{
3486 LogFlowFunc(("pVM=%p\n", pVM));
3487
3488 int rc = hmR0VmxStructsAlloc(pVM);
3489 if (RT_FAILURE(rc))
3490 {
3491 LogRelFunc(("Failed to allocated VMX structures. rc=%Rrc\n", rc));
3492 return rc;
3493 }
3494
3495 return VINF_SUCCESS;
3496}
3497
3498
3499/**
3500 * Does per-VM VT-x termination.
3501 *
3502 * @returns VBox status code.
3503 * @param pVM The cross context VM structure.
3504 */
3505VMMR0DECL(int) VMXR0TermVM(PVM pVM)
3506{
3507 LogFlowFunc(("pVM=%p\n", pVM));
3508
3509#ifdef VBOX_WITH_CRASHDUMP_MAGIC
3510 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
3511 {
3512 Assert(pVM->hm.s.vmx.pvScratch);
3513 ASMMemZero32(pVM->hm.s.vmx.pvScratch, X86_PAGE_4K_SIZE);
3514 }
3515#endif
3516 hmR0VmxStructsFree(pVM);
3517 return VINF_SUCCESS;
3518}
3519
3520
3521/**
3522 * Sets up the VM for execution using hardware-assisted VMX.
3523 * This function is only called once per-VM during initialization.
3524 *
3525 * @returns VBox status code.
3526 * @param pVM The cross context VM structure.
3527 */
3528VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
3529{
3530 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
3531 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3532
3533 LogFlowFunc(("pVM=%p\n", pVM));
3534
3535 /*
3536 * At least verify if VMX is enabled, since we can't check if we're in
3537 * VMX root mode or not without causing a #GP.
3538 */
3539 RTCCUINTREG const uHostCR4 = ASMGetCR4();
3540 if (RT_LIKELY(uHostCR4 & X86_CR4_VMXE))
3541 { /* likely */ }
3542 else
3543 return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
3544
3545 /*
3546 * Without unrestricted guest execution, pRealModeTSS and pNonPagingModeEPTPageTable *must*
3547 * always be allocated. We no longer support the highly unlikely case of unrestricted guest
3548 * without pRealModeTSS, see hmR3InitFinalizeR0Intel().
3549 */
3550 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
3551 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
3552 || !pVM->hm.s.vmx.pRealModeTSS))
3553 {
3554 LogRelFunc(("Invalid real-on-v86 state.\n"));
3555 return VERR_INTERNAL_ERROR;
3556 }
3557
3558 /* Initialize these always, see hmR3InitFinalizeR0().*/
3559 pVM->hm.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NONE;
3560 pVM->hm.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NONE;
3561
3562 /* Setup the tagged-TLB flush handlers. */
3563 int rc = hmR0VmxSetupTaggedTlb(pVM);
3564 if (RT_FAILURE(rc))
3565 {
3566 LogRelFunc(("hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
3567 return rc;
3568 }
3569
3570 /* Check if we can use the VMCS controls for swapping the EFER MSR. */
3571 Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
3572#if HC_ARCH_BITS == 64
3573 if ( (pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1 & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
3574 && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_LOAD_EFER_MSR)
3575 && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_EFER_MSR))
3576 pVM->hm.s.vmx.fSupportsVmcsEfer = true;
3577#endif
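    /* When these dedicated controls are absent, EFER is swapped via the VM-entry/VM-exit MSR
       auto-load/store areas instead (see hmR0VmxExportGuestMsrs()), which is more expensive. */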
3578
3579 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3580 {
3581 PVMCPU pVCpu = &pVM->aCpus[idCpu];
3582 Log4Func(("pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
3583
3584 rc = hmR0VmxSetupVmcs(pVCpu, &pVCpu->hm.s.vmx.VmcsInfo, false /* fIsNstGstVmcs */);
3585 if (RT_SUCCESS(rc))
3586 {
3587#if HC_ARCH_BITS == 32
3588 hmR0VmxInitVmcsReadCache(pVCpu);
3589#endif
3590#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3591 if (pVM->cpum.ro.GuestFeatures.fVmx)
3592 {
3593 rc = hmR0VmxSetupVmcs(pVCpu, &pVCpu->hm.s.vmx.VmcsInfoNstGst, true /* fIsNstGstVmcs */);
3594 if (RT_SUCCESS(rc))
3595 { /* likely */ }
3596 else
3597 {
3598 LogRelFunc(("Nested-guest VMCS setup failed. rc=%Rrc\n", rc));
3599 return rc;
3600 }
3601 }
3602#endif
3603 }
3604 else
3605 {
3606 LogRelFunc(("VMCS setup failed. rc=%Rrc\n", rc));
3607 return rc;
3608 }
3609 }
3610
3611 return VINF_SUCCESS;
3612}
3613
3614
3615#if HC_ARCH_BITS == 32
3616# ifdef VBOX_ENABLE_64_BITS_GUESTS
3617/**
3618 * Check if guest state allows safe use of 32-bit switcher again.
3619 *
3620 * Segment bases and protected mode structures must be 32-bit addressable
3621 * because the 32-bit switcher will ignore high dword when writing these VMCS
3622 * fields. See @bugref{8432} for details.
3623 *
3624 * @returns true if safe, false if must continue to use the 64-bit switcher.
3625 * @param pCtx Pointer to the guest-CPU context.
3626 *
3627 * @remarks No-long-jump zone!!!
3628 */
3629static bool hmR0VmxIs32BitSwitcherSafe(PCCPUMCTX pCtx)
3630{
3631 if (pCtx->gdtr.pGdt & UINT64_C(0xffffffff00000000)) return false;
3632 if (pCtx->idtr.pIdt & UINT64_C(0xffffffff00000000)) return false;
3633 if (pCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000)) return false;
3634 if (pCtx->tr.u64Base & UINT64_C(0xffffffff00000000)) return false;
3635 if (pCtx->es.u64Base & UINT64_C(0xffffffff00000000)) return false;
3636 if (pCtx->cs.u64Base & UINT64_C(0xffffffff00000000)) return false;
3637 if (pCtx->ss.u64Base & UINT64_C(0xffffffff00000000)) return false;
3638 if (pCtx->ds.u64Base & UINT64_C(0xffffffff00000000)) return false;
3639 if (pCtx->fs.u64Base & UINT64_C(0xffffffff00000000)) return false;
3640 if (pCtx->gs.u64Base & UINT64_C(0xffffffff00000000)) return false;
3641
3642 /* All good, bases are 32-bit. */
3643 return true;
3644}
3645# endif /* VBOX_ENABLE_64_BITS_GUESTS */
3646
3647# ifdef VBOX_STRICT
3648static bool hmR0VmxIsValidWriteField(uint32_t idxField)
3649{
3650 switch (idxField)
3651 {
3652 case VMX_VMCS_GUEST_RIP:
3653 case VMX_VMCS_GUEST_RSP:
3654 case VMX_VMCS_GUEST_SYSENTER_EIP:
3655 case VMX_VMCS_GUEST_SYSENTER_ESP:
3656 case VMX_VMCS_GUEST_GDTR_BASE:
3657 case VMX_VMCS_GUEST_IDTR_BASE:
3658 case VMX_VMCS_GUEST_CS_BASE:
3659 case VMX_VMCS_GUEST_DS_BASE:
3660 case VMX_VMCS_GUEST_ES_BASE:
3661 case VMX_VMCS_GUEST_FS_BASE:
3662 case VMX_VMCS_GUEST_GS_BASE:
3663 case VMX_VMCS_GUEST_SS_BASE:
3664 case VMX_VMCS_GUEST_LDTR_BASE:
3665 case VMX_VMCS_GUEST_TR_BASE:
3666 case VMX_VMCS_GUEST_CR3:
3667 return true;
3668 }
3669 return false;
3670}
3671
3672static bool hmR0VmxIsValidReadField(uint32_t idxField)
3673{
3674 switch (idxField)
3675 {
3676 /* Read-only fields. */
3677 case VMX_VMCS_RO_EXIT_QUALIFICATION:
3678 return true;
3679 }
3680 /* Remaining readable fields should also be writable. */
3681 return hmR0VmxIsValidWriteField(idxField);
3682}
3683# endif /* VBOX_STRICT */
3684
3685
3686/**
3687 * Executes the specified handler in 64-bit mode.
3688 *
3689 * @returns VBox status code (no informational status codes).
3690 * @param pVCpu The cross context virtual CPU structure.
3691 * @param enmOp The operation to perform.
3692 * @param cParams Number of parameters.
3693 * @param paParam Array of 32-bit parameters.
3694 */
3695VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, uint32_t cParams, uint32_t *paParam)
3696{
3697 PVM pVM = pVCpu->CTX_SUFF(pVM);
3698 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
3699 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
3700 Assert(pVCpu->hm.s.vmx.VmcsCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VmcsCache.Write.aField));
3701 Assert(pVCpu->hm.s.vmx.VmcsCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VmcsCache.Read.aField));
3702
3703#ifdef VBOX_STRICT
3704 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VmcsCache.Write.cValidEntries; i++)
3705 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VmcsCache.Write.aField[i]));
3706
3707 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VmcsCache.Read.cValidEntries; i++)
3708 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VmcsCache.Read.aField[i]));
3709#endif
3710
3711 /* Disable interrupts. */
3712 RTCCUINTREG fOldEFlags = ASMIntDisableFlags();
3713
3714#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
3715 RTCPUID idHostCpu = RTMpCpuId();
3716 CPUMR0SetLApic(pVCpu, idHostCpu);
3717#endif
3718
3719 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
3720
3721 PCHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
3722 RTHCPHYS const HCPhysCpuPage = pHostCpu->HCPhysMemObj;
3723
3724 /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
3725 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
3726 hmR0VmxClearVmcs(pVmcsInfo);
3727
3728 /* Leave VMX root mode and disable VMX. */
3729 VMXDisable();
3730 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
3731
3732 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
3733 CPUMSetHyperEIP(pVCpu, enmOp);
3734 for (int i = (int)cParams - 1; i >= 0; i--)
3735 CPUMPushHyper(pVCpu, paParam[i]);
3736
3737 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
3738
3739 /* Call the switcher. */
3740 int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_UOFFSETOF_DYN(VM, aCpus[pVCpu->idCpu].cpum) - RT_UOFFSETOF(VM, cpum));
3741 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
3742
3743 /* Re-enable VMX to make sure the VMX instructions don't cause #UD faults. */
3744 SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);
3745
3746 /* Re-enter VMX root mode. */
3747 int rc2 = VMXEnable(HCPhysCpuPage);
3748 if (RT_FAILURE(rc2))
3749 {
3750 SUPR0ChangeCR4(0, ~X86_CR4_VMXE);
3751 ASMSetFlags(fOldEFlags);
3752 pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
3753 return rc2;
3754 }
3755
3756 /* Restore the VMCS as the current VMCS. */
3757 rc2 = hmR0VmxLoadVmcs(pVmcsInfo);
3758 AssertRC(rc2);
3759 Assert(!(ASMGetFlags() & X86_EFL_IF));
3760 ASMSetFlags(fOldEFlags);
3761 return rc;
3762}
3763
3764
3765/**
3766 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
3767 * supporting 64-bit guests.
3768 *
3769 * @returns VBox status code.
3770 * @param fResume Whether to VMLAUNCH or VMRESUME.
3771 * @param pCtx Pointer to the guest-CPU context.
3772 * @param pCache Pointer to the VMCS batch cache.
3773 * @param pVM The cross context VM structure.
3774 * @param pVCpu The cross context virtual CPU structure.
3775 */
3776DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMXVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
3777{
3778 NOREF(fResume);
3779
3780 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
3781 PCHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
3782 RTHCPHYS const HCPhysCpuPage = pHostCpu->HCPhysMemObj;
3783
3784#ifdef VBOX_WITH_CRASHDUMP_MAGIC
3785 pCache->uPos = 1;
3786 pCache->interPD = PGMGetInterPaeCR3(pVM);
3787 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
3788#endif
3789
3790#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
3791 pCache->TestIn.HCPhysCpuPage = 0;
3792 pCache->TestIn.HCPhysVmcs = 0;
3793 pCache->TestIn.pCache = 0;
3794 pCache->TestOut.HCPhysVmcs = 0;
3795 pCache->TestOut.pCache = 0;
3796 pCache->TestOut.pCtx = 0;
3797 pCache->TestOut.eflags = 0;
3798#else
3799 NOREF(pCache);
3800#endif
3801
3802 uint32_t aParam[10];
3803 aParam[0] = RT_LO_U32(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
3804 aParam[1] = RT_HI_U32(HCPhysCpuPage); /* Param 1: VMXON physical address - Hi. */
3805 aParam[2] = RT_LO_U32(pVmcsInfo->HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
3806 aParam[3] = RT_HI_U32(pVmcsInfo->HCPhysVmcs); /* Param 2: VMCS physical address - Hi. */
3807 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsCache);
3808 aParam[5] = 0;
3809 aParam[6] = VM_RC_ADDR(pVM, pVM);
3810 aParam[7] = 0;
3811 aParam[8] = VM_RC_ADDR(pVM, pVCpu);
3812 aParam[9] = 0;
3813
3814#ifdef VBOX_WITH_CRASHDUMP_MAGIC
3815 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
3816 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
3817#endif
3818 int rc = VMXR0Execute64BitsHandler(pVCpu, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]);
3819
3820#ifdef VBOX_WITH_CRASHDUMP_MAGIC
3821 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
3822 Assert(pCtx->dr[4] == 10);
3823 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
3824#endif
3825
3826#if defined(DEBUG) && defined(VMX_USE_CACHED_VMCS_ACCESSES)
3827 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
3828 AssertMsg(pCache->TestIn.HCPhysVmcs == pVmcsInfo->HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
3829 pVmcsInfo->HCPhysVmcs));
3830 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
3831 pCache->TestOut.HCPhysVmcs));
3832 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
3833 pCache->TestOut.pCache));
3834 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsCache),
3835 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VmcsCache)));
3836 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
3837 pCache->TestOut.pCtx));
3838 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
3839#endif
3840 NOREF(pCtx);
3841 return rc;
3842}
3843#endif
3844
3845
3846/**
3847 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
3848 * the VMCS.
3849 *
3850 * @returns VBox status code.
3851 */
3852static int hmR0VmxExportHostControlRegs(void)
3853{
3854 RTCCUINTREG uReg = ASMGetCR0();
3855 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
3856 AssertRCReturn(rc, rc);
3857
3858 uReg = ASMGetCR3();
3859 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
3860 AssertRCReturn(rc, rc);
3861
3862 uReg = ASMGetCR4();
3863 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
3864 AssertRCReturn(rc, rc);
3865 return rc;
3866}
3867
3868
3869/**
3870 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
3871 * the host-state area in the VMCS.
3872 *
3873 * @returns VBox status code.
3874 * @param pVCpu The cross context virtual CPU structure.
3875 */
3876static int hmR0VmxExportHostSegmentRegs(PVMCPU pVCpu)
3877{
3878#if HC_ARCH_BITS == 64
3879/**
3880 * Macro for adjusting host segment selectors to satisfy VT-x's VM-entry
3881 * requirements. See hmR0VmxExportHostSegmentRegs().
3882 */
3883# define VMXLOCAL_ADJUST_HOST_SEG(seg, selValue) \
3884 if ((selValue) & (X86_SEL_RPL | X86_SEL_LDT)) \
3885 { \
3886 bool fValidSelector = true; \
3887 if ((selValue) & X86_SEL_LDT) \
3888 { \
3889 uint32_t uAttr = ASMGetSegAttr((selValue)); \
3890 fValidSelector = RT_BOOL(uAttr != UINT32_MAX && (uAttr & X86_DESC_P)); \
3891 } \
3892 if (fValidSelector) \
3893 { \
3894 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##seg; \
3895 pVCpu->hm.s.vmx.RestoreHost.uHostSel##seg = (selValue); \
3896 } \
3897 (selValue) = 0; \
3898 }
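 /* Host segment selectors with a non-zero RPL or the LDT bit set would fail the VM-entry checks
    on host-state fields, so the macro above remembers such selectors for manual restoration and
    loads a NULL selector into the VMCS instead. */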
3899
3900 /*
3901 * If we've executed guest code using hardware-assisted VMX, the host-state bits
3902 * will be messed up. We should -not- save the messed up state without restoring
3903 * the original host-state, see @bugref{7240}.
3904 *
3905 * This apparently can happen (most likely due to FPU state changes), so deal with it rather
3906 * than asserting. It was observed while booting a Solaris 10u10 32-bit guest.
3907 */
3908 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
3909 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
3910 {
3911 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags,
3912 pVCpu->idCpu));
3913 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
3914 }
3915 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
3916#else
3917 RT_NOREF(pVCpu);
3918#endif
3919
3920 /*
3921 * Host DS, ES, FS and GS segment registers.
3922 */
3923#if HC_ARCH_BITS == 64
3924 RTSEL uSelDS = ASMGetDS();
3925 RTSEL uSelES = ASMGetES();
3926 RTSEL uSelFS = ASMGetFS();
3927 RTSEL uSelGS = ASMGetGS();
3928#else
3929 RTSEL uSelDS = 0;
3930 RTSEL uSelES = 0;
3931 RTSEL uSelFS = 0;
3932 RTSEL uSelGS = 0;
3933#endif
3934
3935 /*
3936 * Host CS and SS segment registers.
3937 */
3938 RTSEL uSelCS = ASMGetCS();
3939 RTSEL uSelSS = ASMGetSS();
3940
3941 /*
3942 * Host TR segment register.
3943 */
3944 RTSEL uSelTR = ASMGetTR();
3945
3946#if HC_ARCH_BITS == 64
3947 /*
3948 * Determine if the host segment registers are suitable for VT-x. Otherwise use zero to
3949 * gain VM-entry and restore them before we get preempted.
3950 *
3951 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
3952 */
3953 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS);
3954 VMXLOCAL_ADJUST_HOST_SEG(ES, uSelES);
3955 VMXLOCAL_ADJUST_HOST_SEG(FS, uSelFS);
3956 VMXLOCAL_ADJUST_HOST_SEG(GS, uSelGS);
3957# undef VMXLOCAL_ADJUST_HOST_SEG
3958#endif
3959
3960 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
3961 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
3962 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
3963 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
3964 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
3965 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
3966 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
3967 Assert(!(uSelTR & X86_SEL_RPL)); Assert(!(uSelTR & X86_SEL_LDT));
3968 Assert(uSelCS);
3969 Assert(uSelTR);
3970
3971 /* Write these host selector fields into the host-state area in the VMCS. */
3972 int rc = VMXWriteVmcs32(VMX_VMCS16_HOST_CS_SEL, uSelCS);
3973 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_SS_SEL, uSelSS);
3974#if HC_ARCH_BITS == 64
3975 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_DS_SEL, uSelDS);
3976 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_ES_SEL, uSelES);
3977 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FS_SEL, uSelFS);
3978 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_GS_SEL, uSelGS);
3979#else
3980 NOREF(uSelDS);
3981 NOREF(uSelES);
3982 NOREF(uSelFS);
3983 NOREF(uSelGS);
3984#endif
3985 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_TR_SEL, uSelTR);
3986 AssertRCReturn(rc, rc);
3987
3988 /*
3989 * Host GDTR and IDTR.
3990 */
3991 RTGDTR Gdtr;
3992 RTIDTR Idtr;
3993 RT_ZERO(Gdtr);
3994 RT_ZERO(Idtr);
3995 ASMGetGDTR(&Gdtr);
3996 ASMGetIDTR(&Idtr);
3997 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);
3998 rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);
3999 AssertRCReturn(rc, rc);
4000
4001#if HC_ARCH_BITS == 64
4002 /*
4003 * Determine if we need to manually restore the GDTR and IDTR limits as VT-x zaps
4004 * them to the maximum limit (0xffff) on every VM-exit.
4005 */
4006 if (Gdtr.cbGdt != 0xffff)
4007 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
4008
4009 /*
4010 * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT" and
4011 * Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore if the host has the limit
4012 * as 0xfff, VT-x bloating the limit to 0xffff shouldn't cause any different CPU behavior.
4013 * However, several hosts either insist on 0xfff being the limit (Windows Patch Guard) or
4014 * use the limit for other purposes (darwin puts the CPU ID in there but botches sidt
4015 * alignment in at least one consumer). So, we're only allowing the IDTR.LIMIT to be left
4016 * at 0xffff on hosts where we are sure it won't cause trouble.
4017 */
4018# if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
4019 if (Idtr.cbIdt < 0x0fff)
4020# else
4021 if (Idtr.cbIdt != 0xffff)
4022# endif
4023 {
4024 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
4025 AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
4026 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
4027 }
4028#endif
4029
4030 /*
4031 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI
4032 * and RPL bits is effectively what the CPU does for "scaling by 8". TI is always 0 and
4033 * RPL should be too in most cases.
4034 */
4035 AssertMsgReturn((uSelTR | X86_SEL_RPL_LDT) <= Gdtr.cbGdt,
4036 ("TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt), VERR_VMX_INVALID_HOST_STATE);
4037
4038 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
4039#if HC_ARCH_BITS == 64
4040 uintptr_t const uTRBase = X86DESC64_BASE(pDesc);
4041
4042 /*
4043 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on
4044 * all VM-exits. The type is the same for 64-bit busy TSS[1]. The limit needs manual
4045 * restoration if the host has something else. Task switching is not supported in 64-bit
4046 * mode[2], but the limit still matters as IOPM is supported in 64-bit mode. Restoring the
4047 * limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
4048 *
4049 * [1] See Intel spec. 3.5 "System Descriptor Types".
4050 * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
4051 */
4052 PVM pVM = pVCpu->CTX_SUFF(pVM);
4053 Assert(pDesc->System.u4Type == 11);
4054 if ( pDesc->System.u16LimitLow != 0x67
4055 || pDesc->System.u4LimitHigh)
4056 {
4057 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
4058 /* If the host has made GDT read-only, we would need to temporarily toggle CR0.WP before writing the GDT. */
4059 if (pVM->hm.s.fHostKernelFeatures & SUPKERNELFEATURES_GDT_READ_ONLY)
4060 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_READ_ONLY;
4061 pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
4062 }
4063
4064 /*
4065 * Store the GDTR as we need it when restoring the GDT and while restoring the TR.
4066 */
4067 if (pVCpu->hm.s.vmx.fRestoreHostFlags & (VMX_RESTORE_HOST_GDTR | VMX_RESTORE_HOST_SEL_TR))
4068 {
4069 AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
4070 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
4071 if (pVM->hm.s.fHostKernelFeatures & SUPKERNELFEATURES_GDT_NEED_WRITABLE)
4072 {
4073 /* The GDT is read-only but the writable GDT is available. */
4074 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_NEED_WRITABLE;
4075 pVCpu->hm.s.vmx.RestoreHost.HostGdtrRw.cb = Gdtr.cbGdt;
4076 rc = SUPR0GetCurrentGdtRw(&pVCpu->hm.s.vmx.RestoreHost.HostGdtrRw.uAddr);
4077 AssertRCReturn(rc, rc);
4078 }
4079 }
4080#else
4081 uintptr_t const uTRBase = X86DESC_BASE(pDesc);
4082#endif
4083 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
4084 AssertRCReturn(rc, rc);
4085
4086 /*
4087 * Host FS base and GS base.
4088 */
4089#if HC_ARCH_BITS == 64
4090 uint64_t const u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
4091 uint64_t const u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
4092 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);
4093 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);
4094 AssertRCReturn(rc, rc);
4095
4096 /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
4097 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
4098 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
4099 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
4100 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
4101#endif
4102 return VINF_SUCCESS;
4103}
4104
4105
4106/**
4107 * Exports certain host MSRs in the VM-exit MSR-load area and some in the
4108 * host-state area of the VMCS.
4109 *
4110 * These MSRs will be automatically restored on the host after every successful
4111 * VM-exit.
4112 *
4113 * @returns VBox status code.
4114 * @param pVCpu The cross context virtual CPU structure.
4115 *
4116 * @remarks No-long-jump zone!!!
4117 */
4118static int hmR0VmxExportHostMsrs(PVMCPU pVCpu)
4119{
4120 AssertPtr(pVCpu);
4121
4122 /*
4123 * Save MSRs that we restore lazily (due to preemption or transition to ring-3)
4124 * rather than swapping them on every VM-entry.
4125 */
4126 hmR0VmxLazySaveHostMsrs(pVCpu);
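    /* These are typically the syscall-related MSRs (STAR, LSTAR, SF_MASK) and KERNEL_GS_BASE;
       handling them lazily avoids the cost of swapping them on every VM-entry. */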
4127
4128 /*
4129 * Host Sysenter MSRs.
4130 */
4131 int rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
4132#if HC_ARCH_BITS == 32
4133 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
4134 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
4135#else
4136 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
4137 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
4138#endif
4139 AssertRCReturn(rc, rc);
4140
4141 /*
4142 * Host EFER MSR.
4143 *
4144 * If the CPU supports the newer VMCS controls for managing EFER, use it. Otherwise it's
4145 * done as part of auto-load/store MSR area in the VMCS, see hmR0VmxExportGuestMsrs().
4146 */
4147 PVM pVM = pVCpu->CTX_SUFF(pVM);
4148 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
4149 {
4150 rc = VMXWriteVmcs64(VMX_VMCS64_HOST_EFER_FULL, pVM->hm.s.vmx.u64HostMsrEfer);
4151 AssertRCReturn(rc, rc);
4152 }
4153
4154 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see
4155 * hmR0VmxExportGuestEntryExitCtls(). */
4156
4157 return VINF_SUCCESS;
4158}
4159
4160
4161/**
4162 * Figures out if we need to swap the EFER MSR which is particularly expensive.
4163 *
4164 * We check all relevant bits. For now, that's everything besides LMA/LME, as
4165 * these two bits are handled by VM-entry, see hmR0VmxExportGuestEntryExitCtls().
4166 *
4167 * @returns true if we need to load guest EFER, false otherwise.
4168 * @param pVCpu The cross context virtual CPU structure.
4169 *
4170 * @remarks Requires EFER, CR4.
4171 * @remarks No-long-jump zone!!!
4172 */
4173static bool hmR0VmxShouldSwapEferMsr(PCVMCPU pVCpu)
4174{
4175#ifdef HMVMX_ALWAYS_SWAP_EFER
4176 RT_NOREF(pVCpu);
4177 return true;
4178#else
4179 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4180#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
4181 /* For 32-bit hosts running 64-bit guests, we always swap EFER MSR in the world-switcher. Nothing to do here. */
4182 if (CPUMIsGuestInLongModeEx(pCtx))
4183 return false;
4184#endif
4185
4186 PVM pVM = pVCpu->CTX_SUFF(pVM);
4187 uint64_t const u64HostEfer = pVM->hm.s.vmx.u64HostMsrEfer;
4188 uint64_t const u64GuestEfer = pCtx->msrEFER;
4189
4190 /*
4191 * For 64-bit guests, if EFER.SCE bit differs, we need to swap the EFER MSR
4192 * to ensure that the guest's SYSCALL behaviour isn't broken, see @bugref{7386}.
4193 */
4194 if ( CPUMIsGuestInLongModeEx(pCtx)
4195 && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
4196 return true;
4197
4198 /*
4199 * If the guest uses PAE and EFER.NXE bit differs, we need to swap the EFER MSR
4200 * as it affects guest paging. 64-bit paging implies CR4.PAE as well.
4201 *
4202 * See Intel spec. 4.5 "IA-32e Paging".
4203 * See Intel spec. 4.1.1 "Three Paging Modes".
4204 *
4205 * Verify that we always intercept CR4.PAE and CR0.PG bits, so we don't need to
4206 * import CR4 and CR0 from the VMCS here as those bits are always up to date.
4207 */
4208 Assert(hmR0VmxGetFixedCr4Mask(pVCpu) & X86_CR4_PAE);
4209 Assert(hmR0VmxGetFixedCr0Mask(pVCpu) & X86_CR0_PG);
4210 if ( (pCtx->cr4 & X86_CR4_PAE)
4211 && (pCtx->cr0 & X86_CR0_PG)
4212 && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
4213 {
4214 /* Assert that host is NX capable. */
4215 Assert(pVCpu->CTX_SUFF(pVM)->cpum.ro.HostFeatures.fNoExecute);
4216 return true;
4217 }
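    /* Example: a PAE guest running with EFER.NXE=1 on a host that has NXE=0 must see its own NXE
       while executing, otherwise its XD page-table bits would be treated as reserved and fault. */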
4218
4219 return false;
4220#endif
4221}
4222
4223/**
4224 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
4225 * VMCS.
4226 *
4227 * This is typically required when the guest changes paging mode.
4228 *
4229 * @returns VBox status code.
4230 * @param pVCpu The cross context virtual CPU structure.
4231 * @param pVmxTransient The VMX-transient structure.
4232 *
4233 * @remarks Requires EFER.
4234 * @remarks No-long-jump zone!!!
4235 */
4236static int hmR0VmxExportGuestEntryExitCtls(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
4237{
4238 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
4239 {
4240 PVM pVM = pVCpu->CTX_SUFF(pVM);
4241 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
4242
4243 /*
4244 * VM-entry controls.
4245 */
4246 {
4247 uint32_t fVal = pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
4248 uint32_t const fZap = pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
4249
4250 /*
4251 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
4252 * The first VT-x capable CPUs only supported the 1-setting of this bit.
4253 *
4254 * For nested-guests, this is a mandatory VM-entry control. It's also
4255 * required because we do not want to leak host bits to the nested-guest.
4256 */
4257 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
4258
4259 /*
4260 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
4261 *
4262 * For nested-guests, we initialize the "IA-32e mode guest" control with what is
4263 * required to get the nested-guest working with hardware-assisted VMX execution.
4264 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
4265 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
4266 * here rather than while merging the guest VMCS controls.
4267 */
4268 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
4269 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
4270 else
4271 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
4272
4273 /*
4274 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
4275 *
4276 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
4277 * regardless of whether the nested-guest VMCS specifies it because we are free to
4278 * load whatever MSRs we require and we do not need to modify the guest visible copy
4279 * of the VM-entry MSR load area.
4280 */
4281 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
4282 && hmR0VmxShouldSwapEferMsr(pVCpu))
4283 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
4284 else
4285 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
4286
4287 /*
4288 * The following should -not- be set (since we're not in SMM mode):
4289 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
4290 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
4291 */
4292
4293 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
4294 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
4295
4296 if ((fVal & fZap) == fVal)
4297 { /* likely */ }
4298 else
4299 {
4300 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
4301 pVM->hm.s.vmx.Msrs.EntryCtls.n.allowed0, fVal, fZap));
4302 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
4303 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
4304 }
4305
4306 /* Commit it to the VMCS. */
4307 if (pVmcsInfo->u32EntryCtls != fVal)
4308 {
4309 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal);
4310 AssertRCReturn(rc, rc);
4311 pVmcsInfo->u32EntryCtls = fVal;
4312 }
4313 }
4314
4315 /*
4316 * VM-exit controls.
4317 */
4318 {
4319 uint32_t fVal = pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
4320 uint32_t const fZap = pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
4321
4322 /*
4323 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
4324 * supported the 1-setting of this bit.
4325 *
4326 * For nested-guests, we set the "save debug controls" control, as the converse
4327 * "load debug controls" control is mandatory for nested-guests anyway.
4328 */
4329 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
4330
4331 /*
4332 * Set the host long mode active (EFER.LMA) bit (which Intel calls
4333 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
4334 * host EFER.LMA and EFER.LME bit to this value. See assertion in
4335 * hmR0VmxExportHostMsrs().
4336 *
4337 * For nested-guests, we always set this bit as we do not support 32-bit
4338 * hosts.
4339 */
4340#if HC_ARCH_BITS == 64
4341 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
4342#else
4343 Assert(!pVmxTransient->fIsNestedGuest);
4344 Assert( pVmcsInfo->pfnStartVM == VMXR0SwitcherStartVM64
4345 || pVmcsInfo->pfnStartVM == VMXR0StartVM32);
4346 /* Set the host address-space size based on the switcher, not guest state. See @bugref{8432}. */
4347 if (pVmcsInfo->pfnStartVM == VMXR0SwitcherStartVM64)
4348 {
4349 /* The switcher returns to long mode, the EFER MSR is managed by the switcher. */
4350 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
4351 }
4352 else
4353 Assert(!(fVal & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE));
4354#endif
4355
4356 /*
4357 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
4358 *
4359 * For nested-guests, we should use the "save IA32_EFER" control if we also
4360 * used the "load IA32_EFER" control while exporting VM-entry controls.
4361 */
4362 if ( pVM->hm.s.vmx.fSupportsVmcsEfer
4363 && hmR0VmxShouldSwapEferMsr(pVCpu))
4364 {
4365 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
4366 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
4367 }
4368
4369 /*
4370 * Enable saving of the VMX-preemption timer value on VM-exit.
4371 * For nested-guests, currently not exposed/used.
4372 */
4373 if ( pVM->hm.s.vmx.fUsePreemptTimer
4374 && (pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
4375 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
4376
4377 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
4378 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
4379
4380 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
4381 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
4382 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
4383
4384 if ((fVal & fZap) == fVal)
4385 { /* likely */ }
4386 else
4387 {
4388 Log4Func(("Invalid VM-exit controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%R#X32\n",
4389 pVM->hm.s.vmx.Msrs.ExitCtls.n.allowed0, fVal, fZap));
4390 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
4391 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
4392 }
4393
4394 /* Commit it to the VMCS. */
4395 if (pVmcsInfo->u32ExitCtls != fVal)
4396 {
4397 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal);
4398 AssertRCReturn(rc, rc);
4399 pVmcsInfo->u32ExitCtls = fVal;
4400 }
4401 }
4402
4403 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
4404 }
4405 return VINF_SUCCESS;
4406}
4407
4408
4409/**
4410 * Sets the TPR threshold in the VMCS.
4411 *
4412 * @returns VBox status code.
4413 * @param pVCpu The cross context virtual CPU structure.
4414 * @param pVmcsInfo The VMCS info. object.
4415 * @param u32TprThreshold The TPR threshold (task-priority class only).
4416 */
4417DECLINLINE(int) hmR0VmxApicSetTprThreshold(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
4418{
4419 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
4420 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4421 RT_NOREF2(pVCpu, pVmcsInfo);
4422 return VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
4423}
4424
4425
4426/**
4427 * Exports the guest APIC TPR state into the VMCS.
4428 *
4429 * @returns VBox status code.
4430 * @param pVCpu The cross context virtual CPU structure.
4431 * @param pVmxTransient The VMX-transient structure.
4432 *
4433 * @remarks No-long-jump zone!!!
4434 */
4435static int hmR0VmxExportGuestApicTpr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
4436{
4437 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
4438 {
4439 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
4440
4441 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
4442 if (!pVmxTransient->fIsNestedGuest)
4443 {
4444 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
4445 && APICIsEnabled(pVCpu))
4446 {
4447 /*
4448 * Setup TPR shadowing.
4449 */
4450 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4451 {
4452 bool fPendingIntr = false;
4453 uint8_t u8Tpr = 0;
4454 uint8_t u8PendingIntr = 0;
4455 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
4456 AssertRCReturn(rc, rc);
4457
4458 /*
4459 * If there are interrupts pending but masked by the TPR, instruct VT-x to
4460 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
4461 * priority of the pending interrupt so we can deliver the interrupt. If there
4462 * are no interrupts pending, set threshold to 0 to not cause any
4463 * TPR-below-threshold VM-exits.
4464 */
4465 Assert(pVmcsInfo->pbVirtApic);
4466 pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR] = u8Tpr;
4467 uint32_t u32TprThreshold = 0;
4468 if (fPendingIntr)
4469 {
4470 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
4471 (which is the Task-Priority Class). */
4472 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
4473 const uint8_t u8TprPriority = u8Tpr >> 4;
4474 if (u8PendingPriority <= u8TprPriority)
4475 u32TprThreshold = u8PendingPriority;
4476 }
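                    /* Example: a pending vector of 0x51 is priority class 5 and a TPR of 0x80 is
                       class 8; since 5 <= 8 the threshold becomes 5, so the guest dropping its TPR
                       below 0x50 causes a TPR-below-threshold VM-exit and we can deliver the
                       pending interrupt. */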
4477
4478 rc = hmR0VmxApicSetTprThreshold(pVCpu, pVmcsInfo, u32TprThreshold);
4479 AssertRCReturn(rc, rc);
4480 }
4481 }
4482 }
4483 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
4484 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
4485 }
4486 return VINF_SUCCESS;
4487}
4488
4489
4490/**
4491 * Gets the guest interruptibility-state.
4492 *
4493 * @returns Guest's interruptibility-state.
4494 * @param pVCpu The cross context virtual CPU structure.
4495 * @param pVmcsInfo The VMCS info. object.
4496 *
4497 * @remarks No-long-jump zone!!!
4498 */
4499static uint32_t hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
4500{
4501 /*
4502 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
4503 */
4504 uint32_t fIntrState = 0;
4505 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4506 {
4507 /* If inhibition is active, RIP and RFLAGS should've been updated
4508 (i.e. read previously from the VMCS or from ring-3). */
4509 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4510#ifdef VBOX_STRICT
4511 uint64_t const fExtrn = ASMAtomicUoReadU64(&pCtx->fExtrn);
4512 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
4513 AssertMsg(!(fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)), ("%#x\n", fExtrn));
4514#endif
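        /* If IF is set, the inhibition must stem from an STI instruction; otherwise assume a
           MOV SS/POP SS shadow. VT-x tracks these as two separate interruptibility bits. */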
4515 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
4516 {
4517 if (pCtx->eflags.Bits.u1IF)
4518 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
4519 else
4520 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
4521 }
4522 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
4523 {
4524 /*
4525 * We can clear the inhibit force flag as even if we go back to the recompiler
4526 * without executing guest code in VT-x, the flag's condition to be cleared is
4527 * met and thus the cleared state is correct.
4528 */
4529 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
4530 }
4531 }
4532
4533 /*
4534 * NMIs to the guest are blocked after an NMI is injected until the guest executes an IRET. We only
4535 * bother with virtual-NMI blocking when we have support for virtual NMIs in the CPU, otherwise
4536 * setting this would block host-NMIs and IRET will not clear the blocking.
4537 *
4538 * We always set NMI-exiting so when the host receives an NMI we get a VM-exit.
4539 *
4540 * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}.
4541 */
4542 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
4543 && CPUMIsGuestNmiBlocking(pVCpu))
4544 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
4545
4546 return fIntrState;
4547}
4548
4549
4550/**
4551 * Exports the exception intercepts required for guest execution in the VMCS.
4552 *
4553 * @returns VBox status code.
4554 * @param pVCpu The cross context virtual CPU structure.
4555 * @param pVmxTransient The VMX-transient structure.
4556 *
4557 * @remarks No-long-jump zone!!!
4558 */
4559static int hmR0VmxExportGuestXcptIntercepts(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
4560{
4561 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS)
4562 {
4563 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
4564 if ( !pVmxTransient->fIsNestedGuest
4565 && pVCpu->hm.s.fGIMTrapXcptUD)
4566 hmR0VmxAddXcptIntercept(pVmxTransient, X86_XCPT_UD);
4567 else
4568 hmR0VmxRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
4569
4570 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
4571 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
4572 }
4573 return VINF_SUCCESS;
4574}
4575
4576
4577/**
4578 * Exports the guest's RIP into the guest-state area in the VMCS.
4579 *
4580 * @returns VBox status code.
4581 * @param pVCpu The cross context virtual CPU structure.
4582 *
4583 * @remarks No-long-jump zone!!!
4584 */
4585static int hmR0VmxExportGuestRip(PVMCPU pVCpu)
4586{
4587 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP)
4588 {
4589 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
4590
4591 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
4592 AssertRCReturn(rc, rc);
4593
4594 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RIP);
4595 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
4596 }
4597 return VINF_SUCCESS;
4598}
4599
4600
4601/**
4602 * Exports the guest's RSP into the guest-state area in the VMCS.
4603 *
4604 * @returns VBox status code.
4605 * @param pVCpu The cross context virtual CPU structure.
4606 *
4607 * @remarks No-long-jump zone!!!
4608 */
4609static int hmR0VmxExportGuestRsp(PVMCPU pVCpu)
4610{
4611 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RSP)
4612 {
4613 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP);
4614
4615 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pVCpu->cpum.GstCtx.rsp);
4616 AssertRCReturn(rc, rc);
4617
4618 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RSP);
4619 }
4620 return VINF_SUCCESS;
4621}
4622
4623
4624/**
4625 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
4626 *
4627 * @returns VBox status code.
4628 * @param pVCpu The cross context virtual CPU structure.
4629 * @param pVmxTransient The VMX-transient structure.
4630 *
4631 * @remarks No-long-jump zone!!!
4632 */
4633static int hmR0VmxExportGuestRflags(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
4634{
4635 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
4636 {
4637 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
4638
4639 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
4640 Let us assert it as such and use 32-bit VMWRITE. */
4641 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
4642 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
4643 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
4644 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
4645
4646 /*
4647 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
4648 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
4649 * can run the real-mode guest code under Virtual 8086 mode.
4650 */
4651 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
4652 if (pVmcsInfo->RealMode.fRealOnV86Active)
4653 {
4654 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
4655 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
4656 Assert(!pVmxTransient->fIsNestedGuest);
4657 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
4658 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
4659 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
4660 }
4661
4662 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
4663 AssertRCReturn(rc, rc);
4664
4665 /*
4666 * Setup pending debug exceptions if the guest is single-stepping using EFLAGS.TF.
4667 *
4668 * We must avoid setting any automatic debug exceptions delivery when single-stepping
4669 * through the hypervisor debugger using EFLAGS.TF.
4670 */
4671 if ( !pVmxTransient->fIsNestedGuest
4672 && !pVCpu->hm.s.fSingleInstruction
4673 && fEFlags.Bits.u1TF)
4674 {
4675            /** @todo r=ramshankar: Warning!! We ASSUME EFLAGS.TF will not be cleared on
4676             *        premature trips to ring-3, especially since IEM does not yet handle it. */
4677 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS);
4678 AssertRCReturn(rc, rc);
4679 }
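        /*
         * Presumably this mirrors what a real CPU would do: with EFLAGS.TF set, a single-step
         * #DB is already pending at this instruction boundary, so the BS bit in the pending
         * debug-exceptions field ensures the guest's single-step trap is not lost across the
         * VM-entry.
         */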
4680 /** @todo NSTVMX: Handling copying of VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS from
4681 * nested-guest VMCS. */
4682
4683 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
4684 Log4Func(("EFlags=%#RX32\n", fEFlags.u32));
4685 }
4686 return VINF_SUCCESS;
4687}
4688
4689
4690/**
4691 * Exports the guest CR0 control register into the guest-state area in the VMCS.
4692 *
4693 * The guest FPU state is always pre-loaded, hence we don't need to bother with sharing
4694 * FPU-related CR0 bits between the guest and host.
4695 *
4696 * @returns VBox status code.
4697 * @param pVCpu The cross context virtual CPU structure.
4698 * @param pVmxTransient The VMX-transient structure.
4699 *
4700 * @remarks No-long-jump zone!!!
4701 */
4702static int hmR0VmxExportGuestCR0(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
4703{
4704 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR0)
4705 {
4706 PVM pVM = pVCpu->CTX_SUFF(pVM);
4707 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
4708
4709 /*
4710 * Figure out fixed CR0 bits in VMX operation.
4711 */
4712 uint64_t fSetCr0 = pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
4713 uint64_t const fZapCr0 = pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
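        /*
         * Per the IA32_VMX_CR0_FIXED0/FIXED1 MSRs (Intel spec. appendix A.7), a CR0 bit must be 1
         * if it is 1 in FIXED0 and must be 0 if it is 0 in FIXED1. Hence fSetCr0 (FIXED0 & FIXED1)
         * is OR'ed in further below to force the must-be-one bits, while fZapCr0 (FIXED0 | FIXED1)
         * is AND'ed to clear the must-be-zero bits.
         */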
4714 if (pVM->hm.s.vmx.fUnrestrictedGuest)
4715 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
4716 else
4717 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
4718
4719 if (!pVmxTransient->fIsNestedGuest)
4720 {
4721 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
4722 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
4723 uint64_t const u64ShadowCr0 = u64GuestCr0;
4724 Assert(!RT_HI_U32(u64GuestCr0));
4725
4726 /*
4727 * Setup VT-x's view of the guest CR0.
4728 */
4729 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
4730 if (pVM->hm.s.fNestedPaging)
4731 {
4732 if (CPUMIsGuestPagingEnabled(pVCpu))
4733 {
4734 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
4735 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
4736 | VMX_PROC_CTLS_CR3_STORE_EXIT);
4737 }
4738 else
4739 {
4740 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
4741 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
4742 | VMX_PROC_CTLS_CR3_STORE_EXIT;
4743 }
4744
4745 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
4746 if (pVM->hm.s.vmx.fUnrestrictedGuest)
4747 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
4748 }
4749 else
4750 {
4751 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
4752 u64GuestCr0 |= X86_CR0_WP;
4753 }
4754
4755 /*
4756 * Guest FPU bits.
4757 *
4758 * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
4759 * using CR0.TS.
4760 *
4761 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
4762 * set on the first CPUs to support VT-x; there is no mention of relaxing this with regards to UX (unrestricted guest execution) in the VM-entry checks.
4763 */
4764 u64GuestCr0 |= X86_CR0_NE;
4765
4766 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
4767 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
4768
4769 /*
4770 * Update exception intercepts.
4771 */
4772 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
4773 if (pVmcsInfo->RealMode.fRealOnV86Active)
4774 {
4775 Assert(PDMVmmDevHeapIsEnabled(pVM));
4776 Assert(pVM->hm.s.vmx.pRealModeTSS);
4777 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
4778 }
4779 else
4780 {
4781 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
4782 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
4783 if (fInterceptMF)
4784 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
4785 }
4786
4787 /* Additional intercepts for debugging, define these yourself explicitly. */
4788#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
4789 uXcptBitmap |= 0
4790 | RT_BIT(X86_XCPT_BP)
4791 | RT_BIT(X86_XCPT_DE)
4792 | RT_BIT(X86_XCPT_NM)
4793 | RT_BIT(X86_XCPT_TS)
4794 | RT_BIT(X86_XCPT_UD)
4795 | RT_BIT(X86_XCPT_NP)
4796 | RT_BIT(X86_XCPT_SS)
4797 | RT_BIT(X86_XCPT_GP)
4798 | RT_BIT(X86_XCPT_PF)
4799 | RT_BIT(X86_XCPT_MF)
4800 ;
4801#elif defined(HMVMX_ALWAYS_TRAP_PF)
4802 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
4803#endif
4804 if (pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv)
4805 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
4806 Assert(pVM->hm.s.fNestedPaging || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
4807
4808 /* Apply the fixed CR0 bits and enable caching. */
4809 u64GuestCr0 |= fSetCr0;
4810 u64GuestCr0 &= fZapCr0;
4811 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
4812
4813 /* Commit the CR0 and related fields to the guest VMCS. */
4814 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u64GuestCr0); /** @todo Fix to 64-bit when we drop 32-bit. */
4815 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0);
4816 if (uProcCtls != pVmcsInfo->u32ProcCtls)
4817 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
4818 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
4819 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
4820 AssertRCReturn(rc, rc);
4821
4822 /* Update our caches. */
4823 pVmcsInfo->u32ProcCtls = uProcCtls;
4824 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
4825
4826 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
4827 }
4828 else
4829 {
4830 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4831 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
4832 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
4833 uint64_t const u64ShadowCr0 = pVmcsNstGst->u64Cr0ReadShadow.u;
4834 Assert(!RT_HI_U32(u64GuestCr0));
4835 Assert(u64GuestCr0 & X86_CR0_NE);
4836
4837 /* Apply the fixed CR0 bits and enable caching. */
4838 u64GuestCr0 |= fSetCr0;
4839 u64GuestCr0 &= fZapCr0;
4840 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
4841
4842 /* Commit the CR0 and CR0 read shadow to the nested-guest VMCS. */
4843 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u64GuestCr0); /** @todo NSTVMX: Fix to 64-bit when we drop 32-bit. */
4844 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0);
4845 AssertRCReturn(rc, rc);
4846
4847 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
4848 }
4849
4850 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR0);
4851 }
4852
4853 return VINF_SUCCESS;
4854}
4855
4856
4857/**
4858 * Exports the guest control registers (CR3, CR4) into the guest-state area
4859 * in the VMCS.
4860 *
4861 * @returns VBox strict status code.
4862 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
4863 * without unrestricted guest access and the VMMDev is not presently
4864 * mapped (e.g. EFI32).
4865 *
4866 * @param pVCpu The cross context virtual CPU structure.
4867 * @param pVmxTransient The VMX-transient structure.
4868 *
4869 * @remarks No-long-jump zone!!!
4870 */
4871static VBOXSTRICTRC hmR0VmxExportGuestCR3AndCR4(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
4872{
4873 int rc = VINF_SUCCESS;
4874 PVM pVM = pVCpu->CTX_SUFF(pVM);
4875
4876 /*
4877 * Guest CR2.
4878 * It's always loaded in the assembler code. Nothing to do here.
4879 */
4880
4881 /*
4882 * Guest CR3.
4883 */
4884 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR3)
4885 {
4886 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
4887
4888 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
4889 if (pVM->hm.s.fNestedPaging)
4890 {
4891 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
4892 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
4893
4894 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
4895 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
4896 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
4897 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
4898
4899 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
4900 pVmcsInfo->HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
4901 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
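            /*
             * For reference (Intel spec. 24.6.11): EPTP bits 2:0 hold the EPT memory type
             * (6 = write-back), bits 5:3 hold the page-walk length minus one (3 for a 4-level
             * walk), bit 6 enables the accessed/dirty flags, and bits 51:12 hold the physical
             * address of the EPT PML4 table.
             */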
4902
4903 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
4904 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
4905 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
4906 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
4907 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
4908 || (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EPT_ACCESS_DIRTY),
4909 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
4910
4911 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
4912 AssertRCReturn(rc, rc);
4913
4914 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4915 if ( pVM->hm.s.vmx.fUnrestrictedGuest
4916 || CPUMIsGuestPagingEnabledEx(pCtx))
4917 {
4918 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
4919 if (CPUMIsGuestInPAEModeEx(pCtx))
4920 {
4921 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
4922 AssertRCReturn(rc, rc);
4923 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u);
4924 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u);
4925 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u);
4926 rc |= VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u);
4927 AssertRCReturn(rc, rc);
4928 }
4929
4930 /*
4931 * The guest's view of its CR3 is unblemished with nested paging when the
4932 * guest is using paging or we have unrestricted guest execution to handle
4933 * the guest when it's not using paging.
4934 */
4935 GCPhysGuestCR3 = pCtx->cr3;
4936 }
4937 else
4938 {
4939 /*
4940 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
4941 * thinks it accesses physical memory directly, we use our identity-mapped
4942 * page table to map guest-linear to guest-physical addresses. EPT takes care
4943 * of translating it to host-physical addresses.
4944 */
4945 RTGCPHYS GCPhys;
4946 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
4947
4948 /* We obtain it here every time as the guest could have relocated this PCI region. */
4949 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
4950 if (RT_SUCCESS(rc))
4951 { /* likely */ }
4952 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
4953 {
4954 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
4955 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
4956 }
4957 else
4958 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
4959
4960 GCPhysGuestCR3 = GCPhys;
4961 }
4962
4963 Log4Func(("u32GuestCr3=%#RGp (GstN)\n", GCPhysGuestCR3));
4964 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
4965 AssertRCReturn(rc, rc);
4966 }
4967 else
4968 {
4969 /* Non-nested paging case, just use the hypervisor's CR3. */
4970 RTHCPHYS const HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
4971
4972 Log4Func(("u32GuestCr3=%#RHv (HstN)\n", HCPhysGuestCR3));
4973 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
4974 AssertRCReturn(rc, rc);
4975 }
4976
4977 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR3);
4978 }
4979
4980 /*
4981 * Guest CR4.
4982 * ASSUMES this is done every time we get in from ring-3! (XCR0)
4983 */
4984 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR4)
4985 {
4986 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4987 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
4988 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4989
4990 /*
4991 * Figure out fixed CR4 bits in VMX operation.
4992 */
4993 uint64_t const fSetCr4 = pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
4994 uint64_t const fZapCr4 = pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
4995
4996 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
4997 uint64_t u64GuestCr4 = pCtx->cr4;
4998 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest ? pCtx->cr4 : pVmcsNstGst->u64Cr4ReadShadow.u;
4999 Assert(!RT_HI_U32(u64GuestCr4));
5000
5001 /*
5002 * Setup VT-x's view of the guest CR4.
5003 *
5004 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
5005 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
5006 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
5007 *
5008 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
5009 */
5010 if (pVmcsInfo->RealMode.fRealOnV86Active)
5011 {
5012 Assert(pVM->hm.s.vmx.pRealModeTSS);
5013 Assert(PDMVmmDevHeapIsEnabled(pVM));
5014 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
5015 }
5016
5017 if (pVM->hm.s.fNestedPaging)
5018 {
5019 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
5020 && !pVM->hm.s.vmx.fUnrestrictedGuest)
5021 {
5022 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
5023 u64GuestCr4 |= X86_CR4_PSE;
5024 /* Our identity mapping is a 32-bit page directory. */
5025 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
5026 }
5027 /* else use guest CR4.*/
5028 }
5029 else
5030 {
5031 Assert(!pVmxTransient->fIsNestedGuest);
5032
5033 /*
5034 * The shadow and guest paging modes differ; the shadow mode follows the host paging mode,
5035 * so we need to adjust VT-x's view of CR4 according to our shadow page tables.
5036 */
5037 switch (pVCpu->hm.s.enmShadowMode)
5038 {
5039 case PGMMODE_REAL: /* Real-mode. */
5040 case PGMMODE_PROTECTED: /* Protected mode without paging. */
5041 case PGMMODE_32_BIT: /* 32-bit paging. */
5042 {
5043 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
5044 break;
5045 }
5046
5047 case PGMMODE_PAE: /* PAE paging. */
5048 case PGMMODE_PAE_NX: /* PAE paging with NX. */
5049 {
5050 u64GuestCr4 |= X86_CR4_PAE;
5051 break;
5052 }
5053
5054 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
5055 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
5056#ifdef VBOX_ENABLE_64_BITS_GUESTS
5057 break;
5058#endif
5059 default:
5060 AssertFailed();
5061 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
5062 }
5063 }
5064
5065 /* Apply the fixed CR4 bits (mainly CR4.VMXE). */
5066 u64GuestCr4 |= fSetCr4;
5067 u64GuestCr4 &= fZapCr4;
5068
5069 /* Commit the CR4 and CR4 read shadow to the guest VMCS. */
5070 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u64GuestCr4); /** @todo Fix to 64-bit when we drop 32-bit. */
5071 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4);
5072 AssertRCReturn(rc, rc);
5073
5074 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
5075 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
5076
5077 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4);
5078
5079 Log4Func(("cr4=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
5080 }
5081 return rc;
5082}
5083
5084
5085/**
5086 * Exports the guest debug registers into the guest-state area in the VMCS.
5087 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
5088 *
5089 * This also sets up whether \#DB and MOV DRx accesses cause VM-exits.
5090 *
5091 * @returns VBox status code.
5092 * @param pVCpu The cross context virtual CPU structure.
5093 * @param pVmxTransient The VMX-transient structure.
5094 *
5095 * @remarks No-long-jump zone!!!
5096 */
5097static int hmR0VmxExportSharedDebugState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
5098{
5099 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
5100
5101 /** @todo NSTVMX: Figure out what we want to do with nested-guest instruction
5102 * stepping. */
5103 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5104 if (pVmxTransient->fIsNestedGuest)
5105 {
5106 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, CPUMGetGuestDR7(pVCpu));
5107 AssertRCReturn(rc, rc);
5108 return VINF_SUCCESS;
5109 }
5110
5111#ifdef VBOX_STRICT
5112    /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, MSRs" */
5113 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5114 {
5115 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
5116 Assert((pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0);
5117 Assert((pVCpu->cpum.GstCtx.dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK);
5118 }
5119#endif
5120
5121 bool fSteppingDB = false;
5122 bool fInterceptMovDRx = false;
5123 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
5124 if (pVCpu->hm.s.fSingleInstruction)
5125 {
5126 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
5127 PVM pVM = pVCpu->CTX_SUFF(pVM);
5128 if (pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MONITOR_TRAP_FLAG)
5129 {
5130 uProcCtls |= VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
5131 Assert(fSteppingDB == false);
5132 }
5133 else
5134 {
5135 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_TF;
5136 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_RFLAGS;
5137 pVCpu->hm.s.fClearTrapFlag = true;
5138 fSteppingDB = true;
5139 }
5140 }
5141
5142 uint32_t u32GuestDr7;
5143 if ( fSteppingDB
5144 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
5145 {
5146 /*
5147 * Use the combined guest and host DRx values found in the hypervisor register set
5148 * because the hypervisor debugger has breakpoints active or someone is single stepping
5149 * on the host side without a monitor trap flag.
5150 *
5151 * Note! DBGF expects a clean DR6 state before executing guest code.
5152 */
5153#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
5154 if ( CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
5155 && !CPUMIsHyperDebugStateActivePending(pVCpu))
5156 {
5157 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
5158 Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
5159 Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
5160 }
5161 else
5162#endif
5163 if (!CPUMIsHyperDebugStateActive(pVCpu))
5164 {
5165 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
5166 Assert(CPUMIsHyperDebugStateActive(pVCpu));
5167 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
5168 }
5169
5170 /* Update DR7 with the hypervisor value (other DRx registers are handled by CPUM one way or another). */
5171 u32GuestDr7 = (uint32_t)CPUMGetHyperDR7(pVCpu);
5172 pVCpu->hm.s.fUsingHyperDR7 = true;
5173 fInterceptMovDRx = true;
5174 }
5175 else
5176 {
5177 /*
5178 * If the guest has enabled debug registers, we need to load them prior to
5179 * executing guest code so they'll trigger at the right time.
5180 */
5181 if (pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
5182 {
5183#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
5184 if ( CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
5185 && !CPUMIsGuestDebugStateActivePending(pVCpu))
5186 {
5187 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
5188 Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
5189 Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
5190 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
5191 }
5192 else
5193#endif
5194 if (!CPUMIsGuestDebugStateActive(pVCpu))
5195 {
5196 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
5197 Assert(CPUMIsGuestDebugStateActive(pVCpu));
5198 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
5199 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
5200 }
5201 Assert(!fInterceptMovDRx);
5202 }
5203 /*
5204         * If no debugging is enabled, we'll lazily load DR0-3. Unlike on AMD-V, we
5205         * must intercept #DB in order to maintain a correct guest DR6 value, and
5206         * since we also need the intercept to prevent nested #DBs from hanging the
5207         * CPU, we end up always intercepting it. See hmR0VmxSetupVmcsXcptBitmap().
5208 */
5209#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
5210 else if ( !CPUMIsGuestDebugStateActivePending(pVCpu)
5211 && !CPUMIsGuestDebugStateActive(pVCpu))
5212#else
5213 else if (!CPUMIsGuestDebugStateActive(pVCpu))
5214#endif
5215 {
5216 fInterceptMovDRx = true;
5217 }
5218
5219 /* Update DR7 with the actual guest value. */
5220 u32GuestDr7 = pVCpu->cpum.GstCtx.dr[7];
5221 pVCpu->hm.s.fUsingHyperDR7 = false;
5222 }
5223
5224 if (fInterceptMovDRx)
5225 uProcCtls |= VMX_PROC_CTLS_MOV_DR_EXIT;
5226 else
5227 uProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
5228
5229 /*
5230 * Update the processor-based VM-execution controls with the MOV-DRx intercepts and the
5231 * monitor-trap flag and update our cache.
5232 */
5233 if (uProcCtls != pVmcsInfo->u32ProcCtls)
5234 {
5235 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
5236 AssertRCReturn(rc2, rc2);
5237 pVmcsInfo->u32ProcCtls = uProcCtls;
5238 }
5239
5240 /*
5241 * Update guest DR7.
5242 */
5243 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, u32GuestDr7);
5244 AssertRCReturn(rc, rc);
5245
5246 /*
5247 * If we have forced EFLAGS.TF to be set because we're single-stepping in the hypervisor debugger,
5248     * we need to clear any interrupt inhibition, as otherwise it causes a VM-entry failure.
5249 *
5250 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
5251 */
5252 if (fSteppingDB)
5253 {
5254 Assert(pVCpu->hm.s.fSingleInstruction);
5255 Assert(pVCpu->cpum.GstCtx.eflags.Bits.u1TF);
5256
5257 uint32_t fIntrState = 0;
5258 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
5259 AssertRCReturn(rc, rc);
5260
5261 if (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5262 {
5263 fIntrState &= ~(VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
5264 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
5265 AssertRCReturn(rc, rc);
5266 }
5267 }
5268
5269 return VINF_SUCCESS;
5270}
5271
5272
5273#ifdef VBOX_STRICT
5274/**
5275 * Strict function to validate segment registers.
5276 *
5277 * @param pVCpu The cross context virtual CPU structure.
5278 * @param pVmcsInfo The VMCS info. object.
5279 *
5280 * @remarks Will import guest CR0 on strict builds during validation of
5281 * segments.
5282 */
5283static void hmR0VmxValidateSegmentRegs(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
5284{
5285 /*
5286 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
5287 *
5288 * The reason we check for attribute value 0 in this function and not just the unusable bit is
5289 * because hmR0VmxExportGuestSegReg() only updates the VMCS' copy of the value with the
5290 * unusable bit and doesn't change the guest-context value.
5291 */
5292 PVM pVM = pVCpu->CTX_SUFF(pVM);
5293 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5294 hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
5295 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
5296 && ( !CPUMIsGuestInRealModeEx(pCtx)
5297 && !CPUMIsGuestInV86ModeEx(pCtx)))
5298 {
5299 /* Protected mode checks */
5300 /* CS */
5301 Assert(pCtx->cs.Attr.n.u1Present);
5302 Assert(!(pCtx->cs.Attr.u & 0xf00));
5303 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
5304 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
5305 || !(pCtx->cs.Attr.n.u1Granularity));
5306 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
5307 || (pCtx->cs.Attr.n.u1Granularity));
5308 /* CS cannot be loaded with NULL in protected mode. */
5309 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
5310 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
5311 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
5312 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
5313 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
5314 else
5315            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
5316 /* SS */
5317 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
5318 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
5319 if ( !(pCtx->cr0 & X86_CR0_PE)
5320 || pCtx->cs.Attr.n.u4Type == 3)
5321 {
5322 Assert(!pCtx->ss.Attr.n.u2Dpl);
5323 }
5324 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
5325 {
5326 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
5327 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
5328 Assert(pCtx->ss.Attr.n.u1Present);
5329 Assert(!(pCtx->ss.Attr.u & 0xf00));
5330 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
5331 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
5332 || !(pCtx->ss.Attr.n.u1Granularity));
5333 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
5334 || (pCtx->ss.Attr.n.u1Granularity));
5335 }
5336 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSegReg(). */
5337 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
5338 {
5339 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
5340 Assert(pCtx->ds.Attr.n.u1Present);
5341 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
5342 Assert(!(pCtx->ds.Attr.u & 0xf00));
5343 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
5344 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
5345 || !(pCtx->ds.Attr.n.u1Granularity));
5346 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
5347 || (pCtx->ds.Attr.n.u1Granularity));
5348 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5349 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
5350 }
5351 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
5352 {
5353 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
5354 Assert(pCtx->es.Attr.n.u1Present);
5355 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
5356 Assert(!(pCtx->es.Attr.u & 0xf00));
5357 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
5358 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
5359 || !(pCtx->es.Attr.n.u1Granularity));
5360 Assert( !(pCtx->es.u32Limit & 0xfff00000)
5361 || (pCtx->es.Attr.n.u1Granularity));
5362 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5363 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
5364 }
5365 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
5366 {
5367 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
5368 Assert(pCtx->fs.Attr.n.u1Present);
5369 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
5370 Assert(!(pCtx->fs.Attr.u & 0xf00));
5371 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
5372 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
5373 || !(pCtx->fs.Attr.n.u1Granularity));
5374 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
5375 || (pCtx->fs.Attr.n.u1Granularity));
5376 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5377 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
5378 }
5379 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
5380 {
5381 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
5382 Assert(pCtx->gs.Attr.n.u1Present);
5383 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
5384 Assert(!(pCtx->gs.Attr.u & 0xf00));
5385 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
5386 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
5387 || !(pCtx->gs.Attr.n.u1Granularity));
5388 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
5389 || (pCtx->gs.Attr.n.u1Granularity));
5390 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
5391 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
5392 }
5393 /* 64-bit capable CPUs. */
5394# if HC_ARCH_BITS == 64
5395 Assert(!RT_HI_U32(pCtx->cs.u64Base));
5396 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
5397 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
5398 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
5399# endif
5400 }
5401 else if ( CPUMIsGuestInV86ModeEx(pCtx)
5402 || ( CPUMIsGuestInRealModeEx(pCtx)
5403 && !pVM->hm.s.vmx.fUnrestrictedGuest))
5404 {
5405 /* Real and v86 mode checks. */
5406        /* hmR0VmxExportGuestSegReg() writes the modified attributes into the VMCS. We want to validate what we're actually feeding to VT-x. */
5407 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
5408 if (pVmcsInfo->RealMode.fRealOnV86Active)
5409 {
5410 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
5411 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
5412 }
5413 else
5414 {
5415 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
5416 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
5417 }
5418
5419 /* CS */
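        /*
         * For example, in real and v86 mode the base must equal the selector shifted left by 4
         * (CS.Sel = 0x1234 implies CS.u64Base = 0x12340) and all limits must be 0xffff.
         */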
5420 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
5421 Assert(pCtx->cs.u32Limit == 0xffff);
5422 Assert(u32CSAttr == 0xf3);
5423 /* SS */
5424 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
5425 Assert(pCtx->ss.u32Limit == 0xffff);
5426 Assert(u32SSAttr == 0xf3);
5427 /* DS */
5428 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
5429 Assert(pCtx->ds.u32Limit == 0xffff);
5430 Assert(u32DSAttr == 0xf3);
5431 /* ES */
5432 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
5433 Assert(pCtx->es.u32Limit == 0xffff);
5434 Assert(u32ESAttr == 0xf3);
5435 /* FS */
5436 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
5437 Assert(pCtx->fs.u32Limit == 0xffff);
5438 Assert(u32FSAttr == 0xf3);
5439 /* GS */
5440 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
5441 Assert(pCtx->gs.u32Limit == 0xffff);
5442 Assert(u32GSAttr == 0xf3);
5443 /* 64-bit capable CPUs. */
5444# if HC_ARCH_BITS == 64
5445 Assert(!RT_HI_U32(pCtx->cs.u64Base));
5446 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
5447 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
5448 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
5449# endif
5450 }
5451}
5452#endif /* VBOX_STRICT */
5453
5454
5455/**
5456 * Exports a guest segment register into the guest-state area in the VMCS.
5457 *
5458 * @returns VBox status code.
5459 * @param pVCpu The cross context virtual CPU structure.
5460 * @param pVmcsInfo The VMCS info. object.
5461 * @param iSegReg The segment register number (X86_SREG_XXX).
5462 * @param pSelReg Pointer to the segment selector.
5463 *
5464 * @remarks No-long-jump zone!!!
5465 */
5466static int hmR0VmxExportGuestSegReg(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint8_t iSegReg, PCCPUMSELREG pSelReg)
5467{
5468 Assert(iSegReg < X86_SREG_COUNT);
5469 uint32_t const idxSel = g_aVmcsSegSel[iSegReg];
5470 uint32_t const idxLimit = g_aVmcsSegLimit[iSegReg];
5471 uint32_t const idxBase = g_aVmcsSegBase[iSegReg];
5472 uint32_t const idxAttr = g_aVmcsSegAttr[iSegReg];
5473
5474 uint32_t u32Access = pSelReg->Attr.u;
5475 if (pVmcsInfo->RealMode.fRealOnV86Active)
5476 {
5477 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
5478 u32Access = 0xf3;
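        /* 0xf3 decodes as: type=3 (read/write data, accessed), S=1 (code/data), DPL=3, present=1. */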
5479 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
5480 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
5481 RT_NOREF_PV(pVCpu);
5482 }
5483 else
5484 {
5485 /*
5486     * The way to tell whether this is really a null selector or just a selector that was loaded
5487     * with 0 in real-mode is by the segment attributes. A selector loaded in real-mode with the
5488     * value 0 is valid and usable in protected-mode and we should -not- mark it as an unusable
5489     * segment. Both the recompiler & VT-x ensure that NULL selectors loaded in protected-mode
5490     * have their attribute as 0.
5491 */
5492 if (!u32Access)
5493 u32Access = X86DESCATTR_UNUSABLE;
5494 }
5495
5496 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
5497 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
5498 ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg, pSelReg->Attr.u));
5499
5500 /*
5501 * Commit it to the VMCS.
5502 */
5503 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel);
5504 rc |= VMXWriteVmcs32(idxLimit, pSelReg->u32Limit);
5505 rc |= VMXWriteVmcsGstN(idxBase, pSelReg->u64Base);
5506 rc |= VMXWriteVmcs32(idxAttr, u32Access);
5507 AssertRCReturn(rc, rc);
5508 return rc;
5509}
5510
5511
5512/**
5513 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
5514 * area in the VMCS.
5515 *
5516 * @returns VBox status code.
5517 * @param pVCpu The cross context virtual CPU structure.
5518 * @param pVmxTransient The VMX-transient structure.
5519 *
5520 * @remarks Will import guest CR0 on strict builds during validation of
5521 * segments.
5522 * @remarks No-long-jump zone!!!
5523 */
5524static int hmR0VmxExportGuestSegRegsXdtr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
5525{
5526 int rc = VERR_INTERNAL_ERROR_5;
5527 PVM pVM = pVCpu->CTX_SUFF(pVM);
5528 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5529 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5530
5531 /*
5532 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
5533 */
5534 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
5535 {
5536#ifdef VBOX_WITH_REM
5537 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
5538 {
5539 Assert(!pVmxTransient->fIsNestedGuest);
5540 Assert(pVM->hm.s.vmx.pRealModeTSS);
5541 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
5542 if ( pVmcsInfo->fWasInRealMode
5543 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
5544 {
5545 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
5546 in real-mode (e.g. OpenBSD 4.0) */
5547 REMFlushTBs(pVM);
5548 Log4Func(("Switch to protected mode detected!\n"));
5549 pVmcsInfo->fWasInRealMode = false;
5550 }
5551 }
5552#endif
5553 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CS)
5554 {
5555 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
5556 if (pVmcsInfo->RealMode.fRealOnV86Active)
5557 pVmcsInfo->RealMode.AttrCS.u = pCtx->cs.Attr.u;
5558 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
5559 AssertRCReturn(rc, rc);
5560 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CS);
5561 }
5562
5563 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SS)
5564 {
5565 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
5566 if (pVmcsInfo->RealMode.fRealOnV86Active)
5567 pVmcsInfo->RealMode.AttrSS.u = pCtx->ss.Attr.u;
5568 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
5569 AssertRCReturn(rc, rc);
5570 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SS);
5571 }
5572
5573 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_DS)
5574 {
5575 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
5576 if (pVmcsInfo->RealMode.fRealOnV86Active)
5577 pVmcsInfo->RealMode.AttrDS.u = pCtx->ds.Attr.u;
5578 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
5579 AssertRCReturn(rc, rc);
5580 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_DS);
5581 }
5582
5583 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_ES)
5584 {
5585 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
5586 if (pVmcsInfo->RealMode.fRealOnV86Active)
5587 pVmcsInfo->RealMode.AttrES.u = pCtx->es.Attr.u;
5588 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
5589 AssertRCReturn(rc, rc);
5590 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_ES);
5591 }
5592
5593 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_FS)
5594 {
5595 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
5596 if (pVmcsInfo->RealMode.fRealOnV86Active)
5597 pVmcsInfo->RealMode.AttrFS.u = pCtx->fs.Attr.u;
5598 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
5599 AssertRCReturn(rc, rc);
5600 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_FS);
5601 }
5602
5603 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GS)
5604 {
5605 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
5606 if (pVmcsInfo->RealMode.fRealOnV86Active)
5607 pVmcsInfo->RealMode.AttrGS.u = pCtx->gs.Attr.u;
5608 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
5609 AssertRCReturn(rc, rc);
5610 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GS);
5611 }
5612
5613#ifdef VBOX_STRICT
5614 hmR0VmxValidateSegmentRegs(pVCpu, pVmcsInfo);
5615#endif
5616 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
5617 pCtx->cs.Attr.u));
5618 }
5619
5620 /*
5621 * Guest TR.
5622 */
5623 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_TR)
5624 {
5625 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
5626
5627 /*
5628 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
5629 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
5630 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
5631 */
5632 uint16_t u16Sel;
5633 uint32_t u32Limit;
5634 uint64_t u64Base;
5635 uint32_t u32AccessRights;
5636 if (!pVmcsInfo->RealMode.fRealOnV86Active)
5637 {
5638 u16Sel = pCtx->tr.Sel;
5639 u32Limit = pCtx->tr.u32Limit;
5640 u64Base = pCtx->tr.u64Base;
5641 u32AccessRights = pCtx->tr.Attr.u;
5642 }
5643 else
5644 {
5645 Assert(!pVmxTransient->fIsNestedGuest);
5646 Assert(pVM->hm.s.vmx.pRealModeTSS);
5647 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
5648
5649 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
5650 RTGCPHYS GCPhys;
5651 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
5652 AssertRCReturn(rc, rc);
5653
5654 X86DESCATTR DescAttr;
5655 DescAttr.u = 0;
5656 DescAttr.n.u1Present = 1;
5657 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
5658
5659 u16Sel = 0;
5660 u32Limit = HM_VTX_TSS_SIZE;
5661 u64Base = GCPhys;
5662 u32AccessRights = DescAttr.u;
5663 }
5664
5665 /* Validate. */
5666 Assert(!(u16Sel & RT_BIT(2)));
5667 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
5668 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
5669 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
5670 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
5671 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
5672 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
5673 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
5674 Assert( (u32Limit & 0xfff) == 0xfff
5675 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
5676 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
5677 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
5678
5679 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_TR_SEL, u16Sel);
5680 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit);
5681 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights);
5682 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base);
5683 AssertRCReturn(rc, rc);
5684
5685 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_TR);
5686 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
5687 }
5688
5689 /*
5690 * Guest GDTR.
5691 */
5692 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GDTR)
5693 {
5694 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
5695
5696 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt);
5697 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt);
5698 AssertRCReturn(rc, rc);
5699
5700 /* Validate. */
5701 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
5702
5703 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
5704 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
5705 }
5706
5707 /*
5708 * Guest LDTR.
5709 */
5710 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_LDTR)
5711 {
5712 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
5713
5714        /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
5715 uint32_t u32Access;
5716 if ( !pVmxTransient->fIsNestedGuest
5717 && !pCtx->ldtr.Attr.u)
5718 u32Access = X86DESCATTR_UNUSABLE;
5719 else
5720 u32Access = pCtx->ldtr.Attr.u;
5721
5722 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel);
5723 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit);
5724 rc |= VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access);
5725 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base);
5726 AssertRCReturn(rc, rc);
5727
5728 /* Validate. */
5729 if (!(u32Access & X86DESCATTR_UNUSABLE))
5730 {
5731 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
5732 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
5733 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
5734 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
5735 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
5736 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
5737 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
5738 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
5739 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
5740 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
5741 }
5742
5743 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
5744 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
5745 }
5746
5747 /*
5748 * Guest IDTR.
5749 */
5750 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_IDTR)
5751 {
5752 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
5753
5754 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt);
5755 rc |= VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt);
5756 AssertRCReturn(rc, rc);
5757
5758 /* Validate. */
5759 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
5760
5761 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
5762 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
5763 }
5764
5765 return VINF_SUCCESS;
5766}
5767
5768
5769/**
5770 * Exports certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
5771 * areas.
5772 *
5773 * These MSRs will automatically be loaded to the host CPU on every successful
5774 * VM-entry and stored from the host CPU on every successful VM-exit.
5775 *
5776 * We create/update MSR slots for the host MSRs in the VM-exit MSR-load area. The
5777 * actual host MSR values are not updated here for performance reasons. See
5778 * hmR0VmxExportHostMsrs().
5779 *
5780 * We also export the guest sysenter MSRs into the guest-state area in the VMCS.
5781 *
5782 * @returns VBox status code.
5783 * @param pVCpu The cross context virtual CPU structure.
5784 * @param pVmxTransient The VMX-transient structure.
5785 *
5786 * @remarks No-long-jump zone!!!
5787 */
5788static int hmR0VmxExportGuestMsrs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
5789{
5790 AssertPtr(pVCpu);
5791 AssertPtr(pVmxTransient);
5792
5793 PVM pVM = pVCpu->CTX_SUFF(pVM);
5794 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5795
5796 /*
5797     * MSRs for which we use the auto-load/store MSR area in the VMCS.
5798 * For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs().
5799 * The host MSR values are updated when it's safe in hmR0VmxLazySaveHostMsrs().
5800 *
5801     * For nested-guests, the guest's MSRs from the VM-entry MSR-load area are already
5802 * loaded (into the guest-CPU context) by the VMLAUNCH/VMRESUME instruction
5803 * emulation, nothing to do here.
5804 */
5805 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
5806 {
5807 if ( !pVmxTransient->fIsNestedGuest
5808 && pVM->hm.s.fAllow64BitGuests)
5809 {
5810#if HC_ARCH_BITS == 32
5811 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_KERNEL_GS_BASE);
5812 Assert(!pVmxTransient->fIsNestedGuest);
5813
5814 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_LSTAR, pCtx->msrLSTAR, true, false);
5815 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_STAR, pCtx->msrSTAR, true, false);
5816 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_SF_MASK, pCtx->msrSFMASK, true, false);
5817 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE, true, false);
5818 AssertRCReturn(rc, rc);
5819#endif
5820 }
5821 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_AUTO_MSRS);
5822 }
5823
5824 /*
5825 * Guest Sysenter MSRs.
5826 */
5827 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
5828 {
5829 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
5830
5831 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
5832 {
5833 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pCtx->SysEnter.cs);
5834 AssertRCReturn(rc, rc);
5835 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_CS_MSR);
5836 }
5837
5838 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
5839 {
5840 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip);
5841 AssertRCReturn(rc, rc);
5842 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
5843 }
5844
5845 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
5846 {
5847 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp);
5848 AssertRCReturn(rc, rc);
5849 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
5850 }
5851 }
5852
5853 /*
5854 * Guest/host EFER MSR.
5855 */
5856 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR)
5857 {
5858 /* Whether we are using the VMCS to swap the EFER MSR must have been
5859 determined earlier while exporting VM-entry/VM-exit controls. */
5860 Assert(!(ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS));
5861 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
5862
5863 if (hmR0VmxShouldSwapEferMsr(pVCpu))
5864 {
5865 /*
5866 * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
5867 * but to use the auto-load store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
5868 */
5869 if (pVM->hm.s.vmx.fSupportsVmcsEfer)
5870 {
5871 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pCtx->msrEFER);
5872 AssertRCReturn(rc, rc);
5873 }
5874 else
5875 {
5876 /*
5877 * We shall use the auto-load/store MSR area only for loading the EFER MSR but we must
5878 * continue to intercept guest read and write accesses to it, see @bugref{7386#c16}.
5879 */
5880 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER, pCtx->msrEFER,
5881 false /* fSetReadWrite */, false /* fUpdateHostMsr */);
5882 AssertRCReturn(rc, rc);
5883 }
5884 }
5885 else if (!pVM->hm.s.vmx.fSupportsVmcsEfer)
5886 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER);
5887
5888 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);
5889 }
5890
5891 /*
5892 * Other MSRs.
5893 * Speculation Control (R/W).
5894 */
5895 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_OTHER_MSRS)
5896 {
5897        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_OTHER_MSRS);
5898 if (pVM->cpum.ro.GuestFeatures.fIbrs)
5899 {
5900 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu),
5901 false /* fSetReadWrite */, false /* fUpdateHostMsr */);
5902 AssertRCReturn(rc, rc);
5903 }
5904 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_OTHER_MSRS);
5905 }
5906
5907 return VINF_SUCCESS;
5908}
5909
5910
5911/**
5912 * Selects the appropriate function to run guest code.
5913 *
5914 * @returns VBox status code.
5915 * @param pVCpu The cross context virtual CPU structure.
5916 * @param pVmxTransient The VMX-transient structure.
5917 *
5918 * @remarks No-long-jump zone!!!
5919 */
5920static int hmR0VmxSelectVMRunHandler(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
5921{
5922 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5923 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5924
5925 if (CPUMIsGuestInLongModeEx(pCtx))
5926 {
5927#ifndef VBOX_ENABLE_64_BITS_GUESTS
5928 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
5929#endif
5930 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
5931#if HC_ARCH_BITS == 32
5932 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
5933 if (pVmcsInfo->pfnStartVM != VMXR0SwitcherStartVM64)
5934 {
5935#ifdef VBOX_STRICT
5936 if (pVmcsInfo->pfnStartVM != NULL) /* Very first VM-entry would have saved host-state already, ignore it. */
5937 {
5938            /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
5939 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
5940 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
5941 AssertMsg(fCtxChanged & (HM_CHANGED_VMX_ENTRY_EXIT_CTLS | HM_CHANGED_GUEST_EFER_MSR),
5942 ("fCtxChanged=%#RX64\n", fCtxChanged));
5943 }
5944#endif
5945 pVmcsInfo->pfnStartVM = VMXR0SwitcherStartVM64;
5946
5947        /* Mark that we've switched to the 64-bit handler; we can't safely switch back to 32-bit for
5948 the rest of the VM run (until VM reset). See @bugref{8432#c7}. */
5949 pVmcsInfo->fSwitchedTo64on32 = true;
5950 Log4Func(("Selected 64-bit switcher\n"));
5951 }
5952#else
5953 /* 64-bit host. */
5954 pVmcsInfo->pfnStartVM = VMXR0StartVM64;
5955#endif
5956 }
5957 else
5958 {
5959 /* Guest is not in long mode, use the 32-bit handler. */
5960#if HC_ARCH_BITS == 32
5961 if ( pVmcsInfo->pfnStartVM != VMXR0StartVM32
5962 && !pVmcsInfo->fSwitchedTo64on32 /* If set, guest mode change does not imply switcher change. */
5963 && pVmcsInfo->pfnStartVM != NULL) /* Very first VM-entry would have saved host-state already, ignore it. */
5964 {
5965# ifdef VBOX_STRICT
5966            /* Currently, all mode changes send us back to ring-3, so these should be set. See @bugref{6944}. */
5967 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
5968 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
5969 AssertMsg(fCtxChanged & (HM_CHANGED_VMX_ENTRY_EXIT_CTLS | HM_CHANGED_GUEST_EFER_MSR),
5970 ("fCtxChanged=%#RX64\n", fCtxChanged));
5971# endif
5972 }
5973# ifdef VBOX_ENABLE_64_BITS_GUESTS
5974 /*
5975 * Keep using the 64-bit switcher even though we're in 32-bit because of bad Intel
5976 * design, see @bugref{8432#c7}. If real-on-v86 mode is active, clear the 64-bit
5977 * switcher flag now because we know the guest is in a sane state where it's safe
5978 * to use the 32-bit switcher. Otherwise, check the guest state if it's safe to use
5979 * the much faster 32-bit switcher again.
5980 */
5981 if (!pVmcsInfo->fSwitchedTo64on32)
5982 {
5983 if (pVmcsInfo->pfnStartVM != VMXR0StartVM32)
5984 Log4Func(("Selected 32-bit switcher\n"));
5985 pVmcsInfo->pfnStartVM = VMXR0StartVM32;
5986 }
5987 else
5988 {
5989 Assert(pVmcsInfo->pfnStartVM == VMXR0SwitcherStartVM64);
5990 if ( pVmcsInfo->RealMode.fRealOnV86Active
5991 || hmR0VmxIs32BitSwitcherSafe(pCtx))
5992 {
5993 pVmcsInfo->fSwitchedTo64on32 = false;
5994 pVmcsInfo->pfnStartVM = VMXR0StartVM32;
5995 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR
5996 | HM_CHANGED_VMX_ENTRY_EXIT_CTLS
5997 | HM_CHANGED_HOST_CONTEXT);
5998 Log4Func(("Selected 32-bit switcher (safe)\n"));
5999 }
6000 }
6001# else
6002 pVmcsInfo->pfnStartVM = VMXR0StartVM32;
6003# endif
6004#else
6005 pVmcsInfo->pfnStartVM = VMXR0StartVM32;
6006#endif
6007 }
6008 Assert(pVmcsInfo->pfnStartVM);
6009 return VINF_SUCCESS;
6010}
6011
6012
6013/**
6014 * Wrapper for running the guest code in VT-x.
6015 *
6016 * @returns VBox status code, no informational status codes.
6017 * @param pVCpu The cross context virtual CPU structure.
6018 * @param pVmxTransient The VMX-transient structure.
6019 *
6020 * @remarks No-long-jump zone!!!
6021 */
6022DECLINLINE(int) hmR0VmxRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
6023{
6024 /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
6025 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6026 pCtx->fExtrn |= HMVMX_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
6027
6028 /** @todo Add stats for VMRESUME vs VMLAUNCH. */
6029
6030 /*
6031 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses
6032 * floating-point operations using SSE instructions. Some XMM registers (XMM6-XMM15) are
6033 * callee-saved and thus the need for this XMM wrapper.
6034 *
6035 * See MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage".
6036 */
6037 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
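    /* A VMCS in the "launched" state must be entered with VMRESUME, while a "clear" VMCS requires
       VMLAUNCH; fResumeVM below tells the start-VM handler which of the two instructions to use. */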
6038 bool const fResumeVM = RT_BOOL(pVmcsInfo->fVmcsState & VMX_V_VMCS_LAUNCH_STATE_LAUNCHED);
6039 PVM pVM = pVCpu->CTX_SUFF(pVM);
6040#ifdef VBOX_WITH_KERNEL_USING_XMM
6041 int rc = hmR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VmcsCache, pVM, pVCpu, pVmcsInfo->pfnStartVM);
6042#else
6043 int rc = pVmcsInfo->pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VmcsCache, pVM, pVCpu);
6044#endif
6045 AssertMsg(rc <= VINF_SUCCESS, ("%Rrc\n", rc));
6046 return rc;
6047}
6048
6049
6050/**
6051 * Reports world-switch error and dumps some useful debug info.
6052 *
6053 * @param pVCpu The cross context virtual CPU structure.
6054 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
6055 * @param pVmxTransient The VMX-transient structure (only
6056 * exitReason updated).
6057 */
6058static void hmR0VmxReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun, PVMXTRANSIENT pVmxTransient)
6059{
6060 Assert(pVCpu);
6061 Assert(pVmxTransient);
6062 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
6063
6064 Log4Func(("VM-entry failure: %Rrc\n", rcVMRun));
6065 switch (rcVMRun)
6066 {
6067 case VERR_VMX_INVALID_VMXON_PTR:
6068 AssertFailed();
6069 break;
6070 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
6071 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
6072 {
6073 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
6074 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
6075 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
6076 AssertRC(rc);
6077
6078 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
6079 /* LastError.idCurrentCpu was already updated in hmR0VmxPreRunGuestCommitted().
6080 Cannot do it here as we may have been long preempted. */
6081
6082#ifdef VBOX_STRICT
6083 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
6084 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
6085 pVmxTransient->uExitReason));
6086 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQual));
6087 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
6088 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
6089 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));
6090 else
6091 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
6092 Log4(("Entered host CPU %u\n", pVCpu->hm.s.vmx.LastError.idEnteredCpu));
6093 Log4(("Current host CPU %u\n", pVCpu->hm.s.vmx.LastError.idCurrentCpu));
6094
6095 /* VMX control bits. */
6096 uint32_t u32Val;
6097 uint64_t u64Val;
6098 RTHCUINTREG uHCReg;
6099 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
6100 Log4(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
6101 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
6102 Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
6103 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
6104 {
6105 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
6106 Log4(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
6107 }
6108 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
6109 Log4(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
6110 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
6111 Log4(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
6112 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
6113 Log4(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
6114 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
6115 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
6116 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
6117 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
6118 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
6119 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
6120 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
6121 Log4(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
6122 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
6123 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
6124 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
6125 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
6126 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
6127 Log4(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
6128 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
6129 Log4(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
6130 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
6131 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
6132 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
6133 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
6134 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
6135 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
6136 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
6137            Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW                %#RHr\n", uHCReg));
6138 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
6139 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
6140 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
6141 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
6142 if (pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
6143 {
6144 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
6145 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
6146 }
6147
6148 /* Guest bits. */
6149 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc);
6150 Log4(("Old Guest Rip %#RX64 New %#RX64\n", pVCpu->cpum.GstCtx.rip, u64Val));
6151 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc);
6152 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pVCpu->cpum.GstCtx.rsp, u64Val));
6153 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
6154 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pVCpu->cpum.GstCtx.eflags.u32, u32Val));
6155 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid)
6156 {
6157 rc = VMXReadVmcs32(VMX_VMCS16_VPID, &u32Val); AssertRC(rc);
6158 Log4(("VMX_VMCS16_VPID %u\n", u32Val));
6159 }
6160
6161 /* Host bits. */
6162 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
6163 Log4(("Host CR0 %#RHr\n", uHCReg));
6164 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
6165 Log4(("Host CR3 %#RHr\n", uHCReg));
6166 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
6167 Log4(("Host CR4 %#RHr\n", uHCReg));
6168
6169 RTGDTR HostGdtr;
6170 PCX86DESCHC pDesc;
6171 ASMGetGDTR(&HostGdtr);
6172 rc = VMXReadVmcs32(VMX_VMCS16_HOST_CS_SEL, &u32Val); AssertRC(rc);
6173 Log4(("Host CS %#08x\n", u32Val));
6174 if (u32Val < HostGdtr.cbGdt)
6175 {
6176 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
6177 hmR0DumpDescriptor(pDesc, u32Val, "CS: ");
6178 }
6179
6180 rc = VMXReadVmcs32(VMX_VMCS16_HOST_DS_SEL, &u32Val); AssertRC(rc);
6181 Log4(("Host DS %#08x\n", u32Val));
6182 if (u32Val < HostGdtr.cbGdt)
6183 {
6184 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
6185 hmR0DumpDescriptor(pDesc, u32Val, "DS: ");
6186 }
6187
6188 rc = VMXReadVmcs32(VMX_VMCS16_HOST_ES_SEL, &u32Val); AssertRC(rc);
6189 Log4(("Host ES %#08x\n", u32Val));
6190 if (u32Val < HostGdtr.cbGdt)
6191 {
6192 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
6193 hmR0DumpDescriptor(pDesc, u32Val, "ES: ");
6194 }
6195
6196 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FS_SEL, &u32Val); AssertRC(rc);
6197 Log4(("Host FS %#08x\n", u32Val));
6198 if (u32Val < HostGdtr.cbGdt)
6199 {
6200 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
6201 hmR0DumpDescriptor(pDesc, u32Val, "FS: ");
6202 }
6203
6204 rc = VMXReadVmcs32(VMX_VMCS16_HOST_GS_SEL, &u32Val); AssertRC(rc);
6205 Log4(("Host GS %#08x\n", u32Val));
6206 if (u32Val < HostGdtr.cbGdt)
6207 {
6208 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
6209 hmR0DumpDescriptor(pDesc, u32Val, "GS: ");
6210 }
6211
6212 rc = VMXReadVmcs32(VMX_VMCS16_HOST_SS_SEL, &u32Val); AssertRC(rc);
6213 Log4(("Host SS %#08x\n", u32Val));
6214 if (u32Val < HostGdtr.cbGdt)
6215 {
6216 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
6217 hmR0DumpDescriptor(pDesc, u32Val, "SS: ");
6218 }
6219
6220 rc = VMXReadVmcs32(VMX_VMCS16_HOST_TR_SEL, &u32Val); AssertRC(rc);
6221 Log4(("Host TR %#08x\n", u32Val));
6222 if (u32Val < HostGdtr.cbGdt)
6223 {
6224 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
6225 hmR0DumpDescriptor(pDesc, u32Val, "TR: ");
6226 }
6227
6228 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
6229 Log4(("Host TR Base %#RHv\n", uHCReg));
6230 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
6231 Log4(("Host GDTR Base %#RHv\n", uHCReg));
6232 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
6233 Log4(("Host IDTR Base %#RHv\n", uHCReg));
6234 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
6235 Log4(("Host SYSENTER CS %#08x\n", u32Val));
6236 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
6237 Log4(("Host SYSENTER EIP %#RHv\n", uHCReg));
6238 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
6239 Log4(("Host SYSENTER ESP %#RHv\n", uHCReg));
6240 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
6241 Log4(("Host RSP %#RHv\n", uHCReg));
6242 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
6243 Log4(("Host RIP %#RHv\n", uHCReg));
6244# if HC_ARCH_BITS == 64
6245 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
6246 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
6247 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
6248 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
6249 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
6250 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
6251# endif
6252#endif /* VBOX_STRICT */
6253 break;
6254 }
6255
6256 default:
6257 /* Impossible */
6258 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
6259 break;
6260 }
6261}
6262
6263
6264#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
6265# ifndef VMX_USE_CACHED_VMCS_ACCESSES
6266# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
6267# endif
6268
6269/**
6270 * Initializes the VMCS read cache.
6271 *
6272 * The VMCS cache is used for 32-bit hosts running 64-bit guests (except 32-bit
6273 * Darwin which runs with 64-bit paging in 32-bit mode) for 64-bit fields that
6274 * cannot be accessed in 32-bit mode. Some 64-bit fields -can- be accessed
6275 * (those that have a 32-bit FULL & HIGH part).
6276 *
6277 * @param pVCpu The cross context virtual CPU structure.
6278 */
6279static void hmR0VmxInitVmcsReadCache(PVMCPU pVCpu)
6280{
6281#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
6282 do { \
6283 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
6284 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
6285 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
6286 ++cReadFields; \
6287 } while (0)
6288
6289 PVMXVMCSCACHE pCache = &pVCpu->hm.s.vmx.VmcsCache;
6290 uint32_t cReadFields = 0;
6291
6292 /*
6293 * Don't remove the #if 0'd fields in this code. They're listed here for consistency
6294 * and serve to indicate exceptions to the rules.
6295 */
6296
6297 /* Guest-natural selector base fields. */
6298#if 0
6299 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
6300 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
6301 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
6302#endif
6303 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
6304 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
6305 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
6306 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
6307 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
6308 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
6309 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
6310 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
6311 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
6312 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
6313 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
6314 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
6315#if 0
6316 /* Unused natural width guest-state fields. */
6317 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS);
6318 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in nested paging case */
6319#endif
6320 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
6321 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
6322
6323 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for
6324 these 64-bit fields (using "FULL" and "HIGH" fields). */
6325#if 0
6326 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
6327 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
6328 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
6329 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
6330 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
6331 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
6332 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
6333 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
6334 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
6335#endif
6336
6337 /* Natural width guest-state fields. */
6338 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
6339 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_GUEST_LINEAR_ADDR);
6340
6341 if (pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging)
6342 {
6343 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
6344 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
6345 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
6346 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
6347 }
6348 else
6349 {
6350 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
6351 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
6352 }
6353
6354#undef VMXLOCAL_INIT_READ_CACHE_FIELD
6355}
6356
6357
6358/**
6359 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
6360 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
6361 * darwin, running 64-bit guests).
6362 *
6363 * @returns VBox status code.
6364 * @param pVCpu The cross context virtual CPU structure.
6365 * @param idxField The VMCS field encoding.
6366 * @param u64Val 16, 32 or 64-bit value.
6367 */
6368VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
6369{
6370 int rc;
6371 switch (idxField)
6372 {
6373 /*
6374         * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
6375 */
6376 /* 64-bit Control fields. */
6377 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
6378 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
6379 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
6380 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
6381 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
6382 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
6383 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
6384 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
6385 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
6386 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
6387 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
6388 case VMX_VMCS64_CTRL_EPTP_FULL:
6389 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
6390 /* 64-bit Guest-state fields. */
6391 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
6392 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
6393 case VMX_VMCS64_GUEST_PAT_FULL:
6394 case VMX_VMCS64_GUEST_EFER_FULL:
6395 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
6396 case VMX_VMCS64_GUEST_PDPTE0_FULL:
6397 case VMX_VMCS64_GUEST_PDPTE1_FULL:
6398 case VMX_VMCS64_GUEST_PDPTE2_FULL:
6399 case VMX_VMCS64_GUEST_PDPTE3_FULL:
6400 /* 64-bit Host-state fields. */
6401 case VMX_VMCS64_HOST_PAT_FULL:
6402 case VMX_VMCS64_HOST_EFER_FULL:
6403 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
6404 {
6405 rc = VMXWriteVmcs32(idxField, RT_LO_U32(u64Val));
6406 rc |= VMXWriteVmcs32(idxField + 1, RT_HI_U32(u64Val));
6407 break;
6408 }
6409
6410 /*
6411 * These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
6412 * values). When we switch the host to 64-bit mode for running 64-bit guests, these VMWRITEs get executed then.
6413 */
6414 /* Natural-width Guest-state fields. */
6415 case VMX_VMCS_GUEST_CR3:
6416 case VMX_VMCS_GUEST_ES_BASE:
6417 case VMX_VMCS_GUEST_CS_BASE:
6418 case VMX_VMCS_GUEST_SS_BASE:
6419 case VMX_VMCS_GUEST_DS_BASE:
6420 case VMX_VMCS_GUEST_FS_BASE:
6421 case VMX_VMCS_GUEST_GS_BASE:
6422 case VMX_VMCS_GUEST_LDTR_BASE:
6423 case VMX_VMCS_GUEST_TR_BASE:
6424 case VMX_VMCS_GUEST_GDTR_BASE:
6425 case VMX_VMCS_GUEST_IDTR_BASE:
6426 case VMX_VMCS_GUEST_RSP:
6427 case VMX_VMCS_GUEST_RIP:
6428 case VMX_VMCS_GUEST_SYSENTER_ESP:
6429 case VMX_VMCS_GUEST_SYSENTER_EIP:
6430 {
6431 if (!(RT_HI_U32(u64Val)))
6432 {
6433 /* If this field is 64-bit, VT-x will zero out the top bits. */
6434 rc = VMXWriteVmcs32(idxField, RT_LO_U32(u64Val));
6435 }
6436 else
6437 {
6438 /* Assert that only the 32->64 switcher case should ever come here. */
6439 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
6440 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
6441 }
6442 break;
6443 }
6444
6445 default:
6446 {
6447 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
6448 pVCpu->hm.s.u32HMError = idxField;
6449 rc = VERR_INVALID_PARAMETER;
6450 break;
6451 }
6452 }
6453 AssertRCReturn(rc, rc);
6454 return rc;
6455}
6456
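/*
 * Illustrative usage only (the concrete values below are made up for the example): writing a
 * 64-bit field that has FULL/HIGH halves, e.g.
 *     VMXWriteVmcs64Ex(pVCpu, VMX_VMCS64_GUEST_EFER_FULL, pCtx->msrEFER);
 * is performed immediately as two 32-bit VMWRITEs, whereas writing a natural-width guest field
 * whose value has a non-zero high dword, e.g.
 *     VMXWriteVmcs64Ex(pVCpu, VMX_VMCS_GUEST_FS_BASE, UINT64_C(0xffffd00000001000));
 * is queued in the VMCS write cache and only performed once the host has switched to 64-bit mode.
 */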
6457
6458/**
6459 * Queues up a VMWRITE by using the VMCS write cache.
6460 * This is only used on 32-bit hosts (except darwin) for 64-bit guests.
6461 *
6462 * @param pVCpu The cross context virtual CPU structure.
6463 * @param idxField The VMCS field encoding.
6464 * @param u64Val 16, 32 or 64-bit value.
6465 */
6466VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
6467{
6468 AssertPtr(pVCpu);
6469 PVMXVMCSCACHE pCache = &pVCpu->hm.s.vmx.VmcsCache;
6470
6471 AssertMsgReturn(pCache->Write.cValidEntries < VMX_VMCS_CACHE_MAX_ENTRY - 1,
6472 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
6473
6474 /* Make sure there are no duplicates. */
6475 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
6476 {
6477 if (pCache->Write.aField[i] == idxField)
6478 {
6479 pCache->Write.aFieldVal[i] = u64Val;
6480 return VINF_SUCCESS;
6481 }
6482 }
6483
6484 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
6485 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
6486 pCache->Write.cValidEntries++;
6487 return VINF_SUCCESS;
6488}
6489#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) */
6490
6491
6492/**
6493 * Sets up the usage of TSC-offsetting and updates the VMCS.
6494 *
6495 * If offsetting is not possible, cause VM-exits on RDTSC(P)s. Also sets up the
6496 * VMX-preemption timer.
6497 *
6499 * @param pVCpu The cross context virtual CPU structure.
6500 * @param pVmxTransient The VMX-transient structure.
6501 *
6502 * @remarks No-long-jump zone!!!
6503 */
6504static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
6505{
6506 bool fOffsettedTsc;
6507 bool fParavirtTsc;
6508 uint64_t uTscOffset;
6509 PVM pVM = pVCpu->CTX_SUFF(pVM);
6510 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
6511
6512 if (pVM->hm.s.vmx.fUsePreemptTimer)
6513 {
6514 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &uTscOffset, &fOffsettedTsc, &fParavirtTsc);
6515
6516 /* Make sure the returned values have sane upper and lower boundaries. */
6517 uint64_t u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
6518 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
6519 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
6520 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
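        /* Illustrative numbers only, assuming a 3 GHz TSC and cPreemptTimerShift = 5: the deadline is
           clamped to [3'000'000'000 / 2048, 3'000'000'000 / 64] = [~1.46M, ~46.9M] TSC ticks (roughly
           0.5 ms to 15.6 ms) and then divided by 2^5 = 32, giving a programmed preemption-timer value
           between ~45'776 and ~1'464'843 ticks. */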
6521
6522 /** @todo r=ramshankar: We need to find a way to integrate nested-guest
6523 * preemption timers here. We probably need to clamp the preemption timer,
6524 * after converting the timer value to the host. */
6525 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
6526 int rc = VMXWriteVmcs32(VMX_VMCS32_PREEMPT_TIMER_VALUE, cPreemptionTickCount);
6527 AssertRC(rc);
6528 }
6529 else
6530 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
6531
6532 if (fParavirtTsc)
6533 {
6534        /* Currently neither Hyper-V nor KVM needs to update their paravirt. TSC
6535           information before every VM-entry, hence we disable it for performance's sake. */
6536#if 0
6537 int rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
6538 AssertRC(rc);
6539#endif
6540 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
6541 }
6542
6543 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
6544 if ( fOffsettedTsc
6545 && RT_LIKELY(!pVCpu->hm.s.fDebugWantRdTscExit))
6546 {
6547 if (pVmxTransient->fIsNestedGuest)
6548 uTscOffset = CPUMApplyNestedGuestTscOffset(pVCpu, uTscOffset);
6549 if (pVmcsInfo->u64TscOffset != uTscOffset)
6550 {
6551 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, uTscOffset);
6552 AssertRC(rc);
6553 pVmcsInfo->u64TscOffset = uTscOffset;
6554 }
6555
6556 if (uProcCtls & VMX_PROC_CTLS_RDTSC_EXIT)
6557 {
6558 uProcCtls &= ~VMX_PROC_CTLS_RDTSC_EXIT;
6559 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
6560 AssertRC(rc);
6561 pVmcsInfo->u32ProcCtls = uProcCtls;
6562 }
6563 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
6564 }
6565 else
6566 {
6567 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
6568 if (!(uProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
6569 {
6570 uProcCtls |= VMX_PROC_CTLS_RDTSC_EXIT;
6571 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
6572 AssertRC(rc);
6573 pVmcsInfo->u32ProcCtls = uProcCtls;
6574 }
6575 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
6576 }
6577}
6578
6579
6580/**
6581 * Gets the IEM exception flags for the specified vector and IDT vectoring /
6582 * VM-exit interruption info type.
6583 *
6584 * @returns The IEM exception flags.
6585 * @param uVector The event vector.
6586 * @param uVmxEventType The VMX event type.
6587 *
6588 * @remarks This function currently only constructs flags required for
6589 *          IEMEvaluateRecursiveXcpt and not the complete flags (e.g., error-code
6590 * and CR2 aspects of an exception are not included).
6591 */
6592static uint32_t hmR0VmxGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
6593{
6594 uint32_t fIemXcptFlags;
6595 switch (uVmxEventType)
6596 {
6597 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
6598 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
6599 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
6600 break;
6601
6602 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
6603 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
6604 break;
6605
6606 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
6607 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
6608 break;
6609
6610 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
6611 {
6612 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
6613 if (uVector == X86_XCPT_BP)
6614 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
6615 else if (uVector == X86_XCPT_OF)
6616 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
6617 else
6618 {
6619 fIemXcptFlags = 0;
6620 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
6621 }
6622 break;
6623 }
6624
6625 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
6626 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
6627 break;
6628
6629 default:
6630 fIemXcptFlags = 0;
6631 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
6632 break;
6633 }
6634 return fIemXcptFlags;
6635}
6636
6637
6638/**
6639 * Sets an event as a pending event to be injected into the guest.
6640 *
6641 * @param pVCpu The cross context virtual CPU structure.
6642 * @param u32IntInfo The VM-entry interruption-information field.
6643 * @param cbInstr The VM-entry instruction length in bytes (for software
6644 * interrupts, exceptions and privileged software
6645 * exceptions).
6646 * @param u32ErrCode The VM-entry exception error code.
6647 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
6648 * page-fault.
6649 */
6650DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
6651 RTGCUINTPTR GCPtrFaultAddress)
6652{
6653 Assert(!pVCpu->hm.s.Event.fPending);
6654 pVCpu->hm.s.Event.fPending = true;
6655 pVCpu->hm.s.Event.u64IntInfo = u32IntInfo;
6656 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
6657 pVCpu->hm.s.Event.cbInstr = cbInstr;
6658 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
6659}
6660
6661
6662/**
6663 * Sets an external interrupt as pending-for-injection into the VM.
6664 *
6665 * @param pVCpu The cross context virtual CPU structure.
6666 * @param u8Interrupt The external interrupt vector.
6667 */
6668DECLINLINE(void) hmR0VmxSetPendingExtInt(PVMCPU pVCpu, uint8_t u8Interrupt)
6669{
6670    uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, u8Interrupt)
6671 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
6672 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
6673 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
6674 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6675}
6676
6677
6678/**
6679 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
6680 *
6681 * @param pVCpu The cross context virtual CPU structure.
6682 */
6683DECLINLINE(void) hmR0VmxSetPendingXcptNmi(PVMCPU pVCpu)
6684{
6685 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
6686 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
6687 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
6688 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
6689 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6690}
6691
6692
6693/**
6694 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
6695 *
6696 * @param pVCpu The cross context virtual CPU structure.
6697 */
6698DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu)
6699{
6700 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
6701 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
6702 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
6703 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
6704 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6705}
6706
6707
6708/**
6709 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
6710 *
6711 * @param pVCpu The cross context virtual CPU structure.
6712 */
6713DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu)
6714{
6715 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
6716 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
6717 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
6718 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
6719 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6720}
6721
6722
6723/**
6724 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
6725 *
6726 * @param pVCpu The cross context virtual CPU structure.
6727 */
6728DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu)
6729{
6730 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
6731 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
6732 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
6733 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
6734 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6735}
6736
6737
6738#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6739/**
6740 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
6741 *
6742 * @param pVCpu The cross context virtual CPU structure.
6743 * @param u32ErrCode The error code for the general-protection exception.
6744 */
6745DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPU pVCpu, uint32_t u32ErrCode)
6746{
6747 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
6748 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
6749 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
6750 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
6751 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
6752}
6753
6754
6755/**
6756 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
6757 *
6758 * @param pVCpu The cross context virtual CPU structure.
6759 * @param u32ErrCode The error code for the stack exception.
6760 */
6761DECLINLINE(void) hmR0VmxSetPendingXcptSS(PVMCPU pVCpu, uint32_t u32ErrCode)
6762{
6763 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
6764 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
6765 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
6766 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
6767 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
6768}
6769
6770
6771/**
6772 * Decodes the memory operand of an instruction that caused a VM-exit.
6773 *
6774 * The VM-exit qualification field provides the displacement field for memory
6775 * operand instructions, if any.
6776 *
6777 * @returns Strict VBox status code (i.e. informational status codes too).
6778 * @retval VINF_SUCCESS if the operand was successfully decoded.
6779 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
6780 * operand.
6781 * @param pVCpu The cross context virtual CPU structure.
6782 * @param uExitInstrInfo The VM-exit instruction information field.
6783 * @param enmMemAccess The memory operand's access type (read or write).
6784 * @param GCPtrDisp The instruction displacement field, if any. For
6785 * RIP-relative addressing pass RIP + displacement here.
6786 * @param pGCPtrMem Where to store the effective destination memory address.
6787 */
6788static VBOXSTRICTRC hmR0VmxDecodeMemOperand(PVMCPU pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
6789 PRTGCPTR pGCPtrMem)
6790{
6791 Assert(pGCPtrMem);
6792 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
6793 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
6794 | CPUMCTX_EXTRN_CR0);
6795
6796 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6797 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
6798 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
6799
6800 VMXEXITINSTRINFO ExitInstrInfo;
6801 ExitInstrInfo.u = uExitInstrInfo;
6802 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
6803 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
6804 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
6805 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
6806 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
6807 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
6808 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
6809 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
6810 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
6811
6812 /*
6813 * Validate instruction information.
6814     * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
6815 */
6816 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
6817 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
6818 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
6819 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
6820 AssertLogRelMsgReturn(fIsMemOperand,
6821 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
6822
6823 /*
6824 * Compute the complete effective address.
6825 *
6826 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
6827 * See AMD spec. 4.5.2 "Segment Registers".
6828 */
6829 RTGCPTR GCPtrMem = GCPtrDisp;
6830 if (fBaseRegValid)
6831 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
6832 if (fIdxRegValid)
6833 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
6834
6835 RTGCPTR const GCPtrOff = GCPtrMem;
6836 if ( !fIsLongMode
6837 || iSegReg >= X86_SREG_FS)
6838 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6839 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
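    /* Worked example with made-up values: a 32-bit address-size access with disp=0x10, base=RBX=0x1000,
       index=RSI=0x20, scale=4 and DS (base 0) outside long mode yields
       GCPtrMem = (0x10 + 0x1000 + (0x20 << 2)) & 0xffffffff = 0x1090. */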
6840
6841 /*
6842 * Validate effective address.
6843 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
6844 */
6845 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
6846 Assert(cbAccess > 0);
6847 if (fIsLongMode)
6848 {
6849 if (X86_IS_CANONICAL(GCPtrMem))
6850 {
6851 *pGCPtrMem = GCPtrMem;
6852 return VINF_SUCCESS;
6853 }
6854
6855 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
6856 * "Data Limit Checks in 64-bit Mode". */
6857 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
6858 hmR0VmxSetPendingXcptGP(pVCpu, 0);
6859 return VINF_HM_PENDING_XCPT;
6860 }
6861
6862 /*
6863 * This is a watered down version of iemMemApplySegment().
6864 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
6865 * and segment CPL/DPL checks are skipped.
6866 */
6867 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
6868 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
6869 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6870
6871 /* Check if the segment is present and usable. */
6872 if ( pSel->Attr.n.u1Present
6873 && !pSel->Attr.n.u1Unusable)
6874 {
6875 Assert(pSel->Attr.n.u1DescType);
6876 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
6877 {
6878 /* Check permissions for the data segment. */
6879 if ( enmMemAccess == VMXMEMACCESS_WRITE
6880 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
6881 {
6882 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6883 hmR0VmxSetPendingXcptGP(pVCpu, iSegReg);
6884 return VINF_HM_PENDING_XCPT;
6885 }
6886
6887 /* Check limits if it's a normal data segment. */
6888 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
6889 {
6890 if ( GCPtrFirst32 > pSel->u32Limit
6891 || GCPtrLast32 > pSel->u32Limit)
6892 {
6893                    Log4Func(("Data segment limit exceeded. "
6894 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6895 GCPtrLast32, pSel->u32Limit));
6896 if (iSegReg == X86_SREG_SS)
6897 hmR0VmxSetPendingXcptSS(pVCpu, 0);
6898 else
6899 hmR0VmxSetPendingXcptGP(pVCpu, 0);
6900 return VINF_HM_PENDING_XCPT;
6901 }
6902 }
6903 else
6904 {
6905 /* Check limits if it's an expand-down data segment.
6906 Note! The upper boundary is defined by the B bit, not the G bit! */
6907 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
6908 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
6909 {
6910                    Log4Func(("Expand-down data segment limit exceeded. "
6911 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
6912 GCPtrLast32, pSel->u32Limit));
6913 if (iSegReg == X86_SREG_SS)
6914 hmR0VmxSetPendingXcptSS(pVCpu, 0);
6915 else
6916 hmR0VmxSetPendingXcptGP(pVCpu, 0);
6917 return VINF_HM_PENDING_XCPT;
6918 }
6919 }
6920 }
6921 else
6922 {
6923 /* Check permissions for the code segment. */
6924 if ( enmMemAccess == VMXMEMACCESS_WRITE
6925 || ( enmMemAccess == VMXMEMACCESS_READ
6926 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
6927 {
6928 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
6929 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
6930 hmR0VmxSetPendingXcptGP(pVCpu, 0);
6931 return VINF_HM_PENDING_XCPT;
6932 }
6933
6934 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
6935 if ( GCPtrFirst32 > pSel->u32Limit
6936 || GCPtrLast32 > pSel->u32Limit)
6937 {
6938 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
6939 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
6940 if (iSegReg == X86_SREG_SS)
6941 hmR0VmxSetPendingXcptSS(pVCpu, 0);
6942 else
6943 hmR0VmxSetPendingXcptGP(pVCpu, 0);
6944 return VINF_HM_PENDING_XCPT;
6945 }
6946 }
6947 }
6948 else
6949 {
6950 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
6951 hmR0VmxSetPendingXcptGP(pVCpu, 0);
6952 return VINF_HM_PENDING_XCPT;
6953 }
6954
6955 *pGCPtrMem = GCPtrMem;
6956 return VINF_SUCCESS;
6957}
6958
6959
6960/**
6961 * Performs the relevant VMX instruction checks for VM-exits that occurred due to the
6962 * guest attempting to execute a VMX instruction.
6963 *
6964 * @returns Strict VBox status code (i.e. informational status codes too).
6965 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
6966 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
6967 *
6968 * @param pVCpu The cross context virtual CPU structure.
6969 * @param uExitReason The VM-exit reason.
6970 *
6971 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
6972 * @remarks No-long-jump zone!!!
6973 */
6974static VBOXSTRICTRC hmR0VmxCheckExitDueToVmxInstr(PVMCPU pVCpu, uint32_t uExitReason)
6975{
6976 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
6977 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
6978
6979 if ( CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx)
6980 || ( CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
6981 && !CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx)))
6982 {
6983 Log4Func(("In real/v86-mode or long-mode outside 64-bit code segment -> #UD\n"));
6984 hmR0VmxSetPendingXcptUD(pVCpu);
6985 return VINF_HM_PENDING_XCPT;
6986 }
6987
6988 if (uExitReason == VMX_EXIT_VMXON)
6989 {
6990 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
6991
6992 /*
6993 * We check CR4.VMXE because it is required to be always set while in VMX operation
6994 * by physical CPUs and our CR4 read shadow is only consulted when executing specific
6995 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
6996 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
6997 */
6998 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
6999 {
7000 Log4Func(("CR4.VMXE is not set -> #UD\n"));
7001 hmR0VmxSetPendingXcptUD(pVCpu);
7002 return VINF_HM_PENDING_XCPT;
7003 }
7004 }
7005 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
7006 {
7007 /*
7008 * The guest has not entered VMX operation but attempted to execute a VMX instruction
7009         * (other than VMXON), so we need to raise a #UD.
7010 */
7011 Log4Func(("Not in VMX root mode -> #UD\n"));
7012 hmR0VmxSetPendingXcptUD(pVCpu);
7013 return VINF_HM_PENDING_XCPT;
7014 }
7015
7016 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
7017 {
7018 /*
7019 * The nested-guest attempted to execute a VMX instruction, cause a VM-exit and let
7020 * the guest hypervisor deal with it.
7021 */
7022 /** @todo NSTVMX: Trigger a VM-exit */
7023 }
7024
7025 /*
7026 * VMX instructions require CPL 0 except in VMX non-root mode where the VM-exit intercept
7027     * (above) takes precedence over the CPL check.
7028 */
7029 if (CPUMGetGuestCPL(pVCpu) > 0)
7030 {
7031 Log4Func(("CPL > 0 -> #GP(0)\n"));
7032 hmR0VmxSetPendingXcptGP(pVCpu, 0);
7033 return VINF_HM_PENDING_XCPT;
7034 }
7035
7036 return VINF_SUCCESS;
7037}
7038#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
7039
7040
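/**
 * Fixes up the attributes of a segment register reported as unusable by VT-x.
 *
 * Strips the attribute bits that have no defined meaning for unusable segments,
 * most importantly the present bit, so the rest of VBox treats the selector
 * consistently.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pSelReg     The segment register to fix up.
 * @param   idxSel      The VMCS field encoding of the segment selector (used for
 *                      logging/asserting only).
 */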
7041static void hmR0VmxFixUnusableSegRegAttr(PVMCPU pVCpu, PCPUMSELREG pSelReg, uint32_t idxSel)
7042{
7043 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
7044
7045 /*
7046 * If VT-x marks the segment as unusable, most other bits remain undefined:
7047 * - For CS the L, D and G bits have meaning.
7048 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
7049 * - For the remaining data segments no bits are defined.
7050 *
7051     * The present bit and the unusable bit have been observed to be set at the
7052 * same time (the selector was supposed to be invalid as we started executing
7053 * a V8086 interrupt in ring-0).
7054 *
7055 * What should be important for the rest of the VBox code, is that the P bit is
7056 * cleared. Some of the other VBox code recognizes the unusable bit, but
7057     * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
7058 * safe side here, we'll strip off P and other bits we don't care about. If
7059 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
7060 *
7061 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
7062 */
7063#ifdef VBOX_STRICT
7064 uint32_t const uAttr = pSelReg->Attr.u;
7065#endif
7066
7067 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
7068 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
7069 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
7070
7071#ifdef VBOX_STRICT
7072 VMMRZCallRing3Disable(pVCpu);
7073 Log4Func(("Unusable %#x: sel=%#x attr=%#x -> %#x\n", idxSel, pSelReg->Sel, uAttr, pSelReg->Attr.u));
7074# ifdef DEBUG_bird
7075 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
7076 ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
7077 idxSel, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
7078# endif
7079 VMMRZCallRing3Enable(pVCpu);
7080 NOREF(uAttr);
7081#endif
7082 RT_NOREF2(pVCpu, idxSel);
7083}
7084
7085
7086/**
7087 * Imports a guest segment register from the current VMCS into the guest-CPU
7088 * context.
7089 *
7090 * @returns VBox status code.
7091 * @param pVCpu The cross context virtual CPU structure.
7092 * @param iSegReg The segment register number (X86_SREG_XXX).
7093 *
7094 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
7095 * do not log!
7096 */
7097static int hmR0VmxImportGuestSegReg(PVMCPU pVCpu, uint8_t iSegReg)
7098{
7099 Assert(iSegReg < X86_SREG_COUNT);
7100
7101 uint32_t const idxSel = g_aVmcsSegSel[iSegReg];
7102 uint32_t const idxLimit = g_aVmcsSegLimit[iSegReg];
7103 uint32_t const idxAttr = g_aVmcsSegAttr[iSegReg];
7104#ifdef VMX_USE_CACHED_VMCS_ACCESSES
7105 uint32_t const idxBase = g_aVmcsCacheSegBase[iSegReg];
7106#else
7107 uint32_t const idxBase = g_aVmcsSegBase[iSegReg];
7108#endif
7109 uint64_t u64Base;
7110 uint32_t u32Sel, u32Limit, u32Attr;
7111 int rc = VMXReadVmcs32(idxSel, &u32Sel);
7112 rc |= VMXReadVmcs32(idxLimit, &u32Limit);
7113 rc |= VMXReadVmcs32(idxAttr, &u32Attr);
7114 rc |= VMXReadVmcsGstNByIdxVal(idxBase, &u64Base);
7115 if (RT_SUCCESS(rc))
7116 {
7117 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
7118 pSelReg->Sel = u32Sel;
7119 pSelReg->ValidSel = u32Sel;
7120 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
7121 pSelReg->u32Limit = u32Limit;
7122 pSelReg->u64Base = u64Base;
7123 pSelReg->Attr.u = u32Attr;
7124 if (u32Attr & X86DESCATTR_UNUSABLE)
7125 hmR0VmxFixUnusableSegRegAttr(pVCpu, pSelReg, idxSel);
7126 }
7127 return rc;
7128}
7129
7130
7131/**
7132 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
7133 *
7134 * @returns VBox status code.
7135 * @param pVCpu The cross context virtual CPU structure.
7136 *
7137 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
7138 * do not log!
7139 */
7140static int hmR0VmxImportGuestLdtr(PVMCPU pVCpu)
7141{
7142 uint64_t u64Base;
7143 uint32_t u32Sel, u32Limit, u32Attr;
7144 int rc = VMXReadVmcs32(VMX_VMCS16_GUEST_LDTR_SEL, &u32Sel);
7145 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit);
7146 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr);
7147 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, &u64Base);
7148 if (RT_SUCCESS(rc))
7149 {
7150 pVCpu->cpum.GstCtx.ldtr.Sel = u32Sel;
7151 pVCpu->cpum.GstCtx.ldtr.ValidSel = u32Sel;
7152 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
7153 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
7154 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
7155 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
7156 if (u32Attr & X86DESCATTR_UNUSABLE)
7157 hmR0VmxFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, VMX_VMCS16_GUEST_LDTR_SEL);
7158 }
7159 return rc;
7160}
7161
7162
7163/**
7164 * Imports the guest TR from the current VMCS into the guest-CPU context.
7165 *
7166 * @returns VBox status code.
7167 * @param pVCpu The cross context virtual CPU structure.
7168 *
7169 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
7170 * do not log!
7171 */
7172static int hmR0VmxImportGuestTr(PVMCPU pVCpu)
7173{
7174 uint32_t u32Sel, u32Limit, u32Attr;
7175 uint64_t u64Base;
7176 int rc = VMXReadVmcs32(VMX_VMCS16_GUEST_TR_SEL, &u32Sel);
7177 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit);
7178 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr);
7179 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_TR_BASE, &u64Base);
7180 AssertRCReturn(rc, rc);
7181
7182 pVCpu->cpum.GstCtx.tr.Sel = u32Sel;
7183 pVCpu->cpum.GstCtx.tr.ValidSel = u32Sel;
7184 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
7185 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
7186 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
7187 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
7188 /* TR is the only selector that can never be unusable. */
7189 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
7190 return VINF_SUCCESS;
7191}
7192
7193
7194/**
7195 * Imports the guest RIP from the VMCS back into the guest-CPU context.
7196 *
7197 * @returns VBox status code.
7198 * @param pVCpu The cross context virtual CPU structure.
7199 *
7200 * @remarks Called with interrupts and/or preemption disabled, should not assert!
7201 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
7202 * instead!!!
7203 */
7204static int hmR0VmxImportGuestRip(PVMCPU pVCpu)
7205{
7206 uint64_t u64Val;
7207 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7208 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
7209 {
7210 int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
7211 if (RT_SUCCESS(rc))
7212 {
7213 pCtx->rip = u64Val;
7214 EMR0HistoryUpdatePC(pVCpu, pCtx->rip, false);
7215 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
7216 }
7217 return rc;
7218 }
7219 return VINF_SUCCESS;
7220}
7221
7222
7223/**
7224 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
7225 *
7226 * @returns VBox status code.
7227 * @param pVCpu The cross context virtual CPU structure.
7228 * @param pVmcsInfo The VMCS info. object.
7229 *
7230 * @remarks Called with interrupts and/or preemption disabled, should not assert!
7231 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
7232 * instead!!!
7233 */
7234static int hmR0VmxImportGuestRFlags(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
7235{
7236 uint32_t u32Val;
7237 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7238 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
7239 {
7240 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val);
7241 if (RT_SUCCESS(rc))
7242 {
7243 pCtx->eflags.u32 = u32Val;
7244
7245 /* Restore eflags for real-on-v86-mode hack. */
7246 if (pVmcsInfo->RealMode.fRealOnV86Active)
7247 {
7248 pCtx->eflags.Bits.u1VM = 0;
7249 pCtx->eflags.Bits.u2IOPL = pVmcsInfo->RealMode.Eflags.Bits.u2IOPL;
7250 }
7251 }
7252 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
7253 return rc;
7254 }
7255 return VINF_SUCCESS;
7256}
7257
7258
7259/**
7260 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
7261 * context.
7262 *
7263 * @returns VBox status code.
7264 * @param pVCpu The cross context virtual CPU structure.
7265 * @param pVmcsInfo The VMCS info. object.
7266 *
7267 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
7268 * do not log!
7269 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
7270 * instead!!!
7271 */
7272static int hmR0VmxImportGuestIntrState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
7273{
7274 uint32_t u32Val;
7275 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &u32Val);
7276 if (RT_SUCCESS(rc))
7277 {
7278 if (!u32Val)
7279 {
7280 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7281 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7282
7283 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
7284 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
7285 }
7286 else
7287 {
7288 /*
7289 * We must import RIP here to set our EM interrupt-inhibited state.
7290 * We also import RFLAGS as our code that evaluates pending interrupts
7291 * before VM-entry requires it.
7292 */
7293 rc = hmR0VmxImportGuestRip(pVCpu);
7294 rc |= hmR0VmxImportGuestRFlags(pVCpu, pVmcsInfo);
7295 if (RT_SUCCESS(rc))
7296 {
7297 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
7298 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
7299 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7300 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7301
7302 if (u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
7303 {
7304 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
7305 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
7306 }
7307 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
7308 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
7309 }
7310 }
7311 }
7312 return rc;
7313}
7314
7315
7316/**
7317 * Worker for VMXR0ImportStateOnDemand.
7318 *
7319 * @returns VBox status code.
7320 * @param pVCpu The cross context virtual CPU structure.
7321 * @param pVmcsInfo The VMCS info. object.
7322 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
7323 */
7324static int hmR0VmxImportGuestState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
7325{
7326#define VMXLOCAL_BREAK_RC(a_rc) \
7327 if (RT_SUCCESS(a_rc)) \
7328 { } \
7329 else \
7330 break
7331
7332 int rc = VINF_SUCCESS;
7333 PVM pVM = pVCpu->CTX_SUFF(pVM);
7334 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7335 uint64_t u64Val;
7336 uint32_t u32Val;
7337
7338 /*
7339     * Note! This is a hack to work around a mysterious BSOD observed with release builds
7340 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
7341 * neither are other host platforms.
7342 *
7343 * Committing this temporarily as it prevents BSOD.
7344 *
7345 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
7346 */
7347#ifdef RT_OS_WINDOWS
7348 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
7349 return VERR_HM_IPE_1;
7350#endif
7351
7352 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x);
7353
7354 /*
7355 * We disable interrupts to make the updating of the state and in particular
7356     * the fExtrn modification atomic with respect to preemption hooks.
7357 */
7358 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
7359
7360 fWhat &= pCtx->fExtrn;
7361 if (fWhat)
7362 {
7363 do
7364 {
7365 if (fWhat & CPUMCTX_EXTRN_RIP)
7366 {
7367 rc = hmR0VmxImportGuestRip(pVCpu);
7368 VMXLOCAL_BREAK_RC(rc);
7369 }
7370
7371 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
7372 {
7373 rc = hmR0VmxImportGuestRFlags(pVCpu, pVmcsInfo);
7374 VMXLOCAL_BREAK_RC(rc);
7375 }
7376
7377 if (fWhat & CPUMCTX_EXTRN_HM_VMX_INT_STATE)
7378 {
7379 rc = hmR0VmxImportGuestIntrState(pVCpu, pVmcsInfo);
7380 VMXLOCAL_BREAK_RC(rc);
7381 }
7382
7383 if (fWhat & CPUMCTX_EXTRN_RSP)
7384 {
7385 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
7386 VMXLOCAL_BREAK_RC(rc);
7387 pCtx->rsp = u64Val;
7388 }
7389
7390 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
7391 {
7392 bool const fRealOnV86Active = pVmcsInfo->RealMode.fRealOnV86Active;
7393 if (fWhat & CPUMCTX_EXTRN_CS)
7394 {
7395 rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_CS);
7396 rc |= hmR0VmxImportGuestRip(pVCpu);
7397 if (fRealOnV86Active)
7398 pCtx->cs.Attr.u = pVmcsInfo->RealMode.AttrCS.u;
7399 EMR0HistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
7400 }
7401 if (fWhat & CPUMCTX_EXTRN_SS)
7402 {
7403 rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_SS);
7404 if (fRealOnV86Active)
7405 pCtx->ss.Attr.u = pVmcsInfo->RealMode.AttrSS.u;
7406 }
7407 if (fWhat & CPUMCTX_EXTRN_DS)
7408 {
7409 rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_DS);
7410 if (fRealOnV86Active)
7411 pCtx->ds.Attr.u = pVmcsInfo->RealMode.AttrDS.u;
7412 }
7413 if (fWhat & CPUMCTX_EXTRN_ES)
7414 {
7415 rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_ES);
7416 if (fRealOnV86Active)
7417 pCtx->es.Attr.u = pVmcsInfo->RealMode.AttrES.u;
7418 }
7419 if (fWhat & CPUMCTX_EXTRN_FS)
7420 {
7421 rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_FS);
7422 if (fRealOnV86Active)
7423 pCtx->fs.Attr.u = pVmcsInfo->RealMode.AttrFS.u;
7424 }
7425 if (fWhat & CPUMCTX_EXTRN_GS)
7426 {
7427 rc |= hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_GS);
7428 if (fRealOnV86Active)
7429 pCtx->gs.Attr.u = pVmcsInfo->RealMode.AttrGS.u;
7430 }
7431 VMXLOCAL_BREAK_RC(rc);
7432 }
7433
7434 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
7435 {
7436 if (fWhat & CPUMCTX_EXTRN_LDTR)
7437 rc |= hmR0VmxImportGuestLdtr(pVCpu);
7438
7439 if (fWhat & CPUMCTX_EXTRN_GDTR)
7440 {
7441 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
7442 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
7443 pCtx->gdtr.pGdt = u64Val;
7444 pCtx->gdtr.cbGdt = u32Val;
7445 }
7446
7447 /* Guest IDTR. */
7448 if (fWhat & CPUMCTX_EXTRN_IDTR)
7449 {
7450 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
7451 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
7452 pCtx->idtr.pIdt = u64Val;
7453 pCtx->idtr.cbIdt = u32Val;
7454 }
7455
7456 /* Guest TR. */
7457 if (fWhat & CPUMCTX_EXTRN_TR)
7458 {
7459                /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR;
7460                   we don't need to import that one. */
7461 if (!pVmcsInfo->RealMode.fRealOnV86Active)
7462 rc |= hmR0VmxImportGuestTr(pVCpu);
7463 }
7464 VMXLOCAL_BREAK_RC(rc);
7465 }
7466
7467 if (fWhat & CPUMCTX_EXTRN_DR7)
7468 {
7469 if (!pVCpu->hm.s.fUsingHyperDR7)
7470 {
7471 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
7472 rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val);
7473 VMXLOCAL_BREAK_RC(rc);
7474 pCtx->dr[7] = u32Val;
7475 }
7476 }
7477
7478 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
7479 {
7480 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip);
7481 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp);
7482 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val);
7483 pCtx->SysEnter.cs = u32Val;
7484 VMXLOCAL_BREAK_RC(rc);
7485 }
7486
7487#if HC_ARCH_BITS == 64
7488 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
7489 {
7490 if ( pVM->hm.s.fAllow64BitGuests
7491 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
7492 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
7493 }
7494
7495 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
7496 {
7497 if ( pVM->hm.s.fAllow64BitGuests
7498 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
7499 {
7500 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
7501 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
7502 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
7503 }
7504 }
7505#endif
7506
7507 if ( (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
7508#if HC_ARCH_BITS == 32
7509 || (fWhat & (CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS))
7510#endif
7511 )
7512 {
7513 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
7514 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
7515 Assert(pMsrs);
7516 Assert(cMsrs <= VMX_MISC_MAX_MSRS(pVM->hm.s.vmx.Msrs.u64Misc));
7517 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
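              /* Each entry in the VM-exit MSR-store area is 16 bytes: the 32-bit MSR index, 32 reserved
                 bits and the 64-bit value the CPU saved at VM-exit (per the Intel spec's layout for the
                 VM-exit MSR-store area). */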
7518 for (uint32_t i = 0; i < cMsrs; i++)
7519 {
7520 uint32_t const idMsr = pMsrs[i].u32Msr;
7521 switch (idMsr)
7522 {
7523 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
7524 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
7525 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
7526#if HC_ARCH_BITS == 32
7527 case MSR_K8_LSTAR: pCtx->msrLSTAR = pMsrs[i].u64Value; break;
7528 case MSR_K6_STAR: pCtx->msrSTAR = pMsrs[i].u64Value; break;
7529 case MSR_K8_SF_MASK: pCtx->msrSFMASK = pMsrs[i].u64Value; break;
7530 case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsrs[i].u64Value; break;
7531#endif
7532 default:
7533 {
7534 pCtx->fExtrn = 0;
7535 pVCpu->hm.s.u32HMError = pMsrs->u32Msr;
7536 ASMSetFlags(fEFlags);
7537 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
7538 return VERR_HM_UNEXPECTED_LD_ST_MSR;
7539 }
7540 }
7541 }
7542 }
7543
7544 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
7545 {
7546 uint64_t u64Shadow;
7547 if (fWhat & CPUMCTX_EXTRN_CR0)
7548 {
7549 /** @todo r=ramshankar: We only read 32-bits here for legacy/convenience reasons,
7550 * remove when we drop 32-bit host w/ 64-bit host support, see
7551 * @bugref{9180#c39}. */
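                 /* Bits covered by the CR0 guest/host mask are owned by the host; the values the guest
                    sees for those bits live in the CR0 read shadow. Blending the VMCS guest CR0 with the
                    read shadow below reconstructs the CR0 value the guest believes it has. */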
7552 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val);
7553#if HC_ARCH_BITS == 32
7554 uint32_t u32Shadow;
7555 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u32Shadow);
7556 u64Shadow = u32Shadow;
7557#else
7558 rc |= VMXReadVmcs64(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow);
7559#endif
7560 VMXLOCAL_BREAK_RC(rc);
7561 u64Val = u32Val;
7562 u64Val = (u64Val & ~pVmcsInfo->u64Cr0Mask)
7563 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
7564 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
7565 CPUMSetGuestCR0(pVCpu, u64Val);
7566 VMMRZCallRing3Enable(pVCpu);
7567 }
7568
7569 if (fWhat & CPUMCTX_EXTRN_CR4)
7570 {
7571 /** @todo r=ramshankar: We only read 32-bits here for legacy/convenience reasons,
7572 * remove when we drop 32-bit host w/ 64-bit host support, see
7573 * @bugref{9180#c39}. */
7574 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32Val);
7575#if HC_ARCH_BITS == 32
7576 uint32_t u32Shadow;
7577 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u32Shadow);
7578 u64Shadow = u32Shadow;
7579#else
7580 rc |= VMXReadVmcs64(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow);
7581#endif
7582 VMXLOCAL_BREAK_RC(rc);
7583 u64Val = u32Val;
7584 u64Val = (u64Val & ~pVmcsInfo->u64Cr4Mask)
7585 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
7586 pCtx->cr4 = u64Val;
7587 }
7588
7589 if (fWhat & CPUMCTX_EXTRN_CR3)
7590 {
7591 /* CR0.PG bit changes are always intercepted, so it's up to date. */
7592 if ( pVM->hm.s.vmx.fUnrestrictedGuest
7593 || ( pVM->hm.s.fNestedPaging
7594 && CPUMIsGuestPagingEnabledEx(pCtx)))
7595 {
7596 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
7597 VMXLOCAL_BREAK_RC(rc);
7598 if (pCtx->cr3 != u64Val)
7599 {
7600 pCtx->cr3 = u64Val;
7601 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
7602 }
7603
7604 /* If the guest is in PAE mode, sync back the PDPE's into the guest state.
7605 Note: CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date. */
7606 if (CPUMIsGuestInPAEModeEx(pCtx))
7607 {
7608 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
7609 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
7610 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
7611 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
7612 VMXLOCAL_BREAK_RC(rc);
7613 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
7614 }
7615 }
7616 }
7617 }
7618 } while (0);
7619
7620 if (RT_SUCCESS(rc))
7621 {
7622 /* Update fExtrn. */
7623 pCtx->fExtrn &= ~fWhat;
7624
7625 /* If everything has been imported, clear the HM keeper bit. */
7626 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
7627 {
7628 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
7629 Assert(!pCtx->fExtrn);
7630 }
7631 }
7632 }
7633 else
7634 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
7635
7636 ASMSetFlags(fEFlags);
7637
7638     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatImportGuestState, x);
7639
7640 if (RT_SUCCESS(rc))
7641 { /* likely */ }
7642 else
7643 return rc;
7644
7645 /*
7646 * Honor any pending CR3 updates.
7647 *
7648 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
7649 * -> VMMRZCallRing3Disable() -> hmR0VmxImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
7650 * -> continue with VM-exit handling -> hmR0VmxImportGuestState() and here we are.
7651 *
7652 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
7653 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
7654 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
7655 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
7656 *
7657 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
7658 */
7659 if (VMMRZCallRing3IsEnabled(pVCpu))
7660 {
7661 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
7662 {
7663 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
7664 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
7665 }
7666
7667 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
7668 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
7669
7670 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
7671 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
7672 }
7673
7674 return VINF_SUCCESS;
7675#undef VMXLOCAL_BREAK_RC
7676}
7677
7678
7679/**
7680 * Saves the guest state from the VMCS into the guest-CPU context.
7681 *
7682 * @returns VBox status code.
7683 * @param pVCpu The cross context virtual CPU structure.
7684 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
7685 */
7686VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat)
7687{
7688 PCVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
7689 return hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fWhat);
7690}
7691
7692
7693/**
7694 * Check per-VM and per-VCPU force flag actions that require us to go back to
7695 * ring-3 for one reason or another.
7696 *
7697 * @returns Strict VBox status code (i.e. informational status codes too)
7698 * @retval VINF_SUCCESS if we don't have any actions that require going back to
7699 * ring-3.
7700 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
7701 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
7702 * interrupts)
7703 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
7704 * all EMTs to be in ring-3.
7705 * @retval VINF_EM_RAW_TO_R3 if there is pending DMA requests.
7706 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
7707 * to the EM loop.
7708 *
7709 * @param pVCpu The cross context virtual CPU structure.
7710 * @param fStepping Whether we are single-stepping the guest using the
7711 * hypervisor debugger.
7712 */
7713static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVMCPU pVCpu, bool fStepping)
7714{
7715 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7716
7717 /*
7718 * Update pending interrupts into the APIC's IRR.
7719 */
7720 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
7721 APICUpdatePendingInterrupts(pVCpu);
7722
7723 /*
7724 * Anything pending? Should be more likely than not if we're doing a good job.
7725 */
7726 PVM pVM = pVCpu->CTX_SUFF(pVM);
7727 if ( !fStepping
7728 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
7729 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
7730 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
7731 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
7732 return VINF_SUCCESS;
7733
7734     /* Pending PGM CR3 sync. */
7735     if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
7736 {
7737 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7738 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
7739 VBOXSTRICTRC rcStrict2 = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
7740 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
7741 if (rcStrict2 != VINF_SUCCESS)
7742 {
7743 AssertRC(VBOXSTRICTRC_VAL(rcStrict2));
7744 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict2)));
7745 return rcStrict2;
7746 }
7747 }
7748
7749 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
7750 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
7751 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
7752 {
7753 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
7754 int rc2 = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
7755 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
7756 return rc2;
7757 }
7758
7759 /* Pending VM request packets, such as hardware interrupts. */
7760 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
7761 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
7762 {
7763 Log4Func(("Pending VM request forcing us back to ring-3\n"));
7764 return VINF_EM_PENDING_REQUEST;
7765 }
7766
7767 /* Pending PGM pool flushes. */
7768 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
7769 {
7770 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
7771 return VINF_PGM_POOL_FLUSH_PENDING;
7772 }
7773
7774 /* Pending DMA requests. */
7775 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
7776 {
7777 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
7778 return VINF_EM_RAW_TO_R3;
7779 }
7780
7781 return VINF_SUCCESS;
7782}
7783
7784
7785/**
7786 * Converts any TRPM trap into a pending HM event. This is typically used when
7787 * entering from ring-3 (not longjmp returns).
7788 *
7789 * @param pVCpu The cross context virtual CPU structure.
7790 */
7791static void hmR0VmxTrpmTrapToPendingEvent(PVMCPU pVCpu)
7792{
7793 Assert(TRPMHasTrap(pVCpu));
7794 Assert(!pVCpu->hm.s.Event.fPending);
7795
7796 uint8_t uVector;
7797 TRPMEVENT enmTrpmEvent;
7798 RTGCUINT uErrCode;
7799 RTGCUINTPTR GCPtrFaultAddress;
7800 uint8_t cbInstr;
7801
7802 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
7803 AssertRC(rc);
7804
7805 /* Refer Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntInfo. */
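         /* Interruption-information layout: bits 7:0 hold the vector, bits 10:8 the type, bit 11 the
            deliver-error-code flag and bit 31 the valid bit; the remaining bits must be zero. */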
7806 uint32_t u32IntInfo = uVector | VMX_EXIT_INT_INFO_VALID;
7807 if (enmTrpmEvent == TRPM_TRAP)
7808 {
7809 /** @todo r=ramshankar: TRPM currently offers no way to determine a \#DB that was
7810 * generated using INT1 (ICEBP). */
7811 switch (uVector)
7812 {
7813 case X86_XCPT_NMI:
7814 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_NMI << VMX_EXIT_INT_INFO_TYPE_SHIFT);
7815 break;
7816
7817 case X86_XCPT_BP:
7818 case X86_XCPT_OF:
7819 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_SW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
7820 break;
7821
7822 case X86_XCPT_PF:
7823 case X86_XCPT_DF:
7824 case X86_XCPT_TS:
7825 case X86_XCPT_NP:
7826 case X86_XCPT_SS:
7827 case X86_XCPT_GP:
7828 case X86_XCPT_AC:
7829 u32IntInfo |= VMX_EXIT_INT_INFO_ERROR_CODE_VALID;
7830 RT_FALL_THRU();
7831 default:
7832 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_HW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
7833 break;
7834 }
7835 }
7836 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
7837 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_EXT_INT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
7838 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
7839 {
7840 switch (uVector)
7841 {
7842 case X86_XCPT_BP:
7843 case X86_XCPT_OF:
7844 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_SW_XCPT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
7845 break;
7846
7847 default:
7848 Assert(uVector == X86_XCPT_DB);
7849 u32IntInfo |= (VMX_EXIT_INT_INFO_TYPE_SW_INT << VMX_EXIT_INT_INFO_TYPE_SHIFT);
7850 break;
7851 }
7852 }
7853 else
7854 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
7855
7856 rc = TRPMResetTrap(pVCpu);
7857 AssertRC(rc);
7858 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
7859 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
7860
7861 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
7862}
7863
7864
7865/**
7866 * Converts the pending HM event into a TRPM trap.
7867 *
7868 * @param pVCpu The cross context virtual CPU structure.
7869 */
7870static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
7871{
7872 Assert(pVCpu->hm.s.Event.fPending);
7873
7874 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
7875 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntInfo);
7876 bool fErrorCodeValid = VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVCpu->hm.s.Event.u64IntInfo);
7877 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
7878
7879 /* If a trap was already pending, we did something wrong! */
7880 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
7881
7882 /** @todo Use HMVmxEventToTrpmEventType() later. */
7883 TRPMEVENT enmTrapType;
7884 switch (uVectorType)
7885 {
7886 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
7887 enmTrapType = TRPM_HARDWARE_INT;
7888 break;
7889
7890 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
7891 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
7892 enmTrapType = TRPM_TRAP;
7893 break;
7894
7895 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT: /* #DB (INT1/ICEBP). */
7896 Assert(uVector == X86_XCPT_DB);
7897 enmTrapType = TRPM_SOFTWARE_INT;
7898 break;
7899
7900 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP (INT3) and #OF (INTO) */
7901 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
7902 enmTrapType = TRPM_SOFTWARE_INT;
7903 break;
7904
7905 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
7906 enmTrapType = TRPM_SOFTWARE_INT;
7907 break;
7908
7909 default:
7910 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
7911 enmTrapType = TRPM_32BIT_HACK;
7912 break;
7913 }
7914
7915 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
7916
7917 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
7918 AssertRC(rc);
7919
7920 if (fErrorCodeValid)
7921 TRPMSetErrorCode(pVCpu, uErrorCode);
7922
7923 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
7924 && uVector == X86_XCPT_PF)
7925 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
7926 else if (enmTrapType == TRPM_SOFTWARE_INT)
7927 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
7928
7929 /* We're now done converting the pending event. */
7930 pVCpu->hm.s.Event.fPending = false;
7931}
7932
7933
7934/**
7935 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
7936 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
7937 *
7938 * @param pVCpu The cross context virtual CPU structure.
7939 * @param pVmcsInfo The VMCS info. object.
7940 */
7941static void hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
7942{
7943 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
7944 {
7945 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
7946 {
7947 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
7948 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
7949 AssertRC(rc);
7950 }
7951     } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
7952}
7953
7954
7955/**
7956 * Clears the interrupt-window exiting control in the VMCS.
7957 *
7958 * @param pVmcsInfo The VMCS info. object.
7959 */
7960DECLINLINE(int) hmR0VmxClearIntWindowExitVmcs(PVMXVMCSINFO pVmcsInfo)
7961{
7962 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
7963 {
7964 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
7965 return VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
7966 }
7967 return VINF_SUCCESS;
7968}
7969
7970
7971/**
7972 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
7973 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
7974 *
7975 * @param pVCpu The cross context virtual CPU structure.
7976 * @param pVmcsInfo The VMCS info. object.
7977 */
7978static void hmR0VmxSetNmiWindowExitVmcs(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo)
7979{
7980 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
7981 {
7982 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
7983 {
7984 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
7985 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
7986 AssertRC(rc);
7987 Log4Func(("Setup NMI-window exiting\n"));
7988 }
7989 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
7990}
7991
7992
7993/**
7994 * Clears the NMI-window exiting control in the VMCS.
7995 *
7996 * @param pVmcsInfo The VMCS info. object.
7997 */
7998DECLINLINE(int) hmR0VmxClearNmiWindowExitVmcs(PVMXVMCSINFO pVmcsInfo)
7999{
8000 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
8001 {
8002 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
8003 return VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8004 }
8005 return VINF_SUCCESS;
8006}
8007
8008
8009/**
8010 * Does the necessary state syncing before returning to ring-3 for any reason
8011 * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
8012 *
8013 * @returns VBox status code.
8014 * @param pVCpu The cross context virtual CPU structure.
8015 * @param fImportState Whether to import the guest state from the VMCS back
8016 * to the guest-CPU context.
8017 *
8018 * @remarks No-long-jmp zone!!!
8019 */
8020static int hmR0VmxLeave(PVMCPU pVCpu, bool fImportState)
8021{
8022 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8023 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8024
8025 RTCPUID idCpu = RTMpCpuId();
8026 Log4Func(("HostCpuId=%u\n", idCpu));
8027
8028 /*
8029 * !!! IMPORTANT !!!
8030 * If you modify code here, check whether hmR0VmxCallRing3Callback() needs to be updated too.
8031 */
8032
8033 /* Save the guest state if necessary. */
8034 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
8035 if (fImportState)
8036 {
8037 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8038 AssertRCReturn(rc, rc);
8039 }
8040
8041 /* Restore host FPU state if necessary. We will resync on next R0 reentry. */
8042 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
8043 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
8044
8045 /* Restore host debug registers if necessary. We will resync on next R0 reentry. */
8046#ifdef VBOX_STRICT
8047 if (CPUMIsHyperDebugStateActive(pVCpu))
8048 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT);
8049#endif
8050 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
8051 Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu));
8052 Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu));
8053
8054#if HC_ARCH_BITS == 64
8055 /* Restore host-state bits that VT-x only restores partially. */
8056 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
8057 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
8058 {
8059 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
8060 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
8061 }
8062 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
8063#endif
8064
8065 /* Restore the lazy host MSRs as we're leaving VT-x context. */
8066 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
8067 {
8068 /* We shouldn't restore the host MSRs without saving the guest MSRs first. */
8069 if (!fImportState)
8070 {
8071 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS);
8072 AssertRCReturn(rc, rc);
8073 }
8074 hmR0VmxLazyRestoreHostMsrs(pVCpu);
8075 Assert(!pVCpu->hm.s.vmx.fLazyMsrs);
8076 }
8077 else
8078 pVCpu->hm.s.vmx.fLazyMsrs = 0;
8079
8080 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
8081 pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = false;
8082
8083 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
8084 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState);
8085 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState);
8086 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatPreExit);
8087 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitHandling);
8088 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
8089 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
8090 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
8091 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
8092
8093 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
8094
8095 /** @todo This partially defeats the purpose of having preemption hooks.
8096      * The problem is that deregistering the hooks should be moved to a place that
8097      * lasts until the EMT is about to be destroyed, not done every time we leave HM
8098      * context.
8099 */
8100 int rc = hmR0VmxClearVmcs(pVmcsInfo);
8101 AssertRCReturn(rc, rc);
8102
8103 Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
8104 NOREF(idCpu);
8105 return VINF_SUCCESS;
8106}
8107
8108
8109/**
8110 * Leaves the VT-x session.
8111 *
8112 * @returns VBox status code.
8113 * @param pVCpu The cross context virtual CPU structure.
8114 *
8115 * @remarks No-long-jmp zone!!!
8116 */
8117static int hmR0VmxLeaveSession(PVMCPU pVCpu)
8118{
8119 HM_DISABLE_PREEMPT(pVCpu);
8120 HMVMX_ASSERT_CPU_SAFE(pVCpu);
8121 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8122 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8123
8124 /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
8125 and done this from the VMXR0ThreadCtxCallback(). */
8126 if (!pVCpu->hm.s.fLeaveDone)
8127 {
8128 int rc2 = hmR0VmxLeave(pVCpu, true /* fImportState */);
8129 AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2);
8130 pVCpu->hm.s.fLeaveDone = true;
8131 }
8132 Assert(!pVCpu->cpum.GstCtx.fExtrn);
8133
8134 /*
8135 * !!! IMPORTANT !!!
8136 * If you modify code here, make sure to check whether hmR0VmxCallRing3Callback() needs to be updated too.
8137 */
8138
8139 /* Deregister hook now that we've left HM context before re-enabling preemption. */
8140 /** @todo Deregistering here means we need to VMCLEAR always
8141 * (longjmp/exit-to-r3) in VT-x which is not efficient, eliminate need
8142 * for calling VMMR0ThreadCtxHookDisable here! */
8143 VMMR0ThreadCtxHookDisable(pVCpu);
8144
8145 /* Leave HM context. This takes care of local init (term). */
8146 int rc = HMR0LeaveCpu(pVCpu);
8147
8148 HM_RESTORE_PREEMPT();
8149 return rc;
8150}
8151
8152
8153/**
8154 * Does the necessary state syncing before doing a longjmp to ring-3.
8155 *
8156 * @returns VBox status code.
8157 * @param pVCpu The cross context virtual CPU structure.
8158 *
8159 * @remarks No-long-jmp zone!!!
8160 */
8161DECLINLINE(int) hmR0VmxLongJmpToRing3(PVMCPU pVCpu)
8162{
8163 return hmR0VmxLeaveSession(pVCpu);
8164}
8165
8166
8167/**
8168 * Take necessary actions before going back to ring-3.
8169 *
8170 * An action requires us to go back to ring-3. This function does the necessary
8171  * steps before we can safely return to ring-3. This is not the same as a longjmp
8172  * to ring-3; this is voluntary and prepares the guest so it may continue
8173 * executing outside HM (recompiler/IEM).
8174 *
8175 * @returns VBox status code.
8176 * @param pVCpu The cross context virtual CPU structure.
8177 * @param rcExit The reason for exiting to ring-3. Can be
8178 * VINF_VMM_UNKNOWN_RING3_CALL.
8179 */
8180static int hmR0VmxExitToRing3(PVMCPU pVCpu, VBOXSTRICTRC rcExit)
8181{
8182 Assert(pVCpu);
8183 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
8184
8185 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
8186 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
8187 {
8188 VMXGetCurrentVmcs(&pVCpu->hm.s.vmx.LastError.HCPhysCurrentVmcs);
8189 pVCpu->hm.s.vmx.LastError.u32VmcsRev = *(uint32_t *)pVmcsInfo->pvVmcs;
8190 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
8191 /* LastError.idCurrentCpu was updated in hmR0VmxPreRunGuestCommitted(). */
8192 }
8193
8194 /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
8195 VMMRZCallRing3Disable(pVCpu);
8196 Log4Func(("rcExit=%d\n", VBOXSTRICTRC_VAL(rcExit)));
8197
8198 /*
8199 * Convert any pending HM events back to TRPM due to premature exits to ring-3.
8200 * We need to do this only on returns to ring-3 and not for longjmps to ring3.
8201 *
8202 * This is because execution may continue from ring-3 and we would need to inject
8203 * the event from there (hence place it back in TRPM).
8204 */
8205 if (pVCpu->hm.s.Event.fPending)
8206 {
8207 hmR0VmxPendingEventToTrpmTrap(pVCpu);
8208 Assert(!pVCpu->hm.s.Event.fPending);
8209
8210 /* Clear the events from the VMCS. */
8211 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0);
8212 AssertRCReturn(rc, rc);
8213 }
8214#ifdef VBOX_STRICT
8215 else
8216 {
8217 /*
8218 * Ensure we don't accidentally clear a pending HM event without clearing the VMCS.
8219 * This can be pretty hard to debug otherwise, interrupts might get injected twice
8220 * occasionally, see @bugref{9180#c42}.
8221 */
8222 uint32_t uEntryIntInfo;
8223 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &uEntryIntInfo);
8224 AssertRC(rc);
8225 Assert(!VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo));
8226 }
8227#endif
8228
8229 /*
8230 * Clear the interrupt-window and NMI-window VMCS controls as we could have got
8231 * a VM-exit with higher priority than interrupt-window or NMI-window VM-exits
8232 * (e.g. TPR below threshold).
8233 */
8234 int rc = hmR0VmxClearIntWindowExitVmcs(pVmcsInfo);
8235 rc |= hmR0VmxClearNmiWindowExitVmcs(pVmcsInfo);
8236 AssertRCReturn(rc, rc);
8237
8238     /* If we're emulating an instruction, we shouldn't have any TRPM traps pending,
8239        and if we're injecting an event we should have a TRPM trap pending. */
8240 AssertMsg(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
8241#ifndef DEBUG_bird /* Triggered after firing an NMI against NT4SP1, possibly a triple fault in progress. */
8242 AssertMsg(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
8243#endif
8244
8245 /* Save guest state and restore host state bits. */
8246 rc = hmR0VmxLeaveSession(pVCpu);
8247 AssertRCReturn(rc, rc);
8248 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
8249
8250 /* Thread-context hooks are unregistered at this point!!! */
8251
8252 /* Sync recompiler state. */
8253 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
8254 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
8255 | CPUM_CHANGED_LDTR
8256 | CPUM_CHANGED_GDTR
8257 | CPUM_CHANGED_IDTR
8258 | CPUM_CHANGED_TR
8259 | CPUM_CHANGED_HIDDEN_SEL_REGS);
8260 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
8261 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx))
8262 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
8263
8264 Assert(!pVCpu->hm.s.fClearTrapFlag);
8265
8266 /* Update the exit-to-ring 3 reason. */
8267 pVCpu->hm.s.rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit);
8268
8269 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
8270 if ( rcExit != VINF_EM_RAW_INTERRUPT
8271 || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
8272 {
8273 Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL));
8274 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
8275 }
8276
8277 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
8278
8279 /* We do -not- want any longjmp notifications after this! We must return to ring-3 ASAP. */
8280 VMMRZCallRing3RemoveNotification(pVCpu);
8281 VMMRZCallRing3Enable(pVCpu);
8282
8283 return rc;
8284}
8285
8286
8287/**
8288 * VMMRZCallRing3() callback wrapper which saves the guest state before we
8289 * longjump to ring-3 and possibly get preempted.
8290 *
8291 * @returns VBox status code.
8292 * @param pVCpu The cross context virtual CPU structure.
8293 * @param enmOperation The operation causing the ring-3 longjump.
8294 * @param pvUser User argument, currently unused, NULL.
8295 */
8296static DECLCALLBACK(int) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
8297{
8298 RT_NOREF(pvUser);
8299 if (enmOperation == VMMCALLRING3_VM_R0_ASSERTION)
8300 {
8301 /*
8302 * !!! IMPORTANT !!!
8303 * If you modify code here, check whether hmR0VmxLeave() and hmR0VmxLeaveSession() needs to be updated too.
8304 * This is a stripped down version which gets out ASAP, trying to not trigger any further assertions.
8305 */
8306 VMMRZCallRing3RemoveNotification(pVCpu);
8307 VMMRZCallRing3Disable(pVCpu);
8308 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
8309 RTThreadPreemptDisable(&PreemptState);
8310
8311 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
8312 hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8313 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
8314 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
8315
8316#if HC_ARCH_BITS == 64
8317 /* Restore host-state bits that VT-x only restores partially. */
8318 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
8319 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
8320 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
8321 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
8322#endif
8323
8324 /* Restore the lazy host MSRs as we're leaving VT-x context. */
8325 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
8326 hmR0VmxLazyRestoreHostMsrs(pVCpu);
8327
8328 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
8329 pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = false;
8330 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
8331
8332 /* Clear the current VMCS data back to memory. */
8333 hmR0VmxClearVmcs(pVmcsInfo);
8334
8335 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
8336 VMMR0ThreadCtxHookDisable(pVCpu);
8337 HMR0LeaveCpu(pVCpu);
8338 RTThreadPreemptRestore(&PreemptState);
8339 return VINF_SUCCESS;
8340 }
8341
8342 Assert(pVCpu);
8343 Assert(pvUser);
8344 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8345 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
8346
8347 VMMRZCallRing3Disable(pVCpu);
8348 Assert(VMMR0IsLogFlushDisabled(pVCpu));
8349
8350 Log4Func((" -> hmR0VmxLongJmpToRing3 enmOperation=%d\n", enmOperation));
8351
8352 int rc = hmR0VmxLongJmpToRing3(pVCpu);
8353 AssertRCReturn(rc, rc);
8354
8355 VMMRZCallRing3Enable(pVCpu);
8356 return VINF_SUCCESS;
8357}
8358
8359
8360/**
8361 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
8362 * stack.
8363 *
8364 * @returns Strict VBox status code (i.e. informational status codes too).
8365 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
8366 * @param pVCpu The cross context virtual CPU structure.
8367 * @param uValue The value to push to the guest stack.
8368 */
8369static VBOXSTRICTRC hmR0VmxRealModeGuestStackPush(PVMCPU pVCpu, uint16_t uValue)
8370{
8371 /*
8372 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
8373 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
8374 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
8375 */
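         /* E.g. pushing with sp=0 wraps to sp=0xfffe, which is fine; only sp=1 cannot take a 2-byte
            push without straddling the segment wrap, so it is treated as a triple fault (VM reset). */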
8376 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8377 if (pCtx->sp == 1)
8378 return VINF_EM_RESET;
8379 pCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
8380 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), pCtx->ss.u64Base + pCtx->sp, &uValue, sizeof(uint16_t));
8381 AssertRC(rc);
8382 return rc;
8383}
8384
8385
8386/**
8387 * Injects an event into the guest upon VM-entry by updating the relevant fields
8388 * in the VM-entry area in the VMCS.
8389 *
8390 * @returns Strict VBox status code (i.e. informational status codes too).
8391 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
8392 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
8393 *
8394 * @param pVCpu The cross context virtual CPU structure.
8395 * @param pVmxTransient The VMX-transient structure.
8396 * @param pEvent The event being injected.
8397 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state.
8398  *                          This will be updated if necessary. This cannot
8399  *                          be NULL.
8400 * @param fStepping Whether we're single-stepping guest execution and
8401 * should return VINF_EM_DBG_STEPPED if the event is
8402 * injected directly (registers modified by us, not by
8403 * hardware on VM-entry).
8404 */
8405static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PCHMEVENT pEvent, bool fStepping,
8406 uint32_t *pfIntrState)
8407{
8408 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
8409 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
8410 Assert(pfIntrState);
8411
8412 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8413 uint32_t u32IntInfo = pEvent->u64IntInfo;
8414 uint32_t const u32ErrCode = pEvent->u32ErrCode;
8415 uint32_t const cbInstr = pEvent->cbInstr;
8416 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
8417 uint32_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
8418 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
8419
8420#ifdef VBOX_STRICT
8421 /*
8422 * Validate the error-code-valid bit for hardware exceptions.
8423 * No error codes for exceptions in real-mode.
8424 *
8425 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
8426 */
8427 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
8428 && !CPUMIsGuestInRealModeEx(pCtx))
8429 {
8430 switch (uVector)
8431 {
8432 case X86_XCPT_PF:
8433 case X86_XCPT_DF:
8434 case X86_XCPT_TS:
8435 case X86_XCPT_NP:
8436 case X86_XCPT_SS:
8437 case X86_XCPT_GP:
8438 case X86_XCPT_AC:
8439 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
8440 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
8441 RT_FALL_THRU();
8442 default:
8443 break;
8444 }
8445 }
8446
8447 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
8448 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
8449 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
8450#endif
8451
8452 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
8453
8454 /*
8455 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
8456 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
8457 * interrupt handler in the (real-mode) guest.
8458 *
8459 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
8460 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
8461 */
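         /* Without unrestricted guest execution the dispatch is emulated below: read the 4-byte IVT
            entry for the vector, push FLAGS, CS and the return IP on the guest stack, clear
            IF/TF/RF/AC and point CS:IP at the handler, as the CPU would for a real-mode interrupt. */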
8462 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
8463 {
8464 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest)
8465 {
8466 /*
8467 * For CPUs with unrestricted guest execution enabled and with the guest
8468 * in real-mode, we must not set the deliver-error-code bit.
8469 *
8470 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
8471 */
8472 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
8473 }
8474 else
8475 {
8476 PVM pVM = pVCpu->CTX_SUFF(pVM);
8477 Assert(PDMVmmDevHeapIsEnabled(pVM));
8478 Assert(pVM->hm.s.vmx.pRealModeTSS);
8479 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
8480
8481 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */
8482 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8483 int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
8484 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
8485 AssertRCReturn(rc2, rc2);
8486
8487 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
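                 /* E.g. vector 0x08 occupies IVT bytes 0x20 through 0x23, so the limit must be at least 0x23. */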
8488 size_t const cbIdtEntry = sizeof(X86IDTR16);
8489 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
8490 {
8491 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
8492 if (uVector == X86_XCPT_DF)
8493 return VINF_EM_RESET;
8494
8495 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
8496 No error codes for exceptions in real-mode. */
8497 if (uVector == X86_XCPT_GP)
8498 {
8499 uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
8500 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
8501 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
8502 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
8503 HMEVENT EventXcptDf;
8504 RT_ZERO(EventXcptDf);
8505 EventXcptDf.u64IntInfo = uXcptDfInfo;
8506 return hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &EventXcptDf, fStepping, pfIntrState);
8507 }
8508
8509 /*
8510 * If we're injecting an event with no valid IDT entry, inject a #GP.
8511 * No error codes for exceptions in real-mode.
8512 *
8513 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
8514 */
8515 uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
8516 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
8517 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
8518 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
8519 HMEVENT EventXcptGp;
8520 RT_ZERO(EventXcptGp);
8521 EventXcptGp.u64IntInfo = uXcptGpInfo;
8522 return hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &EventXcptGp, fStepping, pfIntrState);
8523 }
8524
8525 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
8526 uint16_t uGuestIp = pCtx->ip;
8527 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
8528 {
8529 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
8530 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
8531 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
8532 }
8533 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
8534 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
8535
8536 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
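                 /* Each IVT entry is an X86IDTR16: a 16-bit handler offset followed by a 16-bit CS selector. */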
8537 X86IDTR16 IdtEntry;
8538 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
8539 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
8540 AssertRCReturn(rc2, rc2);
8541
8542 /* Construct the stack frame for the interrupt/exception handler. */
8543 VBOXSTRICTRC rcStrict;
8544 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
8545 if (rcStrict == VINF_SUCCESS)
8546 {
8547 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
8548 if (rcStrict == VINF_SUCCESS)
8549 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
8550 }
8551
8552 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
8553 if (rcStrict == VINF_SUCCESS)
8554 {
8555 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
8556 pCtx->rip = IdtEntry.offSel;
8557 pCtx->cs.Sel = IdtEntry.uSel;
8558 pCtx->cs.ValidSel = IdtEntry.uSel;
8559 pCtx->cs.u64Base = IdtEntry.uSel << cbIdtEntry;
8560 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
8561 && uVector == X86_XCPT_PF)
8562 pCtx->cr2 = GCPtrFault;
8563
8564 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
8565 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8566 | HM_CHANGED_GUEST_RSP);
8567
8568 /*
8569 * If we delivered a hardware exception (other than an NMI) and if there was
8570 * block-by-STI in effect, we should clear it.
8571 */
8572 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
8573 {
8574 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
8575 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
8576 Log4Func(("Clearing inhibition due to STI\n"));
8577 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
8578 }
8579
8580 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
8581 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
8582
8583 /*
8584 * The event has been truly dispatched to the guest. Mark it as no longer pending so
8585 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
8586 */
8587 pVCpu->hm.s.Event.fPending = false;
8588
8589 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
8590 if (fStepping)
8591 rcStrict = VINF_EM_DBG_STEPPED;
8592 }
8593 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
8594 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8595 return rcStrict;
8596 }
8597 }
8598
8599 /*
8600 * Validate.
8601 */
8602 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
8603 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
8604
8605 /*
8606 * Inject the event into the VMCS.
8607 */
8608 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
8609 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
8610 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
8611 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
8612 AssertRCReturn(rc, rc);
8613
8614 /*
8615 * Update guest CR2 if this is a page-fault.
8616 */
8617 if ( VMX_ENTRY_INT_INFO_TYPE(u32IntInfo) == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
8618 && uVector == X86_XCPT_PF)
8619 pCtx->cr2 = GCPtrFault;
8620
8621 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
8622 return VINF_SUCCESS;
8623}
8624
8625
8626/**
8627 * Evaluates the event to be delivered to the guest and sets it as the pending
8628 * event.
8629 *
8630 * @returns Strict VBox status code (i.e. informational status codes too).
8631 * @param pVCpu The cross context virtual CPU structure.
8632 * @param pVmxTransient The VMX-transient structure.
8633 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
8634 */
8635static VBOXSTRICTRC hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t *pfIntrState)
8636{
8637 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8638 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8639
8640 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
8641 uint32_t const fIntrState = hmR0VmxGetGuestIntrState(pVCpu, pVmcsInfo);
8642 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
8643 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
8644 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
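         /* These correspond to bits 0 (STI), 1 (MOV SS) and 3 (NMI) of the guest interruptibility-state
            field; block-by-SMI (bit 2) is asserted clear below as it isn't supported yet. */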
8645
8646 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS));
8647 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
8648 Assert(!fBlockSti || pCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
8649 Assert(!TRPMHasTrap(pVCpu));
8650 Assert(pfIntrState);
8651
8652 *pfIntrState = fIntrState;
8653
8654 /*
8655 * Toggling of interrupt force-flags here is safe since we update TRPM on premature exits
8656 * to ring-3 before executing guest code, see hmR0VmxExitToRing3(). We must NOT restore these force-flags.
8657 */
8658 /** @todo SMI. SMIs take priority over NMIs. */
8659 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
8660 {
8661 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
8662 if ( !pVCpu->hm.s.Event.fPending
8663 && !fBlockNmi
8664 && !fBlockSti
8665 && !fBlockMovSS)
8666 {
8667#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8668 if ( pVmxTransient->fIsNestedGuest
8669 && CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_NMI_EXIT))
8670 return IEMExecVmxVmexitNmi(pVCpu);
8671#endif
8672 hmR0VmxSetPendingXcptNmi(pVCpu);
8673 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
8674 Log4Func(("Pending NMI\n"));
8675 }
8676 else
8677 hmR0VmxSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
8678 }
8679 /*
8680 * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns
8681 * a valid interrupt we -must- deliver the interrupt. We can no longer re-request it from the APIC.
8682 */
8683 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
8684 && !pVCpu->hm.s.fSingleInstruction)
8685 {
8686 Assert(!DBGFIsStepping(pVCpu));
8687 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
8688 AssertRCReturn(rc, rc);
8689 bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
8690 if ( !pVCpu->hm.s.Event.fPending
8691 && !fBlockInt
8692 && !fBlockSti
8693 && !fBlockMovSS)
8694 {
8695#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8696 if ( pVmxTransient->fIsNestedGuest
8697 && CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
8698 {
8699 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0/* uVector */, true /* fIntPending */);
8700 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
8701 return rcStrict;
8702 }
8703#endif
8704 uint8_t u8Interrupt;
8705 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
8706 if (RT_SUCCESS(rc))
8707 {
8708#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8709 if ( pVmxTransient->fIsNestedGuest
8710 && CPUMIsGuestVmxPinCtlsSet(pVCpu, pCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
8711 && CPUMIsGuestVmxExitCtlsSet(pVCpu, pCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
8712 {
8713 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
8714 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
8715 return rcStrict;
8716 }
8717#endif
8718 hmR0VmxSetPendingExtInt(pVCpu, u8Interrupt);
8719 Log4Func(("Pending external interrupt vector %#x\n", u8Interrupt));
8720 }
8721 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
8722 {
8723 if ( !pVmxTransient->fIsNestedGuest
8724 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
8725 hmR0VmxApicSetTprThreshold(pVCpu, pVmcsInfo, u8Interrupt >> 4);
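                         /* The TPR threshold works on priority classes, i.e. the upper 4 bits of the
                            interrupt vector, hence the '>> 4' above. */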
8726 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
8727
8728 /*
8729 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
8730 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
8731 * need to re-set this force-flag here.
8732 */
8733 }
8734 else
8735 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
8736 }
8737 else
8738 hmR0VmxSetIntWindowExitVmcs(pVCpu, pVmcsInfo);
8739 }
8740
8741 return VINF_SUCCESS;
8742}
8743
8744
8745/**
8746 * Injects any pending events into the guest if the guest is in a state to
8747 * receive them.
8748 *
8749 * @returns Strict VBox status code (i.e. informational status codes too).
8750 * @param pVCpu The cross context virtual CPU structure.
8751 * @param pVmxTransient The VMX-transient structure.
8752 * @param fIntrState The VT-x guest-interruptibility state.
8753 * @param fStepping Whether we are single-stepping the guest using the
8754 * hypervisor debugger and should return
8755 * VINF_EM_DBG_STEPPED if the event was dispatched
8756 * directly.
8757 */
8758static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t fIntrState, bool fStepping)
8759{
8760 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
8761 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8762
8763 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
8764 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
8765
8766 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
8767 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
8768 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/
8769 Assert(!TRPMHasTrap(pVCpu));
8770
8771 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
8772 if (pVCpu->hm.s.Event.fPending)
8773 {
8774 /*
8775 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
8776 * pending even while injecting an event and in this case, we want a VM-exit as soon as
8777 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
8778 *
8779 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
8780 */
8781 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
8782#ifdef VBOX_STRICT
8783 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
8784 {
8785 bool const fBlockInt = !(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
8786 Assert(!fBlockInt);
8787 Assert(!fBlockSti);
8788 Assert(!fBlockMovSS);
8789 }
8790 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
8791 {
8792 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
8793 Assert(!fBlockSti);
8794 Assert(!fBlockMovSS);
8795 Assert(!fBlockNmi);
8796 }
8797#endif
8798 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
8799 uIntType));
8800
8801 /*
8802 * Inject the event and get any changes to the guest-interruptibility state.
8803 *
8804 * The guest-interruptibility state may need to be updated if we inject the event
8805 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
8806 */
8807 rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &pVCpu->hm.s.Event, fStepping, &fIntrState);
8808 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
8809
8810 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
8811 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
8812 else
8813 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
8814 }
8815
8816 /*
8817 * Update the guest-interruptibility state.
8818 *
8819 * This is required for the real-on-v86 software interrupt injection case above, as well as
8820 * updates to the guest state from ring-3 or IEM/REM.
8821 */
8822 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
8823 AssertRCReturn(rc, rc);
8824
8825 /*
8826 * There's no need to clear the VM-entry interruption-information field here if we're not
8827 * injecting anything. VT-x clears the valid bit on every VM-exit.
8828 *
8829 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
8830 */
8831
8832 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
8833 NOREF(fBlockMovSS); NOREF(fBlockSti);
8834 return rcStrict;
8835}
8836
8837
8838/**
8839 * Enters the VT-x session.
8840 *
8841 * @returns VBox status code.
8842 * @param pVCpu The cross context virtual CPU structure.
8843 */
8844VMMR0DECL(int) VMXR0Enter(PVMCPU pVCpu)
8845{
8846 AssertPtr(pVCpu);
8847 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported);
8848 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8849
8850 LogFlowFunc(("pVCpu=%p\n", pVCpu));
8851 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
8852 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
8853
8854#ifdef VBOX_STRICT
8855 /* At least verify VMX is enabled, since we can't check if we're in VMX root mode without #GP'ing. */
8856 RTCCUINTREG uHostCR4 = ASMGetCR4();
8857 if (!(uHostCR4 & X86_CR4_VMXE))
8858 {
8859 LogRelFunc(("X86_CR4_VMXE bit in CR4 is not set!\n"));
8860 return VERR_VMX_X86_CR4_VMXE_CLEARED;
8861 }
8862#endif
8863
8864 /*
8865 * Load the appropriate VMCS as the current and active one.
8866 */
8867 PVMXVMCSINFO pVmcsInfo;
8868 bool const fInNestedGuestMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
8869 if (!fInNestedGuestMode)
8870 pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfo;
8871 else
8872 pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfoNstGst;
8873 int rc = hmR0VmxLoadVmcs(pVmcsInfo);
8874 if (RT_SUCCESS(rc))
8875 {
8876 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs = fInNestedGuestMode;
8877 pVCpu->hm.s.fLeaveDone = false;
8878 Log4Func(("Loaded Vmcs. HostCpuId=%u\n", RTMpCpuId()));
8879
8880 /*
8881 * Do the EMT scheduled L1D flush here if needed.
8882 */
8883 if (pVCpu->CTX_SUFF(pVM)->hm.s.fL1dFlushOnSched)
8884 ASMWrMsr(MSR_IA32_FLUSH_CMD, MSR_IA32_FLUSH_CMD_F_L1D);
8885 }
8886 return rc;
8887}
8888
8889
8890/**
8891 * The thread-context callback (only on platforms which support it).
8892 *
8893 * @param enmEvent The thread-context event.
8894 * @param pVCpu The cross context virtual CPU structure.
8895 * @param fGlobalInit Whether global VT-x/AMD-V init. was used.
8896 * @thread EMT(pVCpu)
8897 */
8898VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
8899{
8900 NOREF(fGlobalInit);
8901
8902 switch (enmEvent)
8903 {
8904 case RTTHREADCTXEVENT_OUT:
8905 {
8906 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8907 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
8908 VMCPU_ASSERT_EMT(pVCpu);
8909
8910 /* No longjmps (logger flushes, locks) in this fragile context. */
8911 VMMRZCallRing3Disable(pVCpu);
8912 Log4Func(("Preempting: HostCpuId=%u\n", RTMpCpuId()));
8913
8914 /* Restore host-state (FPU, debug etc.) */
8915 if (!pVCpu->hm.s.fLeaveDone)
8916 {
8917 /*
8918 * Do -not- import the guest-state here as we might already be in the middle of importing
8919 * it, esp. bad if we're holding the PGM lock, see comment in hmR0VmxImportGuestState().
8920 */
8921 hmR0VmxLeave(pVCpu, false /* fImportState */);
8922 pVCpu->hm.s.fLeaveDone = true;
8923 }
8924
8925 /* Leave HM context, takes care of local init (term). */
8926 int rc = HMR0LeaveCpu(pVCpu);
8927 AssertRC(rc);
8928
8929 /* Restore longjmp state. */
8930 VMMRZCallRing3Enable(pVCpu);
8931 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreempt);
8932 break;
8933 }
8934
8935 case RTTHREADCTXEVENT_IN:
8936 {
8937 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8938 Assert(VMMR0ThreadCtxHookIsEnabled(pVCpu));
8939 VMCPU_ASSERT_EMT(pVCpu);
8940
8941 /* No longjmps here, as we don't want to trigger preemption (& its hook) while resuming. */
8942 VMMRZCallRing3Disable(pVCpu);
8943 Log4Func(("Resumed: HostCpuId=%u\n", RTMpCpuId()));
8944
8945 /* Initialize the bare minimum state required for HM. This takes care of
8946 initializing VT-x if necessary (onlined CPUs, local init etc.) */
8947 int rc = hmR0EnterCpu(pVCpu);
8948 AssertRC(rc);
8949 Assert( (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
8950 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
8951
8952 /* Load the active VMCS as the current one. */
8953 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
8954 rc = hmR0VmxLoadVmcs(pVmcsInfo);
8955 AssertRC(rc);
8956 Log4Func(("Resumed: Loaded Vmcs. HostCpuId=%u\n", RTMpCpuId()));
8957 pVCpu->hm.s.fLeaveDone = false;
8958
8959 /* Do the EMT scheduled L1D flush if needed. */
8960 if (pVCpu->CTX_SUFF(pVM)->hm.s.fL1dFlushOnSched)
8961 ASMWrMsr(MSR_IA32_FLUSH_CMD, MSR_IA32_FLUSH_CMD_F_L1D);
8962
8963 /* Restore longjmp state. */
8964 VMMRZCallRing3Enable(pVCpu);
8965 break;
8966 }
8967
8968 default:
8969 break;
8970 }
8971}
8972
8973
8974/**
8975 * Exports the host state into the VMCS host-state area.
8976 * Sets up the VM-exit MSR-load area.
8977 *
8978 * The CPU state will be loaded from these fields on every successful VM-exit.
8979 *
8980 * @returns VBox status code.
8981 * @param pVCpu The cross context virtual CPU structure.
8982 *
8983 * @remarks No-long-jump zone!!!
8984 */
8985static int hmR0VmxExportHostState(PVMCPU pVCpu)
8986{
8987 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8988
8989 int rc = VINF_SUCCESS;
8990 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)
8991 {
8992 rc = hmR0VmxExportHostControlRegs();
8993 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8994
8995 rc = hmR0VmxExportHostSegmentRegs(pVCpu);
8996 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
8997
8998 rc = hmR0VmxExportHostMsrs(pVCpu);
8999 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9000
9001 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_HOST_CONTEXT;
9002 }
9003 return rc;
9004}
9005
9006
9007/**
9008 * Saves the host state in the VMCS host-state.
9009 *
9010 * @returns VBox status code.
9011 * @param pVCpu The cross context virtual CPU structure.
9012 *
9013 * @remarks No-long-jump zone!!!
9014 */
9015VMMR0DECL(int) VMXR0ExportHostState(PVMCPU pVCpu)
9016{
9017 AssertPtr(pVCpu);
9018 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
9019
9020 /*
9021 * Export the host state here while entering HM context.
9022 * When thread-context hooks are used, we might get preempted and have to re-save the host
9023 * state but most of the time we won't be, so do it here before we disable interrupts.
9024 */
9025 return hmR0VmxExportHostState(pVCpu);
9026}
9027
9028
9029/**
9030 * Exports the guest state into the VMCS guest-state area.
9031 *
9032 * This will typically be done before VM-entry when the guest-CPU state and the
9033 * VMCS state may potentially be out of sync.
9034 *
9035 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas. Sets up the
9036 * VM-entry controls.
9037 * Sets up the appropriate VMX non-root function to execute guest code based on
9038 * the guest CPU mode.
9039 *
9040 * @returns VBox strict status code.
9041 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
9042 * without unrestricted guest execution and the VMMDev is not presently
9043 * mapped (e.g. EFI32).
9044 *
9045 * @param pVCpu The cross context virtual CPU structure.
9046 * @param pVmxTransient The VMX-transient structure.
9047 *
9048 * @remarks No-long-jump zone!!!
9049 */
9050static VBOXSTRICTRC hmR0VmxExportGuestState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
9051{
9052 AssertPtr(pVCpu);
9053 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
9054 LogFlowFunc(("pVCpu=%p\n", pVCpu));
9055
9056 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);
9057
9058 /*
9059 * Determine real-on-v86 mode.
9060 * Used when the guest is in real-mode and unrestricted guest execution is not used.
9061 */
9062 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9063 if ( pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest
9064 || !CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx))
9065 pVmcsInfo->RealMode.fRealOnV86Active = false;
9066 else
9067 {
9068 Assert(!pVmxTransient->fIsNestedGuest);
9069 pVmcsInfo->RealMode.fRealOnV86Active = true;
9070 }
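/*
 * When fRealOnV86Active is set, the guest is run as virtual-8086 code; the V86 checks in
 * hmR0VmxCheckGuestState() below expect segment attributes of 0xf3 and 0xffff limits for
 * this mode.
 */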
9071
9072 /*
9073 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
9074 * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
9075 */
9076 /** @todo r=ramshankar: Move hmR0VmxSelectVMRunHandler inside
9077 * hmR0VmxExportGuestEntryExitCtls and do it conditionally. There shouldn't
9078 * be a need to evaluate this every time since I'm pretty sure we intercept
9079 * all guest paging mode changes. */
9080 int rc = hmR0VmxSelectVMRunHandler(pVCpu, pVmxTransient);
9081 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9082
9083 rc = hmR0VmxExportGuestEntryExitCtls(pVCpu, pVmxTransient);
9084 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9085
9086 rc = hmR0VmxExportGuestCR0(pVCpu, pVmxTransient);
9087 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9088
9089 VBOXSTRICTRC rcStrict = hmR0VmxExportGuestCR3AndCR4(pVCpu, pVmxTransient);
9090 if (rcStrict == VINF_SUCCESS)
9091 { /* likely */ }
9092 else
9093 {
9094 Assert(rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE_NP(rcStrict));
9095 return rcStrict;
9096 }
9097
9098 rc = hmR0VmxExportGuestSegRegsXdtr(pVCpu, pVmxTransient);
9099 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9100
9101 rc = hmR0VmxExportGuestMsrs(pVCpu, pVmxTransient);
9102 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9103
9104 rc = hmR0VmxExportGuestApicTpr(pVCpu, pVmxTransient);
9105 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9106
9107 rc = hmR0VmxExportGuestXcptIntercepts(pVCpu, pVmxTransient);
9108 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9109
9110 rc = hmR0VmxExportGuestRip(pVCpu);
9111 rc |= hmR0VmxExportGuestRsp(pVCpu);
9112 rc |= hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
9113 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9114
9115 /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
9116 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~( (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP)
9117 | HM_CHANGED_GUEST_CR2
9118 | (HM_CHANGED_GUEST_DR_MASK & ~HM_CHANGED_GUEST_DR7)
9119 | HM_CHANGED_GUEST_X87
9120 | HM_CHANGED_GUEST_SSE_AVX
9121 | HM_CHANGED_GUEST_OTHER_XSAVE
9122 | HM_CHANGED_GUEST_XCRx
9123 | HM_CHANGED_GUEST_KERNEL_GS_BASE /* Part of lazy or auto load-store MSRs. */
9124 | HM_CHANGED_GUEST_SYSCALL_MSRS /* Part of lazy or auto load-store MSRs. */
9125 | HM_CHANGED_GUEST_TSC_AUX
9126 | HM_CHANGED_GUEST_OTHER_MSRS
9127 | HM_CHANGED_GUEST_HWVIRT /* More accurate PLE handling someday? */
9128 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));
9129
9130 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x);
9131 return rc;
9132}
9133
9134
9135/**
9136 * Exports the state shared between the host and guest into the VMCS.
9137 *
9138 * @param pVCpu The cross context virtual CPU structure.
9139 * @param pVmxTransient The VMX-transient structure.
9140 *
9141 * @remarks No-long-jump zone!!!
9142 */
9143static void hmR0VmxExportSharedState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
9144{
9145 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
9146 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
9147
9148 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
9149 {
9150 int rc = hmR0VmxExportSharedDebugState(pVCpu, pVmxTransient);
9151 AssertRC(rc);
9152 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
9153
9154 /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
9155 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_RFLAGS)
9156 {
9157 rc = hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
9158 AssertRC(rc);
9159 }
9160 }
9161
9162 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_GUEST_LAZY_MSRS)
9163 {
9164 hmR0VmxLazyLoadGuestMsrs(pVCpu);
9165 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_VMX_GUEST_LAZY_MSRS;
9166 }
9167
9168 AssertMsg(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE),
9169 ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
9170}
9171
9172
9173/**
9174 * Worker for loading the guest-state bits in the inner VT-x execution loop.
9175 *
9176 * @returns Strict VBox status code (i.e. informational status codes too).
9177 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
9178 * without unrestricted guest execution and the VMMDev is not presently
9179 * mapped (e.g. EFI32).
9180 *
9181 * @param pVCpu The cross context virtual CPU structure.
9182 * @param pVmxTransient The VMX-transient structure.
9183 *
9184 * @remarks No-long-jump zone!!!
9185 */
9186static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
9187{
9188 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
9189 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
9190 Assert(VMMR0IsLogFlushDisabled(pVCpu));
9191
9192#ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
9193 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
9194#endif
9195
9196 /*
9197 * For many exits it's only RIP that changes and hence try to export it first
9198 * without going through a lot of change flag checks.
9199 */
9200 VBOXSTRICTRC rcStrict;
9201 uint64_t fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
9202 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
9203 if ((fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) == HM_CHANGED_GUEST_RIP)
9204 {
9205 rcStrict = hmR0VmxExportGuestRip(pVCpu);
9206 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9207 { /* likely */}
9208 else
9209 AssertMsgFailedReturn(("Failed to export guest RIP! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
9210 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportMinimal);
9211 }
9212 else if (fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
9213 {
9214 rcStrict = hmR0VmxExportGuestState(pVCpu, pVmxTransient);
9215 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9216 { /* likely */}
9217 else
9218 {
9219 AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM, ("Failed to export guest state! rc=%Rrc\n",
9220 VBOXSTRICTRC_VAL(rcStrict)));
9221 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
9222 return rcStrict;
9223 }
9224 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
9225 }
9226 else
9227 rcStrict = VINF_SUCCESS;
9228
9229#ifdef VBOX_STRICT
9230 /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
9231 fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
9232 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
9233 AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)),
9234 ("fCtxChanged=%#RX64\n", fCtxChanged));
9235#endif
9236 return rcStrict;
9237}
9238
9239
9240/**
9241 * Tries to determine what part of the guest-state VT-x has deemed as invalid
9242 * and update error record fields accordingly.
9243 *
9244 * @return VMX_IGS_* return codes.
9245 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
9246 * wrong with the guest state.
9247 *
9248 * @param pVCpu The cross context virtual CPU structure.
9249 * @param pVmcsInfo The VMCS info. object.
9250 *
9251 * @remarks This function assumes our cache of the VMCS controls
9252 * are valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
9253 */
9254static uint32_t hmR0VmxCheckGuestState(PVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
9255{
9256#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
9257#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { \
9258 uError = (err); \
9259 break; \
9260 } else do { } while (0)
9261
9262 int rc;
9263 PVM pVM = pVCpu->CTX_SUFF(pVM);
9264 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9265 uint32_t uError = VMX_IGS_ERROR;
9266 uint32_t u32Val;
9267 bool const fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
9268
9269 do
9270 {
9271 /*
9272 * CR0.
9273 */
9274 uint32_t fSetCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
9275 uint32_t const fZapCr0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
9276 /* Exceptions for unrestricted guest execution for fixed CR0 bits (PE, PG).
9277 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
9278 if (fUnrestrictedGuest)
9279 fSetCr0 &= ~(X86_CR0_PE | X86_CR0_PG);
9280
9281 uint32_t u32GuestCr0;
9282 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32GuestCr0);
9283 AssertRCBreak(rc);
9284 HMVMX_CHECK_BREAK((u32GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
9285 HMVMX_CHECK_BREAK(!(u32GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
9286 if ( !fUnrestrictedGuest
9287 && (u32GuestCr0 & X86_CR0_PG)
9288 && !(u32GuestCr0 & X86_CR0_PE))
9289 {
9290 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
9291 }
9292
9293 /*
9294 * CR4.
9295 */
9296 uint64_t const fSetCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
9297 uint64_t const fZapCr4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
9298
9299 uint32_t u32GuestCr4;
9300 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32GuestCr4);
9301 AssertRCBreak(rc);
9302 HMVMX_CHECK_BREAK((u32GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
9303 HMVMX_CHECK_BREAK(!(u32GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
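/*
 * Aside on the fixed-bit checks above (same formula for CR0 and CR4): a bit must be 1 if it
 * is 1 in both FIXED0 and FIXED1 (fSet = Fixed0 & Fixed1) and may only be 1 if it is 1 in at
 * least one of them (fZap = Fixed0 | Fixed1). For example, with the common CR0 values
 * Fixed0=0x80000021 and Fixed1=0xffffffff, PE, NE and PG must be set and no bit is forced to
 * zero; unrestricted guest execution relaxes PE and PG as done above.
 */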
9304
9305 /*
9306 * IA32_DEBUGCTL MSR.
9307 */
9308 uint64_t u64Val;
9309 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
9310 AssertRCBreak(rc);
9311 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
9312 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
9313 {
9314 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
9315 }
9316 uint64_t u64DebugCtlMsr = u64Val;
9317
9318#ifdef VBOX_STRICT
9319 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
9320 AssertRCBreak(rc);
9321 Assert(u32Val == pVmcsInfo->u32EntryCtls);
9322#endif
9323 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
9324
9325 /*
9326 * RIP and RFLAGS.
9327 */
9328 uint32_t u32Eflags;
9329#if HC_ARCH_BITS == 64
9330 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
9331 AssertRCBreak(rc);
9332 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
9333 if ( !fLongModeGuest
9334 || !pCtx->cs.Attr.n.u1Long)
9335 {
9336 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
9337 }
9338 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
9339 * must be identical if the "IA-32e mode guest" VM-entry
9340 * control is 1 and CS.L is 1. No check applies if the
9341 * CPU supports 64 linear-address bits. */
9342
9343 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
9344 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
9345 AssertRCBreak(rc);
9346 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
9347 VMX_IGS_RFLAGS_RESERVED);
9348 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
9349 u32Eflags = u64Val;
9350#else
9351 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags);
9352 AssertRCBreak(rc);
9353 HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED); /* Bit 31:22, Bit 15, 5, 3 MBZ. */
9354 HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
9355#endif
9356
9357 if ( fLongModeGuest
9358 || ( fUnrestrictedGuest
9359 && !(u32GuestCr0 & X86_CR0_PE)))
9360 {
9361 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
9362 }
9363
9364 uint32_t u32EntryInfo;
9365 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
9366 AssertRCBreak(rc);
9367 if ( VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo)
9368 && VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_EXT_INT)
9369 {
9370 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
9371 }
9372
9373 /*
9374 * 64-bit checks.
9375 */
9376#if HC_ARCH_BITS == 64
9377 if (fLongModeGuest)
9378 {
9379 HMVMX_CHECK_BREAK(u32GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
9380 HMVMX_CHECK_BREAK(u32GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
9381 }
9382
9383 if ( !fLongModeGuest
9384 && (u32GuestCr4 & X86_CR4_PCIDE))
9385 {
9386 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
9387 }
9388
9389 /** @todo CR3 field must be such that bits 63:52 and bits in the range
9390 * 51:32 beyond the processor's physical-address width are 0. */
9391
9392 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
9393 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
9394 {
9395 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
9396 }
9397
9398 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
9399 AssertRCBreak(rc);
9400 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
9401
9402 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
9403 AssertRCBreak(rc);
9404 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
9405#endif
9406
9407 /*
9408 * PERF_GLOBAL MSR.
9409 */
9410 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
9411 {
9412 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
9413 AssertRCBreak(rc);
9414 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
9415 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
9416 }
9417
9418 /*
9419 * PAT MSR.
9420 */
9421 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
9422 {
9423 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
9424 AssertRCBreak(rc);
9425 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each PAT entry MBZ. */
9426 for (unsigned i = 0; i < 8; i++)
9427 {
9428 uint8_t u8Val = (u64Val & 0xff);
9429 if ( u8Val != 0 /* UC */
9430 && u8Val != 1 /* WC */
9431 && u8Val != 4 /* WT */
9432 && u8Val != 5 /* WP */
9433 && u8Val != 6 /* WB */
9434 && u8Val != 7 /* UC- */)
9435 {
9436 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
9437 }
9438 u64Val >>= 8;
9439 }
9440 }
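/*
 * For reference, the power-on default PAT value 0x0007040600070406 (WB, WT, UC-, UC repeated)
 * passes the per-entry loop above: every byte is one of the valid memory types 0, 1, 4, 5, 6, 7.
 */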
9441
9442 /*
9443 * EFER MSR.
9444 */
9445 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
9446 {
9447 Assert(pVM->hm.s.vmx.fSupportsVmcsEfer);
9448 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
9449 AssertRCBreak(rc);
9450 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
9451 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
9452 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
9453 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
9454 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
9455 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
9456 * iemVmxVmentryCheckGuestState(). */
9457 HMVMX_CHECK_BREAK( fUnrestrictedGuest
9458 || !(u32GuestCr0 & X86_CR0_PG)
9459 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
9460 VMX_IGS_EFER_LMA_LME_MISMATCH);
9461 }
9462
9463 /*
9464 * Segment registers.
9465 */
9466 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9467 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
9468 if (!(u32Eflags & X86_EFL_VM))
9469 {
9470 /* CS */
9471 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
9472 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
9473 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
9474 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
9475 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
9476 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
9477 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
9478 /* CS cannot be loaded with NULL in protected mode. */
9479 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
9480 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
9481 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
9482 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
9483 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
9484 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
9485 else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
9486 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
9487 else
9488 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
9489
9490 /* SS */
9491 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9492 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
9493 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
9494 if ( !(pCtx->cr0 & X86_CR0_PE)
9495 || pCtx->cs.Attr.n.u4Type == 3)
9496 {
9497 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
9498 }
9499 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
9500 {
9501 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
9502 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
9503 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
9504 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
9505 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
9506 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
9507 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
9508 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
9509 }
9510
9511 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSReg(). */
9512 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
9513 {
9514 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
9515 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
9516 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9517 || pCtx->ds.Attr.n.u4Type > 11
9518 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
9519 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
9520 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
9521 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
9522 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
9523 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
9524 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
9525 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9526 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
9527 }
9528 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
9529 {
9530 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
9531 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
9532 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9533 || pCtx->es.Attr.n.u4Type > 11
9534 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
9535 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
9536 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
9537 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
9538 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
9539 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
9540 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
9541 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9542 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
9543 }
9544 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
9545 {
9546 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
9547 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
9548 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9549 || pCtx->fs.Attr.n.u4Type > 11
9550 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
9551 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
9552 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
9553 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
9554 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
9555 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
9556 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
9557 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9558 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
9559 }
9560 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
9561 {
9562 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
9563 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
9564 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
9565 || pCtx->gs.Attr.n.u4Type > 11
9566 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
9567 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
9568 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
9569 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
9570 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
9571 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
9572 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
9573 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9574 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
9575 }
9576 /* 64-bit capable CPUs. */
9577#if HC_ARCH_BITS == 64
9578 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
9579 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
9580 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9581 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
9582 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
9583 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
9584 VMX_IGS_LONGMODE_SS_BASE_INVALID);
9585 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
9586 VMX_IGS_LONGMODE_DS_BASE_INVALID);
9587 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
9588 VMX_IGS_LONGMODE_ES_BASE_INVALID);
9589#endif
9590 }
9591 else
9592 {
9593 /* V86 mode checks. */
9594 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
9595 if (pVmcsInfo->RealMode.fRealOnV86Active)
9596 {
9597 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
9598 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
9599 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
9600 }
9601 else
9602 {
9603 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
9604 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
9605 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
9606 }
9607
9608 /* CS */
9609 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
9610 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
9611 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
9612 /* SS */
9613 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
9614 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
9615 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
9616 /* DS */
9617 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
9618 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
9619 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
9620 /* ES */
9621 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
9622 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
9623 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
9624 /* FS */
9625 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
9626 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
9627 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
9628 /* GS */
9629 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
9630 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
9631 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
9632 /* 64-bit capable CPUs. */
9633#if HC_ARCH_BITS == 64
9634 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
9635 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
9636 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9637 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
9638 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
9639 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
9640 VMX_IGS_LONGMODE_SS_BASE_INVALID);
9641 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
9642 VMX_IGS_LONGMODE_DS_BASE_INVALID);
9643 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
9644 VMX_IGS_LONGMODE_ES_BASE_INVALID);
9645#endif
9646 }
9647
9648 /*
9649 * TR.
9650 */
9651 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
9652 /* 64-bit capable CPUs. */
9653#if HC_ARCH_BITS == 64
9654 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
9655#endif
9656 if (fLongModeGuest)
9657 {
9658 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
9659 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
9660 }
9661 else
9662 {
9663 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
9664 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
9665 VMX_IGS_TR_ATTR_TYPE_INVALID);
9666 }
9667 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
9668 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
9669 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
9670 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
9671 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
9672 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
9673 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
9674 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
9675
9676 /*
9677 * GDTR and IDTR.
9678 */
9679#if HC_ARCH_BITS == 64
9680 rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
9681 AssertRCBreak(rc);
9682 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
9683
9684 rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
9685 AssertRCBreak(rc);
9686 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
9687#endif
9688
9689 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
9690 AssertRCBreak(rc);
9691 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
9692
9693 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
9694 AssertRCBreak(rc);
9695 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
9696
9697 /*
9698 * Guest Non-Register State.
9699 */
9700 /* Activity State. */
9701 uint32_t u32ActivityState;
9702 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
9703 AssertRCBreak(rc);
9704 HMVMX_CHECK_BREAK( !u32ActivityState
9705 || (u32ActivityState & RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
9706 VMX_IGS_ACTIVITY_STATE_INVALID);
9707 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
9708 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
9709 uint32_t u32IntrState;
9710 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
9711 AssertRCBreak(rc);
9712 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
9713 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
9714 {
9715 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
9716 }
9717
9718 /** @todo Activity state and injecting interrupts. Left as a todo since we
9719 * currently don't use any activity state other than ACTIVE. */
9720
9721 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
9722 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
9723
9724 /* Guest interruptibility-state. */
9725 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
9726 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
9727 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
9728 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
9729 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
9730 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
9731 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
9732 if (VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo))
9733 {
9734 if (VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_EXT_INT)
9735 {
9736 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
9737 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
9738 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
9739 }
9740 else if (VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_NMI)
9741 {
9742 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
9743 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
9744 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
9745 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
9746 }
9747 }
9748 /** @todo Assumes the processor is not in SMM. */
9749 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
9750 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
9751 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
9752 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
9753 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
9754 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
9755 && VMX_ENTRY_INT_INFO_IS_VALID(u32EntryInfo)
9756 && VMX_ENTRY_INT_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INT_INFO_TYPE_NMI)
9757 {
9758 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI),
9759 VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
9760 }
9761
9762 /* Pending debug exceptions. */
9763#if HC_ARCH_BITS == 64
9764 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
9765 AssertRCBreak(rc);
9766 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
9767 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
9768 u32Val = u64Val; /* For pending debug exceptions checks below. */
9769#else
9770 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u32Val);
9771 AssertRCBreak(rc);
9772 /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
9773 HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
9774#endif
9775
9776 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
9777 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
9778 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
9779 {
9780 if ( (u32Eflags & X86_EFL_TF)
9781 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
9782 {
9783 /* Bit 14 is PendingDebug.BS. */
9784 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
9785 }
9786 if ( !(u32Eflags & X86_EFL_TF)
9787 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
9788 {
9789 /* Bit 14 is PendingDebug.BS. */
9790 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
9791 }
9792 }
9793
9794 /* VMCS link pointer. */
9795 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
9796 AssertRCBreak(rc);
9797 if (u64Val != UINT64_C(0xffffffffffffffff))
9798 {
9799 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
9800 /** @todo Bits beyond the processor's physical-address width MBZ. */
9801 /** @todo 32-bit located in memory referenced by value of this field (as a
9802 * physical address) must contain the processor's VMCS revision ID. */
9803 /** @todo SMM checks. */
9804 }
9805
9806 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
9807 * not using nested paging? */
9808 if ( pVM->hm.s.fNestedPaging
9809 && !fLongModeGuest
9810 && CPUMIsGuestInPAEModeEx(pCtx))
9811 {
9812 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
9813 AssertRCBreak(rc);
9814 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9815
9816 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
9817 AssertRCBreak(rc);
9818 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9819
9820 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
9821 AssertRCBreak(rc);
9822 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9823
9824 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
9825 AssertRCBreak(rc);
9826 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
9827 }
9828
9829 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
9830 if (uError == VMX_IGS_ERROR)
9831 uError = VMX_IGS_REASON_NOT_FOUND;
9832 } while (0);
9833
9834 pVCpu->hm.s.u32HMError = uError;
9835 return uError;
9836
9837#undef HMVMX_ERROR_BREAK
9838#undef HMVMX_CHECK_BREAK
9839}
9840
9841
9842/**
9843 * Sets up the APIC-access page for virtualizing APIC access.
9844 *
9845 * This can cause longjmps to ring-3 due to the acquisition of the PGM lock, hence
9846 * this is not done as part of exporting guest state; see @bugref{8721}.
9847 *
9848 * @returns VBox status code.
9849 * @param pVCpu The cross context virtual CPU structure.
9850 */
9851static int hmR0VmxMapHCApicAccessPage(PVMCPU pVCpu)
9852{
9853 PVM pVM = pVCpu->CTX_SUFF(pVM);
9854 uint64_t const u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
9855
9856 Assert(PDMHasApic(pVM));
9857 Assert(u64MsrApicBase);
9858
9859 RTGCPHYS const GCPhysApicBase = u64MsrApicBase & PAGE_BASE_GC_MASK;
9860 Log4Func(("Mapping HC APIC-access page at %#RGp\n", GCPhysApicBase));
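/* With the architectural default APIC base MSR value, GCPhysApicBase resolves to 0xfee00000;
   a relocated APIC base is handled the same way since only the page-base bits are used. */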
9861
9862 /* Unalias any existing mapping. */
9863 int rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
9864 AssertRCReturn(rc, rc);
9865
9866 /* Map the HC APIC-access page in place of the MMIO page, also updates the shadow page tables if necessary. */
9867 Assert(pVM->hm.s.vmx.HCPhysApicAccess != NIL_RTHCPHYS);
9868 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
9869 AssertRCReturn(rc, rc);
9870
9871 /* Update the per-VCPU cache of the APIC base MSR. */
9872 pVCpu->hm.s.vmx.u64GstMsrApicBase = u64MsrApicBase;
9873 return VINF_SUCCESS;
9874}
9875
9876
9877#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9878/**
9879 * Merges the guest MSR bitmap with the nested-guest MSR bitmap in preparation for
9880 * executing the nested-guest using hardware-assisted VMX.
9881 *
9882 * @param pVCpu The cross context virtual CPU structure.
9883 * @param pVmcsInfoNstGst The nested-guest VMCS info. object.
9884 * @param pVmcsInfoGst The guest VMCS info. object.
9885 */
9886static void hmR0VmxMergeMsrBitmapNested(PCVMCPU pVCpu, PVMXVMCSINFO pVmcsInfoNstGst, PCVMXVMCSINFO pVmcsInfoGst)
9887{
9888 uint64_t const *pu64MsrBitmapNstGst = (uint64_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap);
9889 uint64_t const *pu64MsrBitmapGst = (uint64_t const *)pVmcsInfoGst->pvMsrBitmap;
9890 uint64_t *pu64MsrBitmap = (uint64_t *)pVmcsInfoNstGst->pvMsrBitmap;
9891 Assert(pu64MsrBitmapNstGst);
9892 Assert(pu64MsrBitmapGst);
9893 Assert(pu64MsrBitmap);
9894
9895 /*
9896 * We merge the guest MSR bitmap with the nested-guest MSR bitmap such that any
9897 * MSR that is intercepted by the guest is also intercepted while executing the
9898 * nested-guest using hardware-assisted VMX.
9899 */
9900 uint32_t const cbFrag = sizeof(uint64_t);
9901 uint32_t const cFrags = X86_PAGE_4K_SIZE / cbFrag;
9902 for (uint32_t i = 0; i < cFrags; i++)
9903 pu64MsrBitmap[i] = pu64MsrBitmapNstGst[i] | pu64MsrBitmapGst[i];
9904}
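/*
 * A minimal standalone sketch (not compiled, hypothetical names) of the merge above: a set bit
 * in an MSR bitmap means "intercept", so OR-ing the two 4K bitmaps keeps an MSR intercepted if
 * either the guest or the nested-guest hypervisor intercepts it.
 */
#if 0
static void vmxSketchOrMsrBitmaps(uint64_t *pau64Dst, uint64_t const *pau64A, uint64_t const *pau64B)
{
    uint32_t const cQwords = X86_PAGE_4K_SIZE / sizeof(uint64_t);   /* 512 qwords per 4K page. */
    for (uint32_t i = 0; i < cQwords; i++)
        pau64Dst[i] = pau64A[i] | pau64B[i];
}
#endif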
9905
9906
9907/**
9908 * Merges the guest VMCS controls into the nested-guest VMCS controls in preparation
9909 * for hardware-assisted VMX execution of the nested-guest.
9910 *
9911 * For a guest, we don't modify these controls once we set up the VMCS.
9912 *
9913 * For nested-guests, the guest hypervisor provides these controls on every
9914 * nested-guest VM-entry and could potentially change them each time, so we need
9915 * to merge them before every nested-guest VM-entry.
9916 *
9917 * @returns VBox status code.
9918 * @param pVCpu The cross context virtual CPU structure.
9919 */
9920static int hmR0VmxMergeVmcsNested(PVMCPU pVCpu)
9921{
9922 PVM pVM = pVCpu->CTX_SUFF(pVM);
9923 PCVMXVMCSINFO pVmcsInfoGst = &pVCpu->hm.s.vmx.VmcsInfo;
9924 PCVMXVVMCS pVmcsNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
9925 Assert(pVmcsNstGst);
9926
9927 /*
9928 * Merge the controls with the requirements of the guest VMCS.
9929 *
9930 * We do not need to validate the nested-guest VMX features specified in the
9931 * nested-guest VMCS with the features supported by the physical CPU as it's
9932 * already done by the VMLAUNCH/VMRESUME instruction emulation.
9933 *
9934 * This is because the VMX features exposed by CPUM (through CPUID/MSRs) to the
9935 * guest are derived from the VMX features supported by the physical CPU.
9936 */
9937
9938 /* Pin-based VM-execution controls. */
9939 uint32_t const u32PinCtls = pVmcsNstGst->u32PinCtls | pVmcsInfoGst->u32PinCtls;
9940
9941 /* Processor-based VM-execution controls. */
9942 uint32_t u32ProcCtls = (pVmcsNstGst->u32ProcCtls & ~VMX_PROC_CTLS_USE_IO_BITMAPS)
9943 | (pVmcsInfoGst->u32ProcCtls & ~( VMX_PROC_CTLS_INT_WINDOW_EXIT
9944 | VMX_PROC_CTLS_NMI_WINDOW_EXIT
9945 | VMX_PROC_CTLS_USE_TPR_SHADOW
9946 | VMX_PROC_CTLS_MONITOR_TRAP_FLAG));
9947
9948 /* Secondary processor-based VM-execution controls. */
9949 uint32_t const u32ProcCtls2 = (pVmcsNstGst->u32ProcCtls2 & ~VMX_PROC_CTLS2_VPID)
9950 | (pVmcsInfoGst->u32ProcCtls2 & ~( VMX_PROC_CTLS2_VIRT_APIC_ACCESS
9951 | VMX_PROC_CTLS2_INVPCID
9952 | VMX_PROC_CTLS2_RDTSCP
9953 | VMX_PROC_CTLS2_XSAVES_XRSTORS
9954 | VMX_PROC_CTLS2_APIC_REG_VIRT
9955 | VMX_PROC_CTLS2_VIRT_INT_DELIVERY
9956 | VMX_PROC_CTLS2_VMFUNC));
9957
9958 /*
9959 * VM-entry controls:
9960 * These controls contain state that depends on the nested-guest state (primarily
9961 * the EFER MSR) and is thus not constant across VMLAUNCH/VMRESUME and the nested-guest
9962 * VM-exit. Although the nested-hypervisor cannot change them directly, we need to
9963 * update them in order to properly continue executing the nested-guest if the EFER MSR
9964 * changes but does not cause a nested-guest VM-exit.
9965 *
9966 * VM-exit controls:
9967 * These controls specify the host state on return. We cannot use the controls from
9968 * the nested-hypervisor state as-is, since they would contain the guest state rather than
9969 * the host state. Since the host state is subject to change (e.g. preemption, trips
9970 * to ring-3, longjmp and rescheduling to a different host CPU) they are not constant
9971 * through VMLAUNCH/VMRESUME and the nested-guest VM-exit.
9972 *
9973 * VM-entry MSR-load:
9974 * The guest MSRs from the VM-entry MSR-load area are already loaded into the
9975 * guest-CPU context by the VMLAUNCH/VMRESUME instruction emulation.
9976 *
9977 * VM-exit MSR-store:
9978 * The VM-exit emulation will take care of populating the MSRs from the guest-CPU
9979 * context back into the VM-exit MSR-store area.
9980 *
9981 * VM-exit MSR-load areas:
9982 * This must contain the real host MSRs with hardware-assisted VMX execution. Hence,
9983 * we can entirely ignore what the nested-hypervisor wants to load here.
9984 */
9985
9986 /*
9987 * Exception bitmap.
9988 *
9989 * We could remove #UD from the guest bitmap and merge it with the nested-guest
9990 * bitmap here (and avoid doing anything while exporting nested-guest state), but to
9991 * keep the code more flexible if intercepting exceptions become more dynamic in
9992 * the future we do it as part of exporting the nested-guest state.
9993 */
9994 uint32_t const u32XcptBitmap = pVmcsNstGst->u32XcptBitmap | pVmcsInfoGst->u32XcptBitmap;
9995
9996 /*
9997 * CR0/CR4 guest/host mask.
9998 *
9999 * Modifications by the nested-guest to CR0/CR4 bits owned by the host and the guest
10000 * must cause VM-exits, so we need to merge them here.
10001 */
10002 uint64_t const u64Cr0Mask = pVmcsNstGst->u64Cr0Mask.u | pVmcsInfoGst->u64Cr0Mask;
10003 uint64_t const u64Cr4Mask = pVmcsNstGst->u64Cr4Mask.u | pVmcsInfoGst->u64Cr4Mask;
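/*
 * For example, if the guest VMCS owns CR0.CD/NW while the nested-guest hypervisor owns CR0.PE,
 * the merged mask owns all three bits, so a nested-guest write to any of them is intercepted.
 */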
10004
10005 /*
10006 * Page-fault error-code mask and match.
10007 *
10008 * Although we require unrestricted guest execution (and thereby nested-paging) for
10009 * hardware-assisted VMX execution of nested-guests, and thus the outer guest doesn't
10010 * normally intercept #PFs, it might still intercept them for debugging purposes.
10011 *
10012 * If the outer guest is not intercepting #PFs, we can use the nested-guest #PF
10013 * filters. If the outer guest is intercepting #PFs we must intercept all #PFs.
10014 */
10015 uint32_t u32XcptPFMask;
10016 uint32_t u32XcptPFMatch;
10017 if (!(pVmcsInfoGst->u32XcptBitmap & RT_BIT(X86_XCPT_PF)))
10018 {
10019 u32XcptPFMask = pVmcsNstGst->u32XcptPFMask;
10020 u32XcptPFMatch = pVmcsNstGst->u32XcptPFMatch;
10021 }
10022 else
10023 {
10024 u32XcptPFMask = 0;
10025 u32XcptPFMatch = 0;
10026 }
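/*
 * Recall the #PF filtering rule: with bit 14 (#PF) set in the exception bitmap, a page fault
 * causes a VM-exit when (error-code & PFEC_MASK) == PFEC_MATCH. Hence mask = match = 0 above
 * makes every #PF match, i.e. all #PFs are intercepted when the outer guest intercepts them.
 */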
10027
10028 /*
10029 * Pause-Loop exiting.
10030 */
10031 uint32_t const cPleGapTicks = RT_MIN(pVM->hm.s.vmx.cPleGapTicks, pVmcsNstGst->u32PleGap);
10032 uint32_t const cPleWindowTicks = RT_MIN(pVM->hm.s.vmx.cPleWindowTicks, pVmcsNstGst->u32PleWindow);
10033
10034 /*
10035 * I/O Bitmap.
10036 *
10037 * We do not use the I/O bitmap that may be provided by the guest hypervisor as we
10038 * always intercept all I/O port accesses.
10039 */
10040 Assert(u32ProcCtls & VMX_PROC_CTLS_UNCOND_IO_EXIT);
10041
10042 /*
10043 * APIC-access page.
10044 *
10045 * The APIC-access page address has already been initialized while setting up the
10046 * nested-guest VMCS. In theory, even if the guest-physical address is invalid, it
10047 * should not be of any consequence to the host or to the guest for that matter, but
10048 * we only accept valid addresses verified by the VMLAUNCH/VMRESUME instruction
10049 * emulation to keep it simple.
10050 */
10051
10052 /*
10053 * Virtual-APIC page and TPR threshold.
10054 *
10055 * We shall use the host-physical address of the virtual-APIC page in guest memory directly.
10056 * For this reason, we can access the virtual-APIC page of the nested-guest only using
10057 * PGM physical handlers as we must not assume a kernel virtual-address mapping exists and
10058 * requesting PGM for a mapping could be expensive/resource intensive (PGM mapping cache).
10059 */
10060 RTHCPHYS HCPhysVirtApic = NIL_RTHCPHYS;
10061 uint32_t const u32TprThreshold = pVmcsNstGst->u32TprThreshold;
10062 if (u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
10063 {
10064 int rc = PGMPhysGCPhys2HCPhys(pVM, pVmcsNstGst->u64AddrVirtApic.u, &HCPhysVirtApic);
10065
10066 /*
10067 * If the guest hypervisor has loaded crap into the virtual-APIC page field
10068 * we would fail to obtain a valid host-physical address for its guest-physical
10069 * address.
10070 *
10071 * We currently do not support this scenario. Maybe in the future if there is a
10072 * pressing need we can explore making this particular set of conditions work.
10073 * Right now we just cause a VM-entry failure.
10074 *
10075 * This has already been checked by VMLAUNCH/VMRESUME instruction emulation,
10076 * so it should not really fail at the moment.
10077 */
10078 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
10079 }
10080 else
10081 {
10082 /*
10083 * We must make sure CR8 reads/writes cause VM-exits when TPR shadowing is not
10084 * used by the guest hypervisor. Preventing MMIO accesses to the physical APIC will
10085 * be taken care of by EPT/shadow paging.
10086 */
10087 if (pVM->hm.s.fAllow64BitGuests)
10088 {
10089 u32ProcCtls |= VMX_PROC_CTLS_CR8_STORE_EXIT
10090 | VMX_PROC_CTLS_CR8_LOAD_EXIT;
10091 }
10092 }
10093
10094 /*
10095 * Validate basic assumptions.
10096 */
10097 PVMXVMCSINFO pVmcsInfoNstGst = &pVCpu->hm.s.vmx.VmcsInfoNstGst;
10098 Assert(pVM->hm.s.vmx.fAllowUnrestricted);
10099 Assert(pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS);
10100 Assert(hmGetVmxActiveVmcsInfo(pVCpu) == pVmcsInfoNstGst);
10101
10102 /*
10103 * Commit it to the nested-guest VMCS.
10104 */
10105 int rc = VINF_SUCCESS;
10106 if (pVmcsInfoNstGst->u32PinCtls != u32PinCtls)
10107 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, u32PinCtls);
10108 if (pVmcsInfoNstGst->u32ProcCtls != u32ProcCtls)
10109 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, u32ProcCtls);
10110 if (pVmcsInfoNstGst->u32ProcCtls2 != u32ProcCtls2)
10111 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, u32ProcCtls2);
10112 if (pVmcsInfoNstGst->u32XcptBitmap != u32XcptBitmap)
10113 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
10114 if (pVmcsInfoNstGst->u64Cr0Mask != u64Cr0Mask)
10115 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask);
10116 if (pVmcsInfoNstGst->u64Cr4Mask != u64Cr4Mask)
10117 rc |= VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask);
10118 if (pVmcsInfoNstGst->u32XcptPFMask != u32XcptPFMask)
10119 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, u32XcptPFMask);
10120 if (pVmcsInfoNstGst->u32XcptPFMatch != u32XcptPFMatch)
10121 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, u32XcptPFMatch);
10122 if ( !(u32ProcCtls & VMX_PROC_CTLS_PAUSE_EXIT)
10123 && (u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10124 {
10125 Assert(pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT);
10126 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, cPleGapTicks);
10127 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, cPleWindowTicks);
10128 }
10129 if (u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
10130 {
10131 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
10132 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, HCPhysVirtApic);
10133 }
10134 AssertRCReturn(rc, rc);
10135
10136 /*
10137 * Update the nested-guest VMCS cache.
10138 */
10139 pVmcsInfoNstGst->u32PinCtls = u32PinCtls;
10140 pVmcsInfoNstGst->u32ProcCtls = u32ProcCtls;
10141 pVmcsInfoNstGst->u32ProcCtls2 = u32ProcCtls2;
10142 pVmcsInfoNstGst->u32XcptBitmap = u32XcptBitmap;
10143 pVmcsInfoNstGst->u64Cr0Mask = u64Cr0Mask;
10144 pVmcsInfoNstGst->u64Cr4Mask = u64Cr4Mask;
10145 pVmcsInfoNstGst->u32XcptPFMask = u32XcptPFMask;
10146 pVmcsInfoNstGst->u32XcptPFMatch = u32XcptPFMatch;
10147 pVmcsInfoNstGst->HCPhysVirtApic = HCPhysVirtApic;
10148
10149 /*
10150 * MSR bitmap.
10151 *
10152 * The MSR bitmap address has already been initialized while setting up the
10153 * nested-guest VMCS, here we need to merge the MSR bitmaps.
10154 */
10155 if (u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
10156 hmR0VmxMergeMsrBitmapNested(pVCpu, pVmcsInfoNstGst, pVmcsInfoGst);
10157
10158 return VINF_SUCCESS;
10159}
10160#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10161
10162
10163/**
10164 * Does the preparations before executing guest code in VT-x.
10165 *
10166 * This may cause longjmps to ring-3 and may even result in rescheduling to the
10167 * recompiler/IEM. We must be cautious about committing guest-state information
10168 * into the VMCS here, since we cannot assume that we will assuredly execute the
10169 * guest in VT-x mode.
10170 *
10171 * If we fall back to the recompiler/IEM after updating the VMCS and clearing
10172 * the common-state (TRPM/forceflags), we must undo those changes so that the
10173 * recompiler/IEM can (and should) use them when it resumes guest execution.
10174 * Otherwise such operations must be done when we can no longer exit to ring-3.
10175 *
10176 * @returns Strict VBox status code (i.e. informational status codes too).
10177 * @retval VINF_SUCCESS if we can proceed with running the guest, interrupts
10178 * have been disabled.
10179 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a
10180 * double-fault into the guest.
10181 * @retval VINF_EM_DBG_STEPPED if @a fStepping is true and an event was
10182 * dispatched directly.
10183 * @retval VINF_* scheduling changes, we have to go back to ring-3.
10184 *
10185 * @param pVCpu The cross context virtual CPU structure.
10186 * @param pVmxTransient The VMX-transient structure.
10187 * @param fStepping Whether we are single-stepping the guest in the
10188 * hypervisor debugger. Makes us ignore some of the reasons
10189 * for returning to ring-3, and return VINF_EM_DBG_STEPPED
10190 * if event dispatching took place.
10191 */
10192static VBOXSTRICTRC hmR0VmxPreRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, bool fStepping)
10193{
10194 Assert(VMMRZCallRing3IsEnabled(pVCpu));
10195
10196#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
10197 if (pVmxTransient->fIsNestedGuest)
10198 {
10199 RT_NOREF2(pVCpu, fStepping);
10200 Log2Func(("Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
10201 return VINF_EM_RESCHEDULE_REM;
10202 }
10203#endif
10204
10205#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
10206 PGMRZDynMapFlushAutoSet(pVCpu);
10207#endif
10208
10209 /*
10210 * Check and process force flag actions, some of which might require us to go back to ring-3.
10211 */
10212 VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVCpu, fStepping);
10213 if (rcStrict == VINF_SUCCESS)
10214 { /* FFs don't get set all the time. */ }
10215 else
10216 return rcStrict;
10217
10218#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10219 /*
10220 * Switch to the nested-guest VMCS as we may have transitioned into executing
10221 * the nested-guest without leaving ring-0. Otherwise, if we came from ring-3
10222 * we would load the nested-guest VMCS while entering the VMX ring-0 session.
10223 *
10224 * We do this as late as possible to minimize (though not completely remove)
10225 * clearing/loading VMCS again due to premature trips to ring-3 above.
10226 */
10227 if (pVmxTransient->fIsNestedGuest)
10228 {
10229 if (!pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs)
10230 {
10231 /*
10232 * Ensure we have synced everything from the guest VMCS and also flag that
10233 * we need to export the full (nested) guest-CPU context to the
10234 * nested-guest VMCS.
10235 */
10236 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
10237 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST);
10238
10239 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
10240 int rc = hmR0VmxSwitchVmcs(&pVCpu->hm.s.vmx.VmcsInfo, &pVCpu->hm.s.vmx.VmcsInfoNstGst);
10241 if (RT_LIKELY(rc == VINF_SUCCESS))
10242 {
10243 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs = true;
10244 ASMSetFlags(fEFlags);
10245 pVmxTransient->pVmcsInfo = &pVCpu->hm.s.vmx.VmcsInfoNstGst;
10246
10247 /*
10248 * We use a different VM-exit MSR-store area for the nested-guest. Hence,
10249 * flag that we need to update the host MSR values there. Even if we decide
10250 * in the future to share the VM-exit MSR-store area page with the guest,
10251 * if its content differs, we would have to update the host MSRs anyway.
10252 */
10253 pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = false;
10254 Assert(!pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer); /** @todo NSTVMX: Paranoia remove later. */
10255 }
10256 else
10257 {
10258 ASMSetFlags(fEFlags);
10259 return rc;
10260 }
10261 }
10262
10263 /*
10264 * Merge guest VMCS controls with the nested-guest VMCS controls.
10265 *
10266 * Even if we have not executed the guest prior to this (e.g. when resuming
10267 * from a saved state), we should be okay with merging controls as we
10268 * initialize the guest VMCS controls as part of VM setup phase.
10269 */
10270 if (!pVCpu->hm.s.vmx.fMergedNstGstCtls)
10271 {
10272 int rc = hmR0VmxMergeVmcsNested(pVCpu);
10273 AssertRCReturn(rc, rc);
10274 pVCpu->hm.s.vmx.fMergedNstGstCtls = true;
10275 }
10276 }
10277#endif
10278
10279 /*
10280 * Virtualize memory-mapped accesses to the physical APIC (may take locks).
10281 * We look at the guest VMCS control here as we always set it when supported by
10282 * the physical CPU. Looking at the nested-guest control here would not be
10283 * possible because they are not merged yet.
10284 */
10285 PVM pVM = pVCpu->CTX_SUFF(pVM);
10286 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10287 Assert(pVmcsInfo);
10288 if ( !pVCpu->hm.s.vmx.u64GstMsrApicBase
10289 && (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
10290 && PDMHasApic(pVM))
10291 {
10292 int rc = hmR0VmxMapHCApicAccessPage(pVCpu);
10293 AssertRCReturn(rc, rc);
10294 }
10295
10296 /*
10297 * Evaluate events to be injected into the guest.
10298 *
10299 * Events in TRPM can be injected without inspecting the guest state.
10300 * If any new events (interrupts/NMI) are pending currently, we try to set up the
10301 * guest to cause a VM-exit the next time they are ready to receive the event.
10302 */
10303 if (TRPMHasTrap(pVCpu))
10304 hmR0VmxTrpmTrapToPendingEvent(pVCpu);
10305
10306 uint32_t fIntrState;
10307 rcStrict = hmR0VmxEvaluatePendingEvent(pVCpu, pVmxTransient, &fIntrState);
10308
10309#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10310 /*
10311     * If something failed while evaluating pending events (unlikely), or if we were
10312     * preparing to run a nested-guest but performed a nested-guest VM-exit instead, we should bail.
10313 */
10314 if ( rcStrict != VINF_SUCCESS
10315 || ( pVmxTransient->fIsNestedGuest
10316 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)))
10317 return rcStrict;
10318#endif
10319
10320 /*
10321 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus
10322 * needs to be done with longjmps or interrupts + preemption enabled. Event injection might
10323 * also result in triple-faulting the VM.
10324 *
10325 * The above does not apply when executing a nested-guest (since unrestricted guest execution
10326     * is a requirement); regardless, doing it this way avoids duplicating code elsewhere.
10327 */
10328 rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pVmxTransient, fIntrState, fStepping);
10329 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10330 { /* likely */ }
10331 else
10332 {
10333 AssertMsg(rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
10334 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10335 return rcStrict;
10336 }
10337
10338 /*
10339 * A longjump might result in importing CR3 even for VM-exits that don't necessarily
10340     * import CR3 themselves. We need to handle the updates here, as even a call as late as
10341     * hmR0VmxInjectPendingEvent() above may lazily import guest-CPU state on demand, causing
10342     * the force-flags below to be set.
10343 */
10344 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
10345 {
10346 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
10347 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
10348 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
10349 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
10350 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
10351 }
10352 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
10353 {
10354 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
10355 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
10356 }
10357
10358#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10359 /* Paranoia. */
10360 Assert(!pVmxTransient->fIsNestedGuest || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
10361#endif
10362
10363 /*
10364 * No longjmps to ring-3 from this point on!!!
10365 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
10366 * This also disables flushing of the R0-logger instance (if any).
10367 */
10368 VMMRZCallRing3Disable(pVCpu);
10369
10370 /*
10371 * Export the guest state bits.
10372 *
10373 * We cannot perform longjmps while loading the guest state because we do not preserve the
10374 * host/guest state (although the VMCS will be preserved) across longjmps which can cause
10375 * CPU migration.
10376 *
10377 * If we are injecting events to a real-on-v86 mode guest, we would have updated RIP and some segment
10378 * registers. Hence, loading of the guest state needs to be done -after- injection of events.
10379 */
10380 rcStrict = hmR0VmxExportGuestStateOptimal(pVCpu, pVmxTransient);
10381 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10382 { /* likely */ }
10383 else
10384 {
10385 VMMRZCallRing3Enable(pVCpu);
10386 return rcStrict;
10387 }
10388
10389 /*
10390 * We disable interrupts so that we don't miss any interrupts that would flag preemption
10391 * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with
10392 * preemption disabled for a while. Since this is purely to aid the
10393 * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily reenable and
10394     * disable interrupts on NT.
10395 *
10396     * We need to check for force-flags that could've possibly been altered since we last
10397 * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section,
10398 * see @bugref{6398}).
10399 *
10400 * We also check a couple of other force-flags as a last opportunity to get the EMT back
10401 * to ring-3 before executing guest code.
10402 */
10403 pVmxTransient->fEFlags = ASMIntDisableFlags();
10404
10405 if ( ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
10406 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
10407 || ( fStepping /* Optimized for the non-stepping case, so a bit of unnecessary work when stepping. */
10408 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
10409 {
10410 if (!RTThreadPreemptIsPending(NIL_RTTHREAD))
10411 {
10412 pVCpu->hm.s.Event.fPending = false;
10413
10414 /*
10415 * We've injected any pending events. This is really the point of no return (to ring-3).
10416 *
10417 * Note! The caller expects to continue with interrupts & longjmps disabled on successful
10418 * returns from this function, so don't enable them here.
10419 */
10420 return VINF_SUCCESS;
10421 }
10422
10423 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPendingHostIrq);
10424 rcStrict = VINF_EM_RAW_INTERRUPT;
10425 }
10426 else
10427 {
10428 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
10429 rcStrict = VINF_EM_RAW_TO_R3;
10430 }
10431
10432 ASMSetFlags(pVmxTransient->fEFlags);
10433 VMMRZCallRing3Enable(pVCpu);
10434
10435 return rcStrict;
10436}
10437
10438
10439/**
10440 * Final preparations before executing guest code using hardware-assisted VMX.
10441 *
10442 * We can no longer get preempted to a different host CPU and there are no returns
10443 * to ring-3. We ignore any errors that may happen from this point (e.g. VMWRITE
10444 * failures), this function is not intended to fail sans unrecoverable hardware
10445 * failures); this function is not intended to fail barring unrecoverable hardware
10446 *
10447 * @param pVCpu The cross context virtual CPU structure.
10448 * @param pVmxTransient The VMX-transient structure.
10449 *
10450 * @remarks Called with preemption disabled.
10451 * @remarks No-long-jump zone!!!
10452 */
10453static void hmR0VmxPreRunGuestCommitted(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
10454{
10455 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
10456 Assert(VMMR0IsLogFlushDisabled(pVCpu));
10457 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
10458 Assert(!pVCpu->hm.s.Event.fPending);
10459
10460 /*
10461 * Indicate start of guest execution and where poking EMT out of guest-context is recognized.
10462 */
10463 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
10464 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
10465
10466 PVM pVM = pVCpu->CTX_SUFF(pVM);
10467 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10468
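    /*
     * Make sure the guest FPU state is loaded before entering the guest. If loading it
     * modified host CR0 (VINF_CPUM_HOST_CR0_MODIFIED), flag that the host state must be
     * re-exported below.
     */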
10469 if (!CPUMIsGuestFPUStateActive(pVCpu))
10470 {
10471 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
10472 if (CPUMR0LoadGuestFPU(pVM, pVCpu) == VINF_CPUM_HOST_CR0_MODIFIED)
10473 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT;
10474 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
10475 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
10476 }
10477
10478 /*
10479 * Re-save the host state bits as we may've been preempted (only happens when
10480 * thread-context hooks are used or when the VM start function changes).
10481 * The 64-on-32 switcher saves the (64-bit) host state into the VMCS and if we
10482 * changed the switcher back to 32-bit, we *must* save the 32-bit host state here,
10483 * see @bugref{8432}.
10484 *
10485 * This may also happen when switching to/from a nested-guest VMCS without leaving
10486 * ring-0.
10487 */
10488 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)
10489 {
10490 int rc = hmR0VmxExportHostState(pVCpu);
10491 AssertRC(rc);
10492 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreemptExportHostState);
10493 }
10494 Assert(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT));
10495
10496 /*
10497 * Export the state shared between host and guest (FPU, debug, lazy MSRs).
10498 */
10499 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)
10500 hmR0VmxExportSharedState(pVCpu, pVmxTransient);
10501 AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
10502
10503 /*
10504 * Store status of the shared guest/host debug state at the time of VM-entry.
10505 */
10506#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
10507 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
10508 {
10509 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
10510 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
10511 }
10512 else
10513#endif
10514 {
10515 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
10516 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
10517 }
10518
10519 /*
10520 * Always cache the TPR-shadow if the virtual-APIC page exists, thereby skipping
10521 * more than one conditional check. The post-run side of our code shall determine
10522 * if it needs to sync. the virtual APIC TPR with the TPR-shadow.
10523 */
10524 if (pVmcsInfo->pbVirtApic)
10525 pVmxTransient->u8GuestTpr = pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR];
10526
10527 /*
10528 * Update the host MSRs values in the VM-exit MSR-load area.
10529 */
10530 if (!pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs)
10531 {
10532 if (pVmcsInfo->cExitMsrLoad > 0)
10533 hmR0VmxUpdateAutoLoadHostMsrs(pVCpu, pVmcsInfo);
10534 pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs = true;
10535 }
10536
10537 /*
10538 * Evaluate if we need to intercept guest RDTSC/P accesses. Set up the
10539 * VMX-preemption timer based on the next virtual sync clock deadline.
10540 */
10541 PHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
10542 RTCPUID const idCurrentCpu = pHostCpu->idCpu;
10543 if ( !pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer
10544 || idCurrentCpu != pVCpu->hm.s.idLastCpu)
10545 {
10546 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pVmxTransient);
10547 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = true;
10548 }
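    /* Note: the TSC offsetting/preemption timer update above is redone on the first run
       and whenever we have been rescheduled onto a different host CPU since the last
       VM-entry (cf. the condition above). */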
10549
10550 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */
10551 hmR0VmxFlushTaggedTlb(pHostCpu, pVCpu, pVmcsInfo); /* Invalidate the appropriate guest entries from the TLB. */
10552 Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
10553 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Update the error reporting info. with the current host CPU. */
10554
10555 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
10556
10557 TMNotifyStartOfExecution(pVCpu); /* Notify TM to resume its clocks when TSC is tied to execution,
10558                                           as we're about to start executing the guest. */
10559
10560 /*
10561 * Load the guest TSC_AUX MSR when we are not intercepting RDTSCP.
10562 *
10563 * This is done this late as updating the TSC offsetting/preemption timer above
10564 * figures out if we can skip intercepting RDTSCP by calculating the number of
10565 * host CPU ticks till the next virtual sync deadline (for the dynamic case).
10566 */
10567 if (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_RDTSCP)
10568 {
10569 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
10570 {
10571 hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_TSC_AUX);
10572 /* NB: Because we call hmR0VmxAddAutoLoadStoreMsr with fUpdateHostMsr=true,
10573 it's safe even after hmR0VmxUpdateAutoLoadHostMsrs has already been done. */
10574 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu),
10575 true /* fSetReadWrite */, true /* fUpdateHostMsr */);
10576 AssertRC(rc);
10577 }
10578 else
10579 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_TSC_AUX);
10580 }
10581
10582#ifdef VBOX_STRICT
10583 Assert(pVCpu->hm.s.vmx.fUpdatedHostAutoMsrs);
10584 hmR0VmxCheckAutoLoadStoreMsrs(pVCpu, pVmcsInfo);
10585 hmR0VmxCheckHostEferMsr(pVCpu, pVmcsInfo);
10586 AssertRC(hmR0VmxCheckVmcsCtls(pVCpu, pVmcsInfo));
10587#endif
10588
10589#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
10590 /** @todo r=ramshankar: We can now probably use iemVmxVmentryCheckGuestState here.
10591 * Add a PVMXMSRS parameter to it, so that IEM can look at the host MSRs. */
10592 uint32_t const uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pVmcsInfo);
10593 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
10594 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
10595#endif
10596}
10597
10598
10599/**
10600 * First C routine invoked after running guest code using hardware-assisted VMX.
10601 *
10602 * @param pVCpu The cross context virtual CPU structure.
10603 * @param pVmxTransient The VMX-transient structure.
10604 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
10605 *
10606 * @remarks Called with interrupts disabled, and returns with interrupts enabled!
10607 *
10608 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
10609 * unconditionally when it is safe to do so.
10610 */
10611static void hmR0VmxPostRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, int rcVMRun)
10612{
10613 uint64_t const uHostTsc = ASMReadTSC(); /** @todo We can do a lot better here, see @bugref{9180#c38}. */
10614
10615 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
10616 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for EMT poking. */
10617 pVCpu->hm.s.fCtxChanged = 0; /* Exits/longjmps to ring-3 requires saving the guest state. */
10618 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
10619 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
10620 pVmxTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
10621
10622 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
10623 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
10624 {
10625 uint64_t uGstTsc;
10626 if (!pVmxTransient->fIsNestedGuest)
10627 uGstTsc = uHostTsc + pVmcsInfo->u64TscOffset;
10628 else
10629 {
10630 uint64_t const uNstGstTsc = uHostTsc + pVmcsInfo->u64TscOffset;
10631 uGstTsc = CPUMRemoveNestedGuestTscOffset(pVCpu, uNstGstTsc);
10632 }
10633 TMCpuTickSetLastSeen(pVCpu, uGstTsc); /* Update TM with the guest TSC. */
10634 }
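    /* I.e. the TSC reported to TM is the host TSC plus the TSC offset from the VMCS; for a
       nested-guest, CPUM strips the nested-guest's own TSC offset again so that TM is given
       the guest's view of the TSC rather than the nested-guest's. */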
10635
10636 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
10637 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
10638 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
10639
10640#if HC_ARCH_BITS == 64
10641 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Some host state messed up by VMX needs restoring. */
10642#endif
10643#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
10644 /* The 64-on-32 switcher maintains VMCS-launch state on its own
10645 and we need to leave it alone here. */
10646 if (pVmcsInfo->pfnStartVM != VMXR0SwitcherStartVM64)
10647 pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
10648#else
10649 pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
10650#endif
10651#ifdef VBOX_STRICT
10652 hmR0VmxCheckHostEferMsr(pVCpu, pVmcsInfo); /* Verify that the host EFER MSR wasn't modified. */
10653#endif
10654 Assert(!ASMIntAreEnabled());
10655 ASMSetFlags(pVmxTransient->fEFlags); /* Enable interrupts. */
10656 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
10657
10658 /*
10659 * Save the basic VM-exit reason and check if the VM-entry failed.
10660 * See Intel spec. 24.9.1 "Basic VM-exit Information".
10661 */
10662 uint32_t uExitReason;
10663 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
10664 AssertRC(rc);
10665 pVmxTransient->uExitReason = VMX_EXIT_REASON_BASIC(uExitReason);
10666 pVmxTransient->fVMEntryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
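    /* Note: bits 15:0 of the exit-reason field hold the basic exit reason, while bit 31
       signals a VM-entry failure (Intel spec. 24.9.1); the two macros above extract
       those parts. */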
10667
10668 /*
10669 * Check if VMLAUNCH/VMRESUME succeeded.
10670 * If this failed, we cause a guru meditation and cease further execution.
10671 */
10672 if (RT_LIKELY(rcVMRun == VINF_SUCCESS))
10673 {
10674 /*
10675 * Update the VM-exit history array here even if the VM-entry failed due to:
10676 * - Invalid guest state.
10677 * - MSR loading.
10678 * - Machine-check event.
10679 *
10680 * In any of the above cases we will still have a "valid" VM-exit reason
10681         * despite @a fVMEntryFailed being true.
10682 *
10683 * See Intel spec. 26.7 "VM-Entry failures during or after loading guest state".
10684 *
10685 * Note! We don't have CS or RIP at this point. Will probably address that later
10686 * by amending the history entry added here.
10687 */
10688 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_VMX, pVmxTransient->uExitReason & EMEXIT_F_TYPE_MASK),
10689 UINT64_MAX, uHostTsc);
10690
10691 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
10692 {
10693 VMMRZCallRing3Enable(pVCpu);
10694
10695 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
10696 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
10697
10698#if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
10699 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
10700 AssertRC(rc);
10701#elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
10702 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_RFLAGS);
10703 AssertRC(rc);
10704#else
10705 /*
10706             * Always import the guest-interruptibility state as we need it while evaluating
10707             * and injecting events on re-entry.
10708 *
10709 * We don't import CR0 (when unrestricted guest execution is unavailable) despite
10710 * checking for real-mode while exporting the state because all bits that cause
10711 * mode changes wrt CR0 are intercepted.
10712 */
10713 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_HM_VMX_INT_STATE);
10714 AssertRC(rc);
10715#endif
10716
10717 /*
10718 * Sync the TPR shadow with our APIC state.
10719 */
10720 if ( !pVmxTransient->fIsNestedGuest
10721 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
10722 {
10723 Assert(pVmcsInfo->pbVirtApic);
10724 if (pVmxTransient->u8GuestTpr != pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR])
10725 {
10726 rc = APICSetTpr(pVCpu, pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR]);
10727 AssertRC(rc);
10728 ASMAtomicOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
10729 }
10730 }
10731
10732 Assert(VMMRZCallRing3IsEnabled(pVCpu));
10733 return;
10734 }
10735 }
10736 else
10737 Log4Func(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));
10738
10739 VMMRZCallRing3Enable(pVCpu);
10740}
10741
10742
10743/**
10744 * Runs the guest code using hardware-assisted VMX the normal way.
10745 *
10746 * @returns VBox status code.
10747 * @param pVCpu The cross context virtual CPU structure.
10748 * @param pcLoops Pointer to the number of executed loops.
10749 */
10750static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVMCPU pVCpu, uint32_t *pcLoops)
10751{
10752 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
10753 Assert(pcLoops);
10754 Assert(*pcLoops <= cMaxResumeLoops);
10755
10756 VMXTRANSIENT VmxTransient;
10757 RT_ZERO(VmxTransient);
10758 VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
10759
10760 /* Paranoia. */
10761 Assert(VmxTransient.pVmcsInfo == &pVCpu->hm.s.vmx.VmcsInfo);
10762 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
10763
10764 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
10765 for (;;)
10766 {
10767 Assert(!HMR0SuspendPending());
10768 HMVMX_ASSERT_CPU_SAFE(pVCpu);
10769 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
10770
10771 /*
10772         * Preparatory work for running guest code; this may force us to
10773 * return to ring-3.
10774 *
10775 * Warning! This bugger disables interrupts on VINF_SUCCESS!
10776 */
10777 rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */);
10778 if (rcStrict != VINF_SUCCESS)
10779 break;
10780
10781 /* Interrupts are disabled at this point! */
10782 hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient);
10783 int rcRun = hmR0VmxRunGuest(pVCpu, &VmxTransient);
10784 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
10785 /* Interrupts are re-enabled at this point! */
10786
10787 /*
10788 * Check for errors with running the VM (VMLAUNCH/VMRESUME).
10789 */
10790 if (RT_SUCCESS(rcRun))
10791 { /* very likely */ }
10792 else
10793 {
10794 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
10795 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
10796 return rcRun;
10797 }
10798
10799 /*
10800 * Profile the VM-exit.
10801 */
10802 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
10803 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
10804 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
10805 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
10806 HMVMX_START_EXIT_DISPATCH_PROF();
10807
10808 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
10809
10810 /*
10811 * Handle the VM-exit.
10812 */
10813#ifdef HMVMX_USE_FUNCTION_TABLE
10814 rcStrict = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, &VmxTransient);
10815#else
10816 rcStrict = hmR0VmxHandleExit(pVCpu, &VmxTransient);
10817#endif
10818 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
10819 if (rcStrict == VINF_SUCCESS)
10820 {
10821 if (++(*pcLoops) <= cMaxResumeLoops)
10822 continue;
10823 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
10824 rcStrict = VINF_EM_RAW_INTERRUPT;
10825 }
10826 break;
10827 }
10828
10829 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
10830 return rcStrict;
10831}
10832
10833#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10834/**
10835 * Runs the nested-guest code using hardware-assisted VMX.
10836 *
10837 * @returns VBox status code.
10838 * @param pVCpu The cross context virtual CPU structure.
10839 * @param pcLoops Pointer to the number of executed loops.
10840 *
10841 * @sa hmR0VmxRunGuestCodeNormal.
10842 */
10843static VBOXSTRICTRC hmR0VmxRunGuestCodeNested(PVMCPU pVCpu, uint32_t *pcLoops)
10844{
10845 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
10846 Assert(pcLoops);
10847 Assert(*pcLoops <= cMaxResumeLoops);
10848
10849 VMXTRANSIENT VmxTransient;
10850 RT_ZERO(VmxTransient);
10851 VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
10852 VmxTransient.fIsNestedGuest = true;
10853
10854 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
10855 for (;;)
10856 {
10857 Assert(!HMR0SuspendPending());
10858 HMVMX_ASSERT_CPU_SAFE(pVCpu);
10859 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
10860
10861 /*
10862         * Preparatory work for running nested-guest code; this may force us to
10863 * return to ring-3.
10864 *
10865 * Warning! This bugger disables interrupts on VINF_SUCCESS!
10866 */
10867 rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */);
10868 if (rcStrict != VINF_SUCCESS)
10869 break;
10870
10871 /* Interrupts are disabled at this point! */
10872 hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient);
10873 int rcRun = hmR0VmxRunGuest(pVCpu, &VmxTransient);
10874 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
10875 /* Interrupts are re-enabled at this point! */
10876
10877 /*
10878 * Check for errors with running the VM (VMLAUNCH/VMRESUME).
10879 */
10880 if (RT_SUCCESS(rcRun))
10881 { /* very likely */ }
10882 else
10883 {
10884 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
10885 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
10886 return rcRun;
10887 }
10888
10889 /*
10890 * Profile the VM-exit.
10891 */
10892 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
10893 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
10894 STAM_COUNTER_INC(&pVCpu->hm.s.paStatNestedExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
10895 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
10896 HMVMX_START_EXIT_DISPATCH_PROF();
10897
10898 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
10899
10900 /*
10901 * Handle the VM-exit.
10902 */
10903 rcStrict = hmR0VmxHandleExitNested(pVCpu, &VmxTransient);
10904 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
10905 if ( rcStrict == VINF_SUCCESS
10906 && CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
10907 {
10908 if (++(*pcLoops) <= cMaxResumeLoops)
10909 continue;
10910 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
10911 rcStrict = VINF_EM_RAW_INTERRUPT;
10912 }
10913 break;
10914 }
10915
10916 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
10917 return rcStrict;
10918}
10919#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10920
10921
10922/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
10923 * probes.
10924 *
10925 * The following few functions and associated structure contain the bloat
10926 * necessary for providing detailed debug events and dtrace probes as well as
10927 * reliable host side single stepping. This works on the principle of
10928 * "subclassing" the normal execution loop and workers. We replace the loop
10929 * method completely and override selected helpers to add necessary adjustments
10930 * to their core operation.
10931 *
10932 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
10933 * any performance for debug and analysis features.
10934 *
10935 * @{
10936 */
10937
10938/**
10939 * Transient per-VCPU debug state of the VMCS and related info that we save/restore in
10940 * the debug run loop.
10941 */
10942typedef struct VMXRUNDBGSTATE
10943{
10944 /** The RIP we started executing at. This is for detecting that we stepped. */
10945 uint64_t uRipStart;
10946 /** The CS we started executing with. */
10947 uint16_t uCsStart;
10948
10949 /** Whether we've actually modified the 1st execution control field. */
10950 bool fModifiedProcCtls : 1;
10951 /** Whether we've actually modified the 2nd execution control field. */
10952 bool fModifiedProcCtls2 : 1;
10953 /** Whether we've actually modified the exception bitmap. */
10954 bool fModifiedXcptBitmap : 1;
10955
10956    /** We desire the CR0 mask to be cleared. */
10957 bool fClearCr0Mask : 1;
10958    /** We desire the CR4 mask to be cleared. */
10959 bool fClearCr4Mask : 1;
10960 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
10961 uint32_t fCpe1Extra;
10962 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
10963 uint32_t fCpe1Unwanted;
10964 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
10965 uint32_t fCpe2Extra;
10966 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
10967 uint32_t bmXcptExtra;
10968 /** The sequence number of the Dtrace provider settings the state was
10969 * configured against. */
10970 uint32_t uDtraceSettingsSeqNo;
10971 /** VM-exits to check (one bit per VM-exit). */
10972 uint32_t bmExitsToCheck[3];
10973
10974 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
10975 uint32_t fProcCtlsInitial;
10976 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
10977 uint32_t fProcCtls2Initial;
10978 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
10979 uint32_t bmXcptInitial;
10980} VMXRUNDBGSTATE;
10981AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
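/* Note: the assertion above guarantees bmExitsToCheck provides exactly one bit per basic
   VM-exit reason (VMX_EXIT_MAX + 1 bits, rounded up to whole 32-bit words). */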
10982typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
10983
10984
10985/**
10986 * Initializes the VMXRUNDBGSTATE structure.
10987 *
10988 * @param pVCpu The cross context virtual CPU structure of the
10989 * calling EMT.
10990 * @param pVmxTransient The VMX-transient structure.
10991 * @param pDbgState The debug state to initialize.
10992 */
10993static void hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
10994{
10995 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
10996 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
10997
10998 pDbgState->fModifiedProcCtls = false;
10999 pDbgState->fModifiedProcCtls2 = false;
11000 pDbgState->fModifiedXcptBitmap = false;
11001 pDbgState->fClearCr0Mask = false;
11002 pDbgState->fClearCr4Mask = false;
11003 pDbgState->fCpe1Extra = 0;
11004 pDbgState->fCpe1Unwanted = 0;
11005 pDbgState->fCpe2Extra = 0;
11006 pDbgState->bmXcptExtra = 0;
11007 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
11008 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
11009 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
11010}
11011
11012
11013/**
11014 * Updates the VMCS fields with changes requested by @a pDbgState.
11015 *
11016 * This is performed after hmR0VmxPreRunGuestDebugStateUpdate as well as
11017 * immediately before executing guest code, i.e. when interrupts are disabled.
11018 * We don't check status codes here as we cannot easily assert or return in the
11019 * latter case.
11020 *
11021 * @param pVCpu The cross context virtual CPU structure.
11022 * @param pVmxTransient The VMX-transient structure.
11023 * @param pDbgState The debug state.
11024 */
11025static void hmR0VmxPreRunGuestDebugStateApply(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11026{
11027 /*
11028 * Ensure desired flags in VMCS control fields are set.
11029 * (Ignoring write failure here, as we're committed and it's just debug extras.)
11030 *
11031 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
11032 * there should be no stale data in pCtx at this point.
11033 */
11034 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11035 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
11036 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
11037 {
11038 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
11039 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
11040 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
11041 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
11042 pDbgState->fModifiedProcCtls = true;
11043 }
11044
11045 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
11046 {
11047 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
11048 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
11049 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
11050 pDbgState->fModifiedProcCtls2 = true;
11051 }
11052
11053 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
11054 {
11055 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
11056 VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
11057 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
11058 pDbgState->fModifiedXcptBitmap = true;
11059 }
11060
11061 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11062 {
11063 pVmcsInfo->u64Cr0Mask = 0;
11064 VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, 0);
11065 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11066 }
11067
11068 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11069 {
11070 pVmcsInfo->u64Cr4Mask = 0;
11071 VMXWriteVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, 0);
11072 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11073 }
11074
11075 NOREF(pVCpu);
11076}
11077
11078
11079/**
11080 * Restores VMCS fields that were changed by hmR0VmxPreRunGuestDebugStateApply for
11081 * re-entry next time around.
11082 *
11083 * @returns Strict VBox status code (i.e. informational status codes too).
11084 * @param pVCpu The cross context virtual CPU structure.
11085 * @param pVmxTransient The VMX-transient structure.
11086 * @param pDbgState The debug state.
11087 * @param rcStrict The return code from executing the guest using single
11088 * stepping.
11089 */
11090static VBOXSTRICTRC hmR0VmxRunDebugStateRevert(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11091 VBOXSTRICTRC rcStrict)
11092{
11093 /*
11094     * Restore VM-execution control settings as we may not re-enter this function the
11095 * next time around.
11096 */
11097 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11098
11099    /* We reload the initial value and trigger what recalculations we can the
11100 next time around. From the looks of things, that's all that's required atm. */
11101 if (pDbgState->fModifiedProcCtls)
11102 {
11103 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11104 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11105 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11106 AssertRCReturn(rc2, rc2);
11107 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11108 }
11109
11110 /* We're currently the only ones messing with this one, so just restore the
11111 cached value and reload the field. */
11112 if ( pDbgState->fModifiedProcCtls2
11113 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11114 {
11115 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11116 AssertRCReturn(rc2, rc2);
11117 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11118 }
11119
11120 /* If we've modified the exception bitmap, we restore it and trigger
11121 reloading and partial recalculation the next time around. */
11122 if (pDbgState->fModifiedXcptBitmap)
11123 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11124
11125 return rcStrict;
11126}
11127
11128
11129/**
11130 * Configures VM-exit controls for current DBGF and DTrace settings.
11131 *
11132 * This updates @a pDbgState and the VMCS execution control fields to reflect
11133 * the necessary VM-exits demanded by DBGF and DTrace.
11134 *
11135 * @param pVCpu The cross context virtual CPU structure.
11136 * @param pVmxTransient The VMX-transient structure. May update
11137 * fUpdatedTscOffsettingAndPreemptTimer.
11138 * @param pDbgState The debug state.
11139 */
11140static void hmR0VmxPreRunGuestDebugStateUpdate(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11141{
11142 /*
11143 * Take down the dtrace serial number so we can spot changes.
11144 */
11145 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11146 ASMCompilerBarrier();
11147
11148 /*
11149 * We'll rebuild most of the middle block of data members (holding the
11150 * current settings) as we go along here, so start by clearing it all.
11151 */
11152 pDbgState->bmXcptExtra = 0;
11153 pDbgState->fCpe1Extra = 0;
11154 pDbgState->fCpe1Unwanted = 0;
11155 pDbgState->fCpe2Extra = 0;
11156 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11157 pDbgState->bmExitsToCheck[i] = 0;
11158
11159 /*
11160 * Software interrupts (INT XXh) - no idea how to trigger these...
11161 */
11162 PVM pVM = pVCpu->CTX_SUFF(pVM);
11163 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11164 || VBOXVMM_INT_SOFTWARE_ENABLED())
11165 {
11166 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11167 }
11168
11169 /*
11170 * INT3 breakpoints - triggered by #BP exceptions.
11171 */
11172 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11173 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11174
11175 /*
11176 * Exception bitmap and XCPT events+probes.
11177 */
11178 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11179 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11180 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11181
11182 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11183 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11184 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11185 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11186 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11187 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11188 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11189 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11190 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11191 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11192 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11193 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11194 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11195 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11196 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11197 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11198 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11199 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11200
11201 if (pDbgState->bmXcptExtra)
11202 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11203
11204 /*
11205 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11206 *
11207 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
11208 * So, when adding/changing/removing please don't forget to update it.
11209 *
11210     * Some of the macros are picking up local variables to save horizontal space
11211 * (being able to see it in a table is the lesser evil here).
11212 */
11213#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11214 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11215 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11216#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11217 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11218 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11219 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11220 } else do { } while (0)
11221#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11222 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11223 { \
11224 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11225 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11226 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11227 } else do { } while (0)
11228#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11229 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11230 { \
11231 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11232 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11233 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11234 } else do { } while (0)
11235#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11236 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11237 { \
11238 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11239 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11240 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11241 } else do { } while (0)
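    /* For illustration, SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT)
       below roughly expands to (AssertCompile and the trailing else omitted):
           if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
               || VBOXVMM_INSTR_HALT_ENABLED())
           {
               pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;
               ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);
           }
       i.e. it forces HLT VM-exits and marks VMX_EXIT_HLT for event/probe dispatching. */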
11242
11243 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11244 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11245 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11246 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11247 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11248
11249 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11250 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11251 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11252 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11253 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11254 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11255 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11256 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11257 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11258 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11259 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11260 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11261 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11262 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11263 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11264 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11265 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11266 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11267 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11268 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11269 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11270 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11271 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11272 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11273 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11274 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11275 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11276 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11277 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11278 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11279 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11280 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11281 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11282 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11283 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11284 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11285
11286 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11287 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11288 {
11289 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
11290 | CPUMCTX_EXTRN_APIC_TPR);
11291 AssertRC(rc);
11292
11293#if 0 /** @todo fix me */
11294 pDbgState->fClearCr0Mask = true;
11295 pDbgState->fClearCr4Mask = true;
11296#endif
11297 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11298 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11299 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11300 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11301 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11302 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11303 require clearing here and in the loop if we start using it. */
11304 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11305 }
11306 else
11307 {
11308 if (pDbgState->fClearCr0Mask)
11309 {
11310 pDbgState->fClearCr0Mask = false;
11311 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);
11312 }
11313 if (pDbgState->fClearCr4Mask)
11314 {
11315 pDbgState->fClearCr4Mask = false;
11316 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4);
11317 }
11318 }
11319 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11320 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11321
11322 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11323 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11324 {
11325 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11326 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11327 }
11328 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11329 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11330
11331 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11332 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11333 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11334 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11335 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11336 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11337 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11338 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11339#if 0 /** @todo too slow, fix handler. */
11340 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11341#endif
11342 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11343
11344 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11345 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11346 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11347 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11348 {
11349 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11350 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11351 }
11352 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11353 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11354 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11355 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11356
11357 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11358 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11359 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11360 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11361 {
11362 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11363 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11364 }
11365 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11366 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11367 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11368 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11369
11370 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11371 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11372 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11373 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11374 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11375 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11376 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11377 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11378 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11379 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11380 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11381 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
11382 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
11383 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
11384 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
11385 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
11386 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
11387 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
11388 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
11389 SET_ONLY_XBM_IF_EITHER_EN(EXIT_XSAVES, VMX_EXIT_XSAVES);
11390 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
11391 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
11392
11393#undef IS_EITHER_ENABLED
11394#undef SET_ONLY_XBM_IF_EITHER_EN
11395#undef SET_CPE1_XBM_IF_EITHER_EN
11396#undef SET_CPEU_XBM_IF_EITHER_EN
11397#undef SET_CPE2_XBM_IF_EITHER_EN
11398
11399 /*
11400 * Sanitize the control stuff.
11401 */
11402 pDbgState->fCpe2Extra &= pVM->hm.s.vmx.Msrs.ProcCtls2.n.allowed1;
11403 if (pDbgState->fCpe2Extra)
11404 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
11405 pDbgState->fCpe1Extra &= pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed1;
11406 pDbgState->fCpe1Unwanted &= ~pVM->hm.s.vmx.Msrs.ProcCtls.n.allowed0;
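    /* Note: in the VMX capability MSRs a control may only be set to 1 if its allowed-1 bit
       is 1 and may only be cleared to 0 if its allowed-0 bit is 0, hence the masking with
       allowed1 and ~allowed0 above (see the capability MSRs in the Intel spec., appendix A). */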
11407 if (pVCpu->hm.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
11408 {
11409 pVCpu->hm.s.fDebugWantRdTscExit ^= true;
11410 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
11411 }
11412
11413 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
11414 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
11415 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
11416 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
11417}
11418
11419
11420/**
11421 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
11422 * appropriate.
11423 *
11424 * The caller has checked the VM-exit against the
11425 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
11426 * already, so we don't have to do that either.
11427 *
11428 * @returns Strict VBox status code (i.e. informational status codes too).
11429 * @param pVCpu The cross context virtual CPU structure.
11430 * @param pVmxTransient The VMX-transient structure.
11431 * @param uExitReason The VM-exit reason.
11432 *
11433 * @remarks The name of this function is displayed by dtrace, so keep it short
11434 * and to the point. No longer than 33 chars long, please.
11435 */
11436static VBOXSTRICTRC hmR0VmxHandleExitDtraceEvents(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
11437{
11438 /*
11439 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
11440 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
11441 *
11442 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
11443 * does. Must add/change/remove both places. Same ordering, please.
11444 *
11445 * Added/removed events must also be reflected in the next section
11446 * where we dispatch dtrace events.
11447 */
11448 bool fDtrace1 = false;
11449 bool fDtrace2 = false;
11450 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
11451 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
11452 uint32_t uEventArg = 0;
11453#define SET_EXIT(a_EventSubName) \
11454 do { \
11455 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11456 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11457 } while (0)
11458#define SET_BOTH(a_EventSubName) \
11459 do { \
11460 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
11461 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
11462 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
11463 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
11464 } while (0)
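    /* For illustration, SET_BOTH(CPUID) in the switch below roughly expands to:
           enmEvent1 = DBGFEVENT_INSTR_CPUID;
           enmEvent2 = DBGFEVENT_EXIT_CPUID;
           fDtrace1  = VBOXVMM_INSTR_CPUID_ENABLED();
           fDtrace2  = VBOXVMM_EXIT_CPUID_ENABLED();
       i.e. it selects both the instruction-level and the exit-level flavour of the
       event/probe for this VM-exit. */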
11465 switch (uExitReason)
11466 {
11467 case VMX_EXIT_MTF:
11468 return hmR0VmxExitMtf(pVCpu, pVmxTransient);
11469
11470 case VMX_EXIT_XCPT_OR_NMI:
11471 {
11472 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
11473 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
11474 {
11475 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
11476 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
11477 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
11478 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
11479 {
11480 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
11481 {
11482 hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
11483 uEventArg = pVmxTransient->uExitIntErrorCode;
11484 }
11485 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
11486 switch (enmEvent1)
11487 {
11488 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
11489 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
11490 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
11491 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
11492 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
11493 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
11494 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
11495 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
11496 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
11497 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
11498 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
11499 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
11500 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
11501 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
11502 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
11503 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
11504 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
11505 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
11506 default: break;
11507 }
11508 }
11509 else
11510 AssertFailed();
11511 break;
11512
11513 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
11514 uEventArg = idxVector;
11515 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
11516 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
11517 break;
11518 }
11519 break;
11520 }
11521
11522 case VMX_EXIT_TRIPLE_FAULT:
11523 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
11524 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
11525 break;
11526 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
11527 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
11528 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
11529 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
11530 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
11531
11532 /* Instruction specific VM-exits: */
11533 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
11534 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
11535 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
11536 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
11537 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
11538 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
11539 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
11540 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
11541 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
11542 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
11543 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
11544 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
11545 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
11546 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
11547 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
11548 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
11549 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
11550 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
11551 case VMX_EXIT_MOV_CRX:
11552 hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
11553 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
11554 SET_BOTH(CRX_READ);
11555 else
11556 SET_BOTH(CRX_WRITE);
11557 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
11558 break;
11559 case VMX_EXIT_MOV_DRX:
11560 hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
11561 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
11562 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
11563 SET_BOTH(DRX_READ);
11564 else
11565 SET_BOTH(DRX_WRITE);
11566 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
11567 break;
11568 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
11569 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
11570 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
11571 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
11572 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
11573 case VMX_EXIT_GDTR_IDTR_ACCESS:
11574 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
11575 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
11576 {
11577 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
11578 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
11579 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
11580 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
11581 }
11582 break;
11583
11584 case VMX_EXIT_LDTR_TR_ACCESS:
11585 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
11586 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
11587 {
11588 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
11589 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
11590 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
11591 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
11592 }
11593 break;
11594
11595 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
11596 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
11597 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
11598 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
11599 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
11600 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
11601 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
11602 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
11603 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
11604 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
11605 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
11606
11607 /* Events that aren't relevant at this point. */
11608 case VMX_EXIT_EXT_INT:
11609 case VMX_EXIT_INT_WINDOW:
11610 case VMX_EXIT_NMI_WINDOW:
11611 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11612 case VMX_EXIT_PREEMPT_TIMER:
11613 case VMX_EXIT_IO_INSTR:
11614 break;
11615
11616 /* Errors and unexpected events. */
11617 case VMX_EXIT_INIT_SIGNAL:
11618 case VMX_EXIT_SIPI:
11619 case VMX_EXIT_IO_SMI:
11620 case VMX_EXIT_SMI:
11621 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11622 case VMX_EXIT_ERR_MSR_LOAD:
11623 case VMX_EXIT_ERR_MACHINE_CHECK:
11624 break;
11625
11626 default:
11627 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11628 break;
11629 }
11630#undef SET_BOTH
11631#undef SET_EXIT
11632
11633 /*
11634 * Dtrace tracepoints go first. We do them here at once so we don't
11635 * have to duplicate the guest-state saving and related stuff a few dozen times.
11636 * The downside is that we've got to repeat the switch, though this time
11637 * we use enmEvent since the probes are a subset of what DBGF does.
11638 */
11639 if (fDtrace1 || fDtrace2)
11640 {
11641 hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
11642 hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11643 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
11644 switch (enmEvent1)
11645 {
11646 /** @todo consider which extra parameters would be helpful for each probe. */
11647 case DBGFEVENT_END: break;
11648 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
11649 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
11650 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
11651 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
11652 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
11653 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
11654 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
11655 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
11656 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
11657 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
11658 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
11659 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
11660 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
11661 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
11662 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
11663 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
11664 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
11665 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
11666 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11667 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11668 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
11669 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
11670 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
11671 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
11672 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
11673 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
11674 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
11675 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11676 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11677 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11678 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11679 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11680 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
11681 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11682 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
11683 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
11684 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
11685 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
11686 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
11687 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
11688 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
11689 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
11690 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
11691 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
11692 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
11693 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
11694 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
11695 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
11696 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
11697 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
11698 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
11699 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
11700 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
11701 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
11702 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
11703 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
11704 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
11705 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
11706 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
11707 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
11708 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
11709 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
11710 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
11711 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
11712 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
11713 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
11714 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
11715 }
11716 switch (enmEvent2)
11717 {
11718 /** @todo consider which extra parameters would be helpful for each probe. */
11719 case DBGFEVENT_END: break;
11720 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
11721 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
11722 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
11723 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
11724 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
11725 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
11726 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
11727 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
11728 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
11729 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11730 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11731 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
11732 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
11733 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
11734 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
11735 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
11736 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
11737 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
11738 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
11739 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
11740 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
11741 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
11742 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
11743 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
11744 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
11745 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
11746 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
11747 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
11748 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
11749 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
11750 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
11751 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
11752 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
11753 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
11754 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
11755 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
11756 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
11757 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
11758 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
11759 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
11760 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
11761 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
11762 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
11763 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
11764 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
11765 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
11766 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
11767 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
11768 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
11769 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
11770 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
11771 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
11772 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
11773 }
11774 }
11775
11776 /*
11777 * Fire off the DBGF event, if enabled (our check here is just a quick one,
11778 * the DBGF call will do a full check).
11779 *
11780 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
11781 * Note! If we have two events, we prioritize the first, i.e. the instruction
11782 * one, in order to avoid event nesting.
11783 */
11784 PVM pVM = pVCpu->CTX_SUFF(pVM);
11785 if ( enmEvent1 != DBGFEVENT_END
11786 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
11787 {
11788 hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11789 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
11790 if (rcStrict != VINF_SUCCESS)
11791 return rcStrict;
11792 }
11793 else if ( enmEvent2 != DBGFEVENT_END
11794 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
11795 {
11796 hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11797 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
11798 if (rcStrict != VINF_SUCCESS)
11799 return rcStrict;
11800 }
11801
11802 return VINF_SUCCESS;
11803}
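/*
 * Illustrative sketch (not built, hypothetical Ex* names only): the routine
 * above follows a "classify once, consume twice" shape -- a single switch maps
 * the raw exit reason to a debugger event plus a "probe armed" flag, and the
 * expensive guest-state import is only paid when something is actually armed.
 * A minimal stand-alone version of that shape, under those assumptions:
 */
#if 0
# include <stdbool.h>
# include <stdint.h>

typedef enum EXDBGEVENT { EXDBGEVENT_END = 0, EXDBGEVENT_INSTR_CPUID, EXDBGEVENT_INSTR_RDMSR } EXDBGEVENT;
typedef struct EXCLASSIFICATION
{
    EXDBGEVENT enmEvent;        /* Debugger event this exit maps to, if any. */
    bool       fProbeArmed;     /* Whether a tracing probe wants this exit. */
    uint64_t   uArg;            /* Extra argument (vector, CRx index, ...). */
} EXCLASSIFICATION;

/* Pass 1: classify the exit reason once; cheap, no guest-state import. */
static EXCLASSIFICATION exClassifyExit(uint32_t uExitReason, bool fCpuidProbeOn, bool fRdmsrProbeOn)
{
    EXCLASSIFICATION Res = { EXDBGEVENT_END, false, 0 };
    switch (uExitReason)
    {
        case 10 /* CPUID */: Res.enmEvent = EXDBGEVENT_INSTR_CPUID; Res.fProbeArmed = fCpuidProbeOn; break;
        case 31 /* RDMSR */: Res.enmEvent = EXDBGEVENT_INSTR_RDMSR; Res.fProbeArmed = fRdmsrProbeOn; break;
        default:             break;
    }
    return Res;
}
/* Pass 2 (only if Res.fProbeArmed or the debugger wants Res.enmEvent): import
   the guest state once and fire the probe/event from the classification. */
#endif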
11804
11805
11806/**
11807 * Single-stepping VM-exit filtering.
11808 *
11809 * This preprocesses the VM-exit and decides whether we've gotten far
11810 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
11811 * handling is performed.
11812 *
11813 * @returns Strict VBox status code (i.e. informational status codes too).
11814 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11815 * @param pVmxTransient The VMX-transient structure.
11816 * @param pDbgState The debug state.
11817 */
11818DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11819{
11820 /*
11821 * Expensive (saves context) generic dtrace VM-exit probe.
11822 */
11823 uint32_t const uExitReason = pVmxTransient->uExitReason;
11824 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
11825 { /* more likely */ }
11826 else
11827 {
11828 hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
11829 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
11830 AssertRC(rc);
11831 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
11832 }
11833
11834 /*
11835 * Check for host NMI, just to get that out of the way.
11836 */
11837 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
11838 { /* normally likely */ }
11839 else
11840 {
11841 int rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
11842 AssertRCReturn(rc2, rc2);
11843 uint32_t uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
11844 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
11845 return hmR0VmxExitXcptOrNmi(pVCpu, pVmxTransient);
11846 }
11847
11848 /*
11849 * Check for single stepping event if we're stepping.
11850 */
11851 if (pVCpu->hm.s.fSingleInstruction)
11852 {
11853 switch (uExitReason)
11854 {
11855 case VMX_EXIT_MTF:
11856 return hmR0VmxExitMtf(pVCpu, pVmxTransient);
11857
11858 /* Various events: */
11859 case VMX_EXIT_XCPT_OR_NMI:
11860 case VMX_EXIT_EXT_INT:
11861 case VMX_EXIT_TRIPLE_FAULT:
11862 case VMX_EXIT_INT_WINDOW:
11863 case VMX_EXIT_NMI_WINDOW:
11864 case VMX_EXIT_TASK_SWITCH:
11865 case VMX_EXIT_TPR_BELOW_THRESHOLD:
11866 case VMX_EXIT_APIC_ACCESS:
11867 case VMX_EXIT_EPT_VIOLATION:
11868 case VMX_EXIT_EPT_MISCONFIG:
11869 case VMX_EXIT_PREEMPT_TIMER:
11870
11871 /* Instruction specific VM-exits: */
11872 case VMX_EXIT_CPUID:
11873 case VMX_EXIT_GETSEC:
11874 case VMX_EXIT_HLT:
11875 case VMX_EXIT_INVD:
11876 case VMX_EXIT_INVLPG:
11877 case VMX_EXIT_RDPMC:
11878 case VMX_EXIT_RDTSC:
11879 case VMX_EXIT_RSM:
11880 case VMX_EXIT_VMCALL:
11881 case VMX_EXIT_VMCLEAR:
11882 case VMX_EXIT_VMLAUNCH:
11883 case VMX_EXIT_VMPTRLD:
11884 case VMX_EXIT_VMPTRST:
11885 case VMX_EXIT_VMREAD:
11886 case VMX_EXIT_VMRESUME:
11887 case VMX_EXIT_VMWRITE:
11888 case VMX_EXIT_VMXOFF:
11889 case VMX_EXIT_VMXON:
11890 case VMX_EXIT_MOV_CRX:
11891 case VMX_EXIT_MOV_DRX:
11892 case VMX_EXIT_IO_INSTR:
11893 case VMX_EXIT_RDMSR:
11894 case VMX_EXIT_WRMSR:
11895 case VMX_EXIT_MWAIT:
11896 case VMX_EXIT_MONITOR:
11897 case VMX_EXIT_PAUSE:
11898 case VMX_EXIT_GDTR_IDTR_ACCESS:
11899 case VMX_EXIT_LDTR_TR_ACCESS:
11900 case VMX_EXIT_INVEPT:
11901 case VMX_EXIT_RDTSCP:
11902 case VMX_EXIT_INVVPID:
11903 case VMX_EXIT_WBINVD:
11904 case VMX_EXIT_XSETBV:
11905 case VMX_EXIT_RDRAND:
11906 case VMX_EXIT_INVPCID:
11907 case VMX_EXIT_VMFUNC:
11908 case VMX_EXIT_RDSEED:
11909 case VMX_EXIT_XSAVES:
11910 case VMX_EXIT_XRSTORS:
11911 {
11912 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
11913 AssertRCReturn(rc, rc);
11914 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
11915 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
11916 return VINF_EM_DBG_STEPPED;
11917 break;
11918 }
11919
11920 /* Errors and unexpected events: */
11921 case VMX_EXIT_INIT_SIGNAL:
11922 case VMX_EXIT_SIPI:
11923 case VMX_EXIT_IO_SMI:
11924 case VMX_EXIT_SMI:
11925 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
11926 case VMX_EXIT_ERR_MSR_LOAD:
11927 case VMX_EXIT_ERR_MACHINE_CHECK:
11928 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault like, so I guess we must process it? */
11929 break;
11930
11931 default:
11932 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
11933 break;
11934 }
11935 }
11936
11937 /*
11938 * Check for debugger event breakpoints and dtrace probes.
11939 */
11940 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
11941 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
11942 {
11943 VBOXSTRICTRC rcStrict = hmR0VmxHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
11944 if (rcStrict != VINF_SUCCESS)
11945 return rcStrict;
11946 }
11947
11948 /*
11949 * Normal processing.
11950 */
11951#ifdef HMVMX_USE_FUNCTION_TABLE
11952 return g_apfnVMExitHandlers[uExitReason](pVCpu, pVmxTransient);
11953#else
11954 return hmR0VmxHandleExit(pVCpu, pVmxTransient, uExitReason);
11955#endif
11956}
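/*
 * Illustrative sketch (not built, hypothetical Ex* names): the single-step
 * filtering above boils down to "the step is complete as soon as CS:RIP no
 * longer matches the point where stepping was armed"; exits that leave CS:RIP
 * untouched (e.g. an external interrupt) simply resume the loop.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>

typedef struct EXSTEPSTATE
{
    uint64_t uRipStart;     /* RIP captured when the single-step request was armed. */
    uint16_t uCsStart;      /* CS selector captured at the same point. */
} EXSTEPSTATE;

static bool exHasSteppedPastStart(const EXSTEPSTATE *pStep, uint64_t uRipNow, uint16_t uCsNow)
{
    return uRipNow != pStep->uRipStart
        || uCsNow  != pStep->uCsStart;
}
#endif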
11957
11958
11959/**
11960 * Single steps guest code using hardware-assisted VMX.
11961 *
11962 * This is -not- the same as the guest single-stepping itself (say using EFLAGS.TF)
11963 * but single-stepping through the hypervisor debugger.
11964 *
11965 * @returns Strict VBox status code (i.e. informational status codes too).
11966 * @param pVCpu The cross context virtual CPU structure.
11967 * @param pcLoops Pointer to the number of executed loops.
11968 *
11969 * @note Mostly the same as hmR0VmxRunGuestCodeNormal().
11970 */
11971static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVMCPU pVCpu, uint32_t *pcLoops)
11972{
11973 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hm.s.cMaxResumeLoops;
11974 Assert(pcLoops);
11975 Assert(*pcLoops <= cMaxResumeLoops);
11976
11977 VMXTRANSIENT VmxTransient;
11978 RT_ZERO(VmxTransient);
11979 VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
11980
11981 /* Set HMCPU indicators. */
11982 bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction;
11983 pVCpu->hm.s.fSingleInstruction = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
11984 pVCpu->hm.s.fDebugWantRdTscExit = false;
11985 pVCpu->hm.s.fUsingDebugLoop = true;
11986
11987 /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. */
11988 VMXRUNDBGSTATE DbgState;
11989 hmR0VmxRunDebugStateInit(pVCpu, &VmxTransient, &DbgState);
11990 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState);
11991
11992 /*
11993 * The loop.
11994 */
11995 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
11996 for (;;)
11997 {
11998 Assert(!HMR0SuspendPending());
11999 HMVMX_ASSERT_CPU_SAFE(pVCpu);
12000 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
12001 bool fStepping = pVCpu->hm.s.fSingleInstruction;
12002
12003 /* Set up VM-execution controls the next two can respond to. */
12004 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);
12005
12006 /*
12007 * Preparatory work for running guest code; this may force us to
12008 * return to ring-3.
12009 *
12010 * Warning! This bugger disables interrupts on VINF_SUCCESS!
12011 */
12012 rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, fStepping);
12013 if (rcStrict != VINF_SUCCESS)
12014 break;
12015
12016 /* Interrupts are disabled at this point! */
12017 hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient);
12018
12019 /* Override any obnoxious code in the above two calls. */
12020 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);
12021
12022 /*
12023 * Finally execute the guest.
12024 */
12025 int rcRun = hmR0VmxRunGuest(pVCpu, &VmxTransient);
12026
12027 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
12028 /* Interrupts are re-enabled at this point! */
12029
12030 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
12031 if (RT_SUCCESS(rcRun))
12032 { /* very likely */ }
12033 else
12034 {
12035 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
12036 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
12037 return rcRun;
12038 }
12039
12040 /* Profile the VM-exit. */
12041 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
12042 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
12043 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
12044 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
12045 HMVMX_START_EXIT_DISPATCH_PROF();
12046
12047 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
12048
12049 /*
12050 * Handle the VM-exit - we quit earlier on certain VM-exits, see hmR0VmxRunDebugHandleExit().
12051 */
12052 rcStrict = hmR0VmxRunDebugHandleExit(pVCpu, &VmxTransient, &DbgState);
12053 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
12054 if (rcStrict != VINF_SUCCESS)
12055 break;
12056 if (++(*pcLoops) > cMaxResumeLoops)
12057 {
12058 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
12059 rcStrict = VINF_EM_RAW_INTERRUPT;
12060 break;
12061 }
12062
12063 /*
12064 * Stepping: Did the RIP change, if so, consider it a single step.
12065 * Otherwise, make sure one of the TFs gets set.
12066 */
12067 if (fStepping)
12068 {
12069 int rc = hmR0VmxImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
12070 AssertRC(rc);
12071 if ( pVCpu->cpum.GstCtx.rip != DbgState.uRipStart
12072 || pVCpu->cpum.GstCtx.cs.Sel != DbgState.uCsStart)
12073 {
12074 rcStrict = VINF_EM_DBG_STEPPED;
12075 break;
12076 }
12077 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
12078 }
12079
12080 /*
12081 * Update when dtrace settings changes (DBGF kicks us, so no need to check).
12082 */
12083 if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo)
12084 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState);
12085 }
12086
12087 /*
12088 * Clear the X86_EFL_TF if necessary.
12089 */
12090 if (pVCpu->hm.s.fClearTrapFlag)
12091 {
12092 int rc = hmR0VmxImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
12093 AssertRC(rc);
12094 pVCpu->hm.s.fClearTrapFlag = false;
12095 pVCpu->cpum.GstCtx.eflags.Bits.u1TF = 0;
12096 }
12097 /** @todo there seem to be issues with the resume flag when the monitor trap
12098 * flag is pending without being used. Seen early in BIOS init when
12099 * accessing the APIC page in protected mode. */
12100
12101 /*
12102 * Restore VM-exit control settings as we may not re-enter this function the
12103 * next time around.
12104 */
12105 rcStrict = hmR0VmxRunDebugStateRevert(pVCpu, &VmxTransient, &DbgState, rcStrict);
12106
12107 /* Restore HMCPU indicators. */
12108 pVCpu->hm.s.fUsingDebugLoop = false;
12109 pVCpu->hm.s.fDebugWantRdTscExit = false;
12110 pVCpu->hm.s.fSingleInstruction = fSavedSingleInstruction;
12111
12112 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
12113 return rcStrict;
12114}
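/*
 * Illustrative sketch (not built, hypothetical names): the loop above applies
 * the debug-state overrides twice per iteration -- once so the generic pre-run
 * code can react to them, and once afterwards so nothing the generic code did
 * can undo them -- and it bails out to ring-3 after a bounded number of resumes.
 * A self-contained skeleton of that control flow, with the VMX specifics
 * abstracted away behind caller-supplied callbacks:
 */
#if 0
# include <stdint.h>

typedef int  (*PFNEXSTEP)(void *pvUser);    /* Returns 0 to keep looping. */
typedef void (*PFNEXVOID)(void *pvUser);

static int exDebugLoopSkeleton(PFNEXVOID pfnApplyDbgState, PFNEXSTEP pfnPrepare, PFNEXVOID pfnRunGuest,
                               PFNEXSTEP pfnHandleExit, uint32_t cMaxLoops, void *pvUser)
{
    for (uint32_t cLoops = 0;;)
    {
        pfnApplyDbgState(pvUser);           /* 1st: let the generic preparation see the overrides. */
        int rc = pfnPrepare(pvUser);
        if (rc != 0)
            return rc;
        pfnApplyDbgState(pvUser);           /* 2nd: re-assert the overrides after preparation. */
        pfnRunGuest(pvUser);
        rc = pfnHandleExit(pvUser);
        if (rc != 0)
            return rc;
        if (++cLoops > cMaxLoops)
            return 1;                       /* Bounded: give ring-3 a chance to run. */
    }
}
#endif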
12115
12116
12117/** @} */
12118
12119
12120/**
12121 * Checks if any expensive dtrace probes are enabled and we should go to the
12122 * debug loop.
12123 *
12124 * @returns true if we should use debug loop, false if not.
12125 */
12126static bool hmR0VmxAnyExpensiveProbesEnabled(void)
12127{
12128 /* It's probably faster to OR the raw 32-bit counter variables together.
12129 Since the variables are in an array and the probes are next to one
12130 another (more or less), we have good locality. So, better read
12131 eight or nine cache lines every time and only have one conditional, than
12132 128+ conditionals, right? */
12133 return ( VBOXVMM_R0_HMVMX_VMEXIT_ENABLED_RAW() /* expensive too due to context */
12134 | VBOXVMM_XCPT_DE_ENABLED_RAW()
12135 | VBOXVMM_XCPT_DB_ENABLED_RAW()
12136 | VBOXVMM_XCPT_BP_ENABLED_RAW()
12137 | VBOXVMM_XCPT_OF_ENABLED_RAW()
12138 | VBOXVMM_XCPT_BR_ENABLED_RAW()
12139 | VBOXVMM_XCPT_UD_ENABLED_RAW()
12140 | VBOXVMM_XCPT_NM_ENABLED_RAW()
12141 | VBOXVMM_XCPT_DF_ENABLED_RAW()
12142 | VBOXVMM_XCPT_TS_ENABLED_RAW()
12143 | VBOXVMM_XCPT_NP_ENABLED_RAW()
12144 | VBOXVMM_XCPT_SS_ENABLED_RAW()
12145 | VBOXVMM_XCPT_GP_ENABLED_RAW()
12146 | VBOXVMM_XCPT_PF_ENABLED_RAW()
12147 | VBOXVMM_XCPT_MF_ENABLED_RAW()
12148 | VBOXVMM_XCPT_AC_ENABLED_RAW()
12149 | VBOXVMM_XCPT_XF_ENABLED_RAW()
12150 | VBOXVMM_XCPT_VE_ENABLED_RAW()
12151 | VBOXVMM_XCPT_SX_ENABLED_RAW()
12152 | VBOXVMM_INT_SOFTWARE_ENABLED_RAW()
12153 | VBOXVMM_INT_HARDWARE_ENABLED_RAW()
12154 ) != 0
12155 || ( VBOXVMM_INSTR_HALT_ENABLED_RAW()
12156 | VBOXVMM_INSTR_MWAIT_ENABLED_RAW()
12157 | VBOXVMM_INSTR_MONITOR_ENABLED_RAW()
12158 | VBOXVMM_INSTR_CPUID_ENABLED_RAW()
12159 | VBOXVMM_INSTR_INVD_ENABLED_RAW()
12160 | VBOXVMM_INSTR_WBINVD_ENABLED_RAW()
12161 | VBOXVMM_INSTR_INVLPG_ENABLED_RAW()
12162 | VBOXVMM_INSTR_RDTSC_ENABLED_RAW()
12163 | VBOXVMM_INSTR_RDTSCP_ENABLED_RAW()
12164 | VBOXVMM_INSTR_RDPMC_ENABLED_RAW()
12165 | VBOXVMM_INSTR_RDMSR_ENABLED_RAW()
12166 | VBOXVMM_INSTR_WRMSR_ENABLED_RAW()
12167 | VBOXVMM_INSTR_CRX_READ_ENABLED_RAW()
12168 | VBOXVMM_INSTR_CRX_WRITE_ENABLED_RAW()
12169 | VBOXVMM_INSTR_DRX_READ_ENABLED_RAW()
12170 | VBOXVMM_INSTR_DRX_WRITE_ENABLED_RAW()
12171 | VBOXVMM_INSTR_PAUSE_ENABLED_RAW()
12172 | VBOXVMM_INSTR_XSETBV_ENABLED_RAW()
12173 | VBOXVMM_INSTR_SIDT_ENABLED_RAW()
12174 | VBOXVMM_INSTR_LIDT_ENABLED_RAW()
12175 | VBOXVMM_INSTR_SGDT_ENABLED_RAW()
12176 | VBOXVMM_INSTR_LGDT_ENABLED_RAW()
12177 | VBOXVMM_INSTR_SLDT_ENABLED_RAW()
12178 | VBOXVMM_INSTR_LLDT_ENABLED_RAW()
12179 | VBOXVMM_INSTR_STR_ENABLED_RAW()
12180 | VBOXVMM_INSTR_LTR_ENABLED_RAW()
12181 | VBOXVMM_INSTR_GETSEC_ENABLED_RAW()
12182 | VBOXVMM_INSTR_RSM_ENABLED_RAW()
12183 | VBOXVMM_INSTR_RDRAND_ENABLED_RAW()
12184 | VBOXVMM_INSTR_RDSEED_ENABLED_RAW()
12185 | VBOXVMM_INSTR_XSAVES_ENABLED_RAW()
12186 | VBOXVMM_INSTR_XRSTORS_ENABLED_RAW()
12187 | VBOXVMM_INSTR_VMM_CALL_ENABLED_RAW()
12188 | VBOXVMM_INSTR_VMX_VMCLEAR_ENABLED_RAW()
12189 | VBOXVMM_INSTR_VMX_VMLAUNCH_ENABLED_RAW()
12190 | VBOXVMM_INSTR_VMX_VMPTRLD_ENABLED_RAW()
12191 | VBOXVMM_INSTR_VMX_VMPTRST_ENABLED_RAW()
12192 | VBOXVMM_INSTR_VMX_VMREAD_ENABLED_RAW()
12193 | VBOXVMM_INSTR_VMX_VMRESUME_ENABLED_RAW()
12194 | VBOXVMM_INSTR_VMX_VMWRITE_ENABLED_RAW()
12195 | VBOXVMM_INSTR_VMX_VMXOFF_ENABLED_RAW()
12196 | VBOXVMM_INSTR_VMX_VMXON_ENABLED_RAW()
12197 | VBOXVMM_INSTR_VMX_VMFUNC_ENABLED_RAW()
12198 | VBOXVMM_INSTR_VMX_INVEPT_ENABLED_RAW()
12199 | VBOXVMM_INSTR_VMX_INVVPID_ENABLED_RAW()
12200 | VBOXVMM_INSTR_VMX_INVPCID_ENABLED_RAW()
12201 ) != 0
12202 || ( VBOXVMM_EXIT_TASK_SWITCH_ENABLED_RAW()
12203 | VBOXVMM_EXIT_HALT_ENABLED_RAW()
12204 | VBOXVMM_EXIT_MWAIT_ENABLED_RAW()
12205 | VBOXVMM_EXIT_MONITOR_ENABLED_RAW()
12206 | VBOXVMM_EXIT_CPUID_ENABLED_RAW()
12207 | VBOXVMM_EXIT_INVD_ENABLED_RAW()
12208 | VBOXVMM_EXIT_WBINVD_ENABLED_RAW()
12209 | VBOXVMM_EXIT_INVLPG_ENABLED_RAW()
12210 | VBOXVMM_EXIT_RDTSC_ENABLED_RAW()
12211 | VBOXVMM_EXIT_RDTSCP_ENABLED_RAW()
12212 | VBOXVMM_EXIT_RDPMC_ENABLED_RAW()
12213 | VBOXVMM_EXIT_RDMSR_ENABLED_RAW()
12214 | VBOXVMM_EXIT_WRMSR_ENABLED_RAW()
12215 | VBOXVMM_EXIT_CRX_READ_ENABLED_RAW()
12216 | VBOXVMM_EXIT_CRX_WRITE_ENABLED_RAW()
12217 | VBOXVMM_EXIT_DRX_READ_ENABLED_RAW()
12218 | VBOXVMM_EXIT_DRX_WRITE_ENABLED_RAW()
12219 | VBOXVMM_EXIT_PAUSE_ENABLED_RAW()
12220 | VBOXVMM_EXIT_XSETBV_ENABLED_RAW()
12221 | VBOXVMM_EXIT_SIDT_ENABLED_RAW()
12222 | VBOXVMM_EXIT_LIDT_ENABLED_RAW()
12223 | VBOXVMM_EXIT_SGDT_ENABLED_RAW()
12224 | VBOXVMM_EXIT_LGDT_ENABLED_RAW()
12225 | VBOXVMM_EXIT_SLDT_ENABLED_RAW()
12226 | VBOXVMM_EXIT_LLDT_ENABLED_RAW()
12227 | VBOXVMM_EXIT_STR_ENABLED_RAW()
12228 | VBOXVMM_EXIT_LTR_ENABLED_RAW()
12229 | VBOXVMM_EXIT_GETSEC_ENABLED_RAW()
12230 | VBOXVMM_EXIT_RSM_ENABLED_RAW()
12231 | VBOXVMM_EXIT_RDRAND_ENABLED_RAW()
12232 | VBOXVMM_EXIT_RDSEED_ENABLED_RAW()
12233 | VBOXVMM_EXIT_XSAVES_ENABLED_RAW()
12234 | VBOXVMM_EXIT_XRSTORS_ENABLED_RAW()
12235 | VBOXVMM_EXIT_VMM_CALL_ENABLED_RAW()
12236 | VBOXVMM_EXIT_VMX_VMCLEAR_ENABLED_RAW()
12237 | VBOXVMM_EXIT_VMX_VMLAUNCH_ENABLED_RAW()
12238 | VBOXVMM_EXIT_VMX_VMPTRLD_ENABLED_RAW()
12239 | VBOXVMM_EXIT_VMX_VMPTRST_ENABLED_RAW()
12240 | VBOXVMM_EXIT_VMX_VMREAD_ENABLED_RAW()
12241 | VBOXVMM_EXIT_VMX_VMRESUME_ENABLED_RAW()
12242 | VBOXVMM_EXIT_VMX_VMWRITE_ENABLED_RAW()
12243 | VBOXVMM_EXIT_VMX_VMXOFF_ENABLED_RAW()
12244 | VBOXVMM_EXIT_VMX_VMXON_ENABLED_RAW()
12245 | VBOXVMM_EXIT_VMX_VMFUNC_ENABLED_RAW()
12246 | VBOXVMM_EXIT_VMX_INVEPT_ENABLED_RAW()
12247 | VBOXVMM_EXIT_VMX_INVVPID_ENABLED_RAW()
12248 | VBOXVMM_EXIT_VMX_INVPCID_ENABLED_RAW()
12249 | VBOXVMM_EXIT_VMX_EPT_VIOLATION_ENABLED_RAW()
12250 | VBOXVMM_EXIT_VMX_EPT_MISCONFIG_ENABLED_RAW()
12251 | VBOXVMM_EXIT_VMX_VAPIC_ACCESS_ENABLED_RAW()
12252 | VBOXVMM_EXIT_VMX_VAPIC_WRITE_ENABLED_RAW()
12253 ) != 0;
12254}
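/*
 * Illustrative sketch (not built, hypothetical names): the function above
 * trades 128+ short-circuiting "||" branches for plain bitwise ORs over
 * adjacent 32-bit enable counters, ending in a single conditional. The same
 * idea over a generic counter array:
 */
#if 0
# include <stdbool.h>
# include <stddef.h>
# include <stdint.h>

static bool exAnyProbeEnabled(const uint32_t *pauEnableCounters, size_t cCounters)
{
    uint32_t uAccum = 0;
    for (size_t i = 0; i < cCounters; i++)
        uAccum |= pauEnableCounters[i];     /* No branch per probe; good cache locality. */
    return uAccum != 0;                     /* One conditional at the end. */
}
#endif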
12255
12256
12257/**
12258 * Runs the guest using hardware-assisted VMX.
12259 *
12260 * @returns Strict VBox status code (i.e. informational status codes too).
12261 * @param pVCpu The cross context virtual CPU structure.
12262 */
12263VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPU pVCpu)
12264{
12265 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
12266 Assert(VMMRZCallRing3IsEnabled(pVCpu));
12267 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
12268 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
12269
12270 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pCtx);
12271
12272 VBOXSTRICTRC rcStrict;
12273 uint32_t cLoops = 0;
12274#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12275 bool const fInNestedGuestMode = CPUMIsGuestInVmxNonRootMode(pCtx);
12276#else
12277 bool const fInNestedGuestMode = false;
12278#endif
12279 if (!fInNestedGuestMode)
12280 {
12281 if ( !pVCpu->hm.s.fUseDebugLoop
12282 && (!VBOXVMM_ANY_PROBES_ENABLED() || !hmR0VmxAnyExpensiveProbesEnabled())
12283 && !DBGFIsStepping(pVCpu)
12284 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
12285 rcStrict = hmR0VmxRunGuestCodeNormal(pVCpu, &cLoops);
12286 else
12287 rcStrict = hmR0VmxRunGuestCodeDebug(pVCpu, &cLoops);
12288 }
12289#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12290 else
12291 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
12292
12293 if (rcStrict == VINF_VMX_VMLAUNCH_VMRESUME)
12294 rcStrict = hmR0VmxRunGuestCodeNested(pVCpu, &cLoops);
12295#endif
12296
12297 if (rcStrict == VERR_EM_INTERPRETER)
12298 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
12299 else if (rcStrict == VINF_EM_RESET)
12300 rcStrict = VINF_EM_TRIPLE_FAULT;
12301
12302 int rc2 = hmR0VmxExitToRing3(pVCpu, rcStrict);
12303 if (RT_FAILURE(rc2))
12304 {
12305 pVCpu->hm.s.u32HMError = (uint32_t)VBOXSTRICTRC_VAL(rcStrict);
12306 rcStrict = rc2;
12307 }
12308 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
12309 Assert(!VMMRZCallRing3IsNotificationSet(pVCpu));
12310 return rcStrict;
12311}
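/*
 * Illustrative sketch (not built, hypothetical names): the normal (fast) loop
 * above is only selected when nobody needs per-exit inspection -- no forced
 * debug loop, no expensive probes, no stepping and no int3 breakpoints.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>

static bool exCanUseNormalLoop(bool fForceDebugLoop, bool fExpensiveProbes, bool fStepping, uint32_t cInt3Breakpoints)
{
    return !fForceDebugLoop
        && !fExpensiveProbes
        && !fStepping
        && cInt3Breakpoints == 0;
}
#endif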
12312
12313
12314#ifndef HMVMX_USE_FUNCTION_TABLE
12315/**
12316 * Handles a guest VM-exit from hardware-assisted VMX execution.
12317 *
12318 * @returns Strict VBox status code (i.e. informational status codes too).
12319 * @param pVCpu The cross context virtual CPU structure.
12320 * @param pVmxTransient The VMX-transient structure.
12321 */
12322DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12323{
12324#ifdef DEBUG_ramshankar
12325#define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
12326 do { \
12327 if (a_fSave != 0) \
12328 hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
12329 VBOXSTRICTRC rcStrict = a_CallExpr; \
12330 if (a_fSave != 0) \
12331 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); \
12332 return rcStrict; \
12333 } while (0)
12334#else
12335# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
12336#endif
12337 uint32_t const rcReason = pVmxTransient->uExitReason;
12338 switch (rcReason)
12339 {
12340 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, hmR0VmxExitEptMisconfig(pVCpu, pVmxTransient));
12341 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, hmR0VmxExitEptViolation(pVCpu, pVmxTransient));
12342 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, hmR0VmxExitIoInstr(pVCpu, pVmxTransient));
12343 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, hmR0VmxExitCpuid(pVCpu, pVmxTransient));
12344 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, hmR0VmxExitRdtsc(pVCpu, pVmxTransient));
12345 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, hmR0VmxExitRdtscp(pVCpu, pVmxTransient));
12346 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitApicAccess(pVCpu, pVmxTransient));
12347 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, hmR0VmxExitXcptOrNmi(pVCpu, pVmxTransient));
12348 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovCRx(pVCpu, pVmxTransient));
12349 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, hmR0VmxExitExtInt(pVCpu, pVmxTransient));
12350 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, hmR0VmxExitIntWindow(pVCpu, pVmxTransient));
12351 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, hmR0VmxExitTprBelowThreshold(pVCpu, pVmxTransient));
12352 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, hmR0VmxExitMwait(pVCpu, pVmxTransient));
12353 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, hmR0VmxExitMonitor(pVCpu, pVmxTransient));
12354 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, hmR0VmxExitTaskSwitch(pVCpu, pVmxTransient));
12355 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, hmR0VmxExitPreemptTimer(pVCpu, pVmxTransient));
12356 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, hmR0VmxExitRdmsr(pVCpu, pVmxTransient));
12357 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, hmR0VmxExitWrmsr(pVCpu, pVmxTransient));
12358 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, hmR0VmxExitVmcall(pVCpu, pVmxTransient));
12359 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovDRx(pVCpu, pVmxTransient));
12360 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, hmR0VmxExitHlt(pVCpu, pVmxTransient));
12361 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, hmR0VmxExitInvd(pVCpu, pVmxTransient));
12362 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, hmR0VmxExitInvlpg(pVCpu, pVmxTransient));
12363 case VMX_EXIT_RSM: VMEXIT_CALL_RET(0, hmR0VmxExitRsm(pVCpu, pVmxTransient));
12364 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, hmR0VmxExitMtf(pVCpu, pVmxTransient));
12365 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, hmR0VmxExitPause(pVCpu, pVmxTransient));
12366 case VMX_EXIT_GDTR_IDTR_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pVmxTransient));
12367 case VMX_EXIT_LDTR_TR_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pVmxTransient));
12368 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, hmR0VmxExitWbinvd(pVCpu, pVmxTransient));
12369 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, hmR0VmxExitXsetbv(pVCpu, pVmxTransient));
12370 case VMX_EXIT_RDRAND: VMEXIT_CALL_RET(0, hmR0VmxExitRdrand(pVCpu, pVmxTransient));
12371 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, hmR0VmxExitInvpcid(pVCpu, pVmxTransient));
12372 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, hmR0VmxExitGetsec(pVCpu, pVmxTransient));
12373 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, hmR0VmxExitRdpmc(pVCpu, pVmxTransient));
12374#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12375 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, hmR0VmxExitVmclear(pVCpu, pVmxTransient));
12376 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, hmR0VmxExitVmlaunch(pVCpu, pVmxTransient));
12377 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, hmR0VmxExitVmptrld(pVCpu, pVmxTransient));
12378 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, hmR0VmxExitVmptrst(pVCpu, pVmxTransient));
12379 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, hmR0VmxExitVmread(pVCpu, pVmxTransient));
12380 case VMX_EXIT_VMRESUME: VMEXIT_CALL_RET(0, hmR0VmxExitVmresume(pVCpu, pVmxTransient));
12381 case VMX_EXIT_VMWRITE: VMEXIT_CALL_RET(0, hmR0VmxExitVmwrite(pVCpu, pVmxTransient));
12382 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, hmR0VmxExitVmxoff(pVCpu, pVmxTransient));
12383 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, hmR0VmxExitVmxon(pVCpu, pVmxTransient));
12384#else
12385 case VMX_EXIT_VMCLEAR:
12386 case VMX_EXIT_VMLAUNCH:
12387 case VMX_EXIT_VMPTRLD:
12388 case VMX_EXIT_VMPTRST:
12389 case VMX_EXIT_VMREAD:
12390 case VMX_EXIT_VMRESUME:
12391 case VMX_EXIT_VMWRITE:
12392 case VMX_EXIT_VMXOFF:
12393 case VMX_EXIT_VMXON:
12394 return hmR0VmxExitSetPendingXcptUD(pVCpu, pVmxTransient);
12395#endif
12396
12397 case VMX_EXIT_TRIPLE_FAULT: return hmR0VmxExitTripleFault(pVCpu, pVmxTransient);
12398 case VMX_EXIT_NMI_WINDOW: return hmR0VmxExitNmiWindow(pVCpu, pVmxTransient);
12399 case VMX_EXIT_INIT_SIGNAL: return hmR0VmxExitInitSignal(pVCpu, pVmxTransient);
12400 case VMX_EXIT_SIPI: return hmR0VmxExitSipi(pVCpu, pVmxTransient);
12401 case VMX_EXIT_IO_SMI: return hmR0VmxExitIoSmi(pVCpu, pVmxTransient);
12402 case VMX_EXIT_SMI: return hmR0VmxExitSmi(pVCpu, pVmxTransient);
12403 case VMX_EXIT_ERR_MSR_LOAD: return hmR0VmxExitErrMsrLoad(pVCpu, pVmxTransient);
12404 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return hmR0VmxExitErrInvalidGuestState(pVCpu, pVmxTransient);
12405 case VMX_EXIT_ERR_MACHINE_CHECK: return hmR0VmxExitErrMachineCheck(pVCpu, pVmxTransient);
12406
12407 case VMX_EXIT_INVEPT:
12408 case VMX_EXIT_INVVPID:
12409 case VMX_EXIT_VMFUNC:
12410 case VMX_EXIT_XSAVES:
12411 case VMX_EXIT_XRSTORS:
12412 return hmR0VmxExitSetPendingXcptUD(pVCpu, pVmxTransient);
12413
12414 case VMX_EXIT_ENCLS:
12415 case VMX_EXIT_RDSEED:
12416 case VMX_EXIT_PML_FULL:
12417 default:
12418 return hmR0VmxExitErrUndefined(pVCpu, pVmxTransient);
12419 }
12420#undef VMEXIT_CALL_RET
12421}
12422#endif /* !HMVMX_USE_FUNCTION_TABLE */
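/*
 * Illustrative sketch (not built, hypothetical Ex* names): the two dispatch
 * strategies selected by HMVMX_USE_FUNCTION_TABLE -- an indexed call table
 * versus a switch. The table gives constant-time dispatch but must cover every
 * index; the switch lets the compiler inline and prune handlers.
 */
#if 0
# include <stdint.h>

typedef int (*PFNEXHANDLER)(void *pvCtx);

static int exHandlerDefault(void *pvCtx) { (void)pvCtx; return -1; }
static int exHandlerCpuid(void *pvCtx)   { (void)pvCtx; return  0; }

static PFNEXHANDLER const g_apfnExHandlers[2] = { exHandlerDefault, exHandlerCpuid };

static int exDispatchByTable(uint32_t idxReason, void *pvCtx)
{
    return g_apfnExHandlers[idxReason < 2 ? idxReason : 0](pvCtx);
}

static int exDispatchBySwitch(uint32_t idxReason, void *pvCtx)
{
    switch (idxReason)
    {
        case 1:  return exHandlerCpuid(pvCtx);
        default: return exHandlerDefault(pvCtx);
    }
}
#endif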
12423
12424
12425#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12426/**
12427 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
12428 *
12429 * @returns Strict VBox status code (i.e. informational status codes too).
12430 * @param pVCpu The cross context virtual CPU structure.
12431 * @param pVmxTransient The VMX-transient structure.
12432 */
12433DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitNested(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12434{
12435 uint32_t const rcReason = pVmxTransient->uExitReason;
12436 switch (rcReason)
12437 {
12438 case VMX_EXIT_EPT_MISCONFIG:
12439 case VMX_EXIT_EPT_VIOLATION:
12440 case VMX_EXIT_IO_INSTR:
12441 case VMX_EXIT_CPUID:
12442 case VMX_EXIT_RDTSC:
12443 case VMX_EXIT_RDTSCP:
12444 case VMX_EXIT_APIC_ACCESS:
12445 case VMX_EXIT_XCPT_OR_NMI:
12446 case VMX_EXIT_MOV_CRX:
12447 case VMX_EXIT_EXT_INT:
12448 case VMX_EXIT_INT_WINDOW:
12449 case VMX_EXIT_TPR_BELOW_THRESHOLD:
12450 case VMX_EXIT_MWAIT:
12451 case VMX_EXIT_MONITOR:
12452 case VMX_EXIT_TASK_SWITCH:
12453 case VMX_EXIT_PREEMPT_TIMER:
12454 case VMX_EXIT_RDMSR:
12455 case VMX_EXIT_WRMSR:
12456 case VMX_EXIT_VMCALL:
12457 case VMX_EXIT_MOV_DRX:
12458 case VMX_EXIT_HLT:
12459 case VMX_EXIT_INVD:
12460 case VMX_EXIT_INVLPG:
12461 case VMX_EXIT_RSM:
12462 case VMX_EXIT_MTF:
12463 case VMX_EXIT_PAUSE:
12464 case VMX_EXIT_GDTR_IDTR_ACCESS:
12465 case VMX_EXIT_LDTR_TR_ACCESS:
12466 case VMX_EXIT_WBINVD:
12467 case VMX_EXIT_XSETBV:
12468 case VMX_EXIT_RDRAND:
12469 case VMX_EXIT_INVPCID:
12470 case VMX_EXIT_GETSEC:
12471 case VMX_EXIT_RDPMC:
12472#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12473 case VMX_EXIT_VMCLEAR:
12474 case VMX_EXIT_VMLAUNCH:
12475 case VMX_EXIT_VMPTRLD:
12476 case VMX_EXIT_VMPTRST:
12477 case VMX_EXIT_VMREAD:
12478 case VMX_EXIT_VMRESUME:
12479 case VMX_EXIT_VMWRITE:
12480 case VMX_EXIT_VMXOFF:
12481 case VMX_EXIT_VMXON:
12482#endif
12483 case VMX_EXIT_TRIPLE_FAULT:
12484 case VMX_EXIT_NMI_WINDOW:
12485 case VMX_EXIT_INIT_SIGNAL:
12486 case VMX_EXIT_SIPI:
12487 case VMX_EXIT_IO_SMI:
12488 case VMX_EXIT_SMI:
12489 case VMX_EXIT_ERR_MSR_LOAD:
12490 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
12491 case VMX_EXIT_ERR_MACHINE_CHECK:
12492
12493 case VMX_EXIT_INVEPT:
12494 case VMX_EXIT_INVVPID:
12495 case VMX_EXIT_VMFUNC:
12496 case VMX_EXIT_XSAVES:
12497 case VMX_EXIT_XRSTORS:
12498
12499 case VMX_EXIT_ENCLS:
12500 case VMX_EXIT_RDSEED:
12501 case VMX_EXIT_PML_FULL:
12502 default:
12503 return hmR0VmxExitErrUndefined(pVCpu, pVmxTransient);
12504 }
12505#undef VMEXIT_CALL_RET
12506}
12507#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12508
12509
12510#ifdef VBOX_STRICT
12511/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
12512# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
12513 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
12514
12515# define HMVMX_ASSERT_PREEMPT_CPUID() \
12516 do { \
12517 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
12518 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
12519 } while (0)
12520
12521# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
12522 do { \
12523 AssertPtr((a_pVCpu)); \
12524 AssertPtr((a_pVmxTransient)); \
12525 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
12526 Assert((a_pVmxTransient)->pVmcsInfo); \
12527 Assert(ASMIntAreEnabled()); \
12528 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
12529 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
12530 Log4Func(("vcpu[%RU32] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v\n", (a_pVCpu)->idCpu)); \
12531 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
12532 if (VMMR0IsLogFlushDisabled((a_pVCpu))) \
12533 HMVMX_ASSERT_PREEMPT_CPUID(); \
12534 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
12535 } while (0)
12536
12537# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
12538 do { \
12539 Log4Func(("\n")); \
12540 } while (0)
12541#else
12542# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
12543 do { \
12544 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
12545 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
12546 } while (0)
12547# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
12548#endif
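/*
 * Illustrative sketch (not built, hypothetical Ex* names): the pattern behind
 * HMVMX_ASSERT_PREEMPT_CPUID_VAR / HMVMX_ASSERT_PREEMPT_CPUID -- capture the
 * CPU id only while preemption is disabled (a sentinel otherwise) and assert
 * later that the thread has not migrated.
 */
#if 0
# include <assert.h>
# include <stdbool.h>
# include <stdint.h>

# define EX_NIL_CPUID  UINT32_MAX

static uint32_t exCaptureCpuId(bool fPreemptionEnabled, uint32_t idCurrentCpu)
{
    return fPreemptionEnabled ? EX_NIL_CPUID : idCurrentCpu;
}

static void exAssertSameCpu(uint32_t idCaptured, bool fPreemptionEnabled, uint32_t idCurrentCpu)
{
    assert(idCaptured == exCaptureCpuId(fPreemptionEnabled, idCurrentCpu));
}
#endif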
12549
12550
12551/**
12552 * Advances the guest RIP by the specified number of bytes.
12553 *
12554 * @param pVCpu The cross context virtual CPU structure.
12555 * @param cbInstr Number of bytes to advance the RIP by.
12556 *
12557 * @remarks No-long-jump zone!!!
12558 */
12559DECLINLINE(void) hmR0VmxAdvanceGuestRipBy(PVMCPU pVCpu, uint32_t cbInstr)
12560{
12561 /* Advance the RIP. */
12562 pVCpu->cpum.GstCtx.rip += cbInstr;
12563 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
12564
12565 /* Update interrupt inhibition. */
12566 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
12567 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
12568 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
12569}
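/*
 * Illustrative sketch (not built, hypothetical Ex* names): the essence of the
 * helper above -- bump RIP past the instruction and drop the interrupt shadow
 * (STI / MOV SS inhibition) once RIP has moved away from the instruction that
 * established it.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>

typedef struct EXCPUSTATE
{
    uint64_t uRip;              /* Guest RIP. */
    bool     fInhibitInt;       /* Interrupt shadow active? */
    uint64_t uInhibitRip;       /* RIP at which the shadow was established. */
} EXCPUSTATE;

static void exAdvanceRip(EXCPUSTATE *pState, uint32_t cbInstr)
{
    pState->uRip += cbInstr;
    if (pState->fInhibitInt && pState->uRip != pState->uInhibitRip)
        pState->fInhibitInt = false;
}
#endif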
12570
12571
12572/**
12573 * Advances the guest RIP after reading it from the VMCS.
12574 *
12575 * @returns VBox status code, no informational status codes.
12576 * @param pVCpu The cross context virtual CPU structure.
12577 * @param pVmxTransient The VMX-transient structure.
12578 *
12579 * @remarks No-long-jump zone!!!
12580 */
12581static int hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12582{
12583 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12584 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
12585 AssertRCReturn(rc, rc);
12586
12587 hmR0VmxAdvanceGuestRipBy(pVCpu, pVmxTransient->cbInstr);
12588 return VINF_SUCCESS;
12589}
12590
12591
12592/**
12593 * Handle a condition that occurred while delivering an event through the guest
12594 * IDT.
12595 *
12596 * @returns Strict VBox status code (i.e. informational status codes too).
12597 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
12598 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
12599 * to continue execution of the guest which will deliver the \#DF.
12600 * @retval VINF_EM_RESET if we detected a triple-fault condition.
12601 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
12602 *
12603 * @param pVCpu The cross context virtual CPU structure.
12604 * @param pVmxTransient The VMX-transient structure.
12605 *
12606 * @remarks No-long-jump zone!!!
12607 */
12608static VBOXSTRICTRC hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12609{
12610 uint32_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
12611
12612 int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
12613 rc2 |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
12614 AssertRCReturn(rc2, rc2);
12615
12616 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
12617 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
12618 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
12619 {
12620 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
12621 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
12622
12623 /*
12624 * If the event was a software interrupt (generated with INT n) or a software exception
12625 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
12626 * can handle the VM-exit and continue guest execution which will re-execute the
12627 * instruction rather than re-injecting the exception, as that can cause premature
12628 * trips to ring-3 before injection and involve TRPM which currently has no way of
12629 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
12630 * the problem).
12631 */
12632 IEMXCPTRAISE enmRaise;
12633 IEMXCPTRAISEINFO fRaiseInfo;
12634 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
12635 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
12636 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
12637 {
12638 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
12639 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
12640 }
12641 else if (VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo))
12642 {
12643 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
12644 uint32_t const fIdtVectorFlags = hmR0VmxGetIemXcptFlags(uIdtVector, uIdtVectorType);
12645 uint32_t const fExitVectorFlags = hmR0VmxGetIemXcptFlags(uExitVector, uExitVectorType);
12646 /** @todo Make AssertMsgReturn as just AssertMsg later. */
12647 AssertMsgReturn(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT,
12648 ("Unexpected VM-exit interruption vector type %#x!\n", uExitVectorType), VERR_VMX_IPE_5);
12649
12650 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
12651
12652 /* Determine a vectoring #PF condition, see comment in hmR0VmxExitXcptPF(). */
12653 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
12654 {
12655 pVmxTransient->fVectoringPF = true;
12656 enmRaise = IEMXCPTRAISE_PREV_EVENT;
12657 }
12658 }
12659 else
12660 {
12661 /*
12662 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
12663 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
12664 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
12665 */
12666 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
12667 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
12668 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
12669 enmRaise = IEMXCPTRAISE_PREV_EVENT;
12670 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
12671 }
12672
12673 /*
12674 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
12675 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
12676 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
12677 * subsequent VM-entry would fail.
12678 *
12679 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". See @bugref{7445}.
12680 */
12681 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)
12682 && uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
12683 && ( enmRaise == IEMXCPTRAISE_PREV_EVENT
12684 || (fRaiseInfo & IEMXCPTRAISEINFO_NMI_PF))
12685 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
12686 {
12687 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
12688 }
12689
12690 switch (enmRaise)
12691 {
12692 case IEMXCPTRAISE_CURRENT_XCPT:
12693 {
12694 Log4Func(("IDT: Pending secondary Xcpt: uIdtVectoringInfo=%#RX64 uExitIntInfo=%#RX64\n",
12695 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uExitIntInfo));
12696 Assert(rcStrict == VINF_SUCCESS);
12697 break;
12698 }
12699
12700 case IEMXCPTRAISE_PREV_EVENT:
12701 {
12702 uint32_t u32ErrCode;
12703 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
12704 {
12705 rc2 = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
12706 AssertRCReturn(rc2, rc2);
12707 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
12708 }
12709 else
12710 u32ErrCode = 0;
12711
12712 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see hmR0VmxExitXcptPF(). */
12713 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
12714 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
12715 0 /* cbInstr */, u32ErrCode, pVCpu->cpum.GstCtx.cr2);
12716
12717 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntInfo,
12718 pVCpu->hm.s.Event.u32ErrCode));
12719 Assert(rcStrict == VINF_SUCCESS);
12720 break;
12721 }
12722
12723 case IEMXCPTRAISE_REEXEC_INSTR:
12724 Assert(rcStrict == VINF_SUCCESS);
12725 break;
12726
12727 case IEMXCPTRAISE_DOUBLE_FAULT:
12728 {
12729 /*
12730 * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
12731 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
12732 */
12733 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
12734 {
12735 pVmxTransient->fVectoringDoublePF = true;
12736 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo,
12737 pVCpu->cpum.GstCtx.cr2));
12738 rcStrict = VINF_SUCCESS;
12739 }
12740 else
12741 {
12742 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
12743 hmR0VmxSetPendingXcptDF(pVCpu);
12744 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo,
12745 uIdtVector, uExitVector));
12746 rcStrict = VINF_HM_DOUBLE_FAULT;
12747 }
12748 break;
12749 }
12750
12751 case IEMXCPTRAISE_TRIPLE_FAULT:
12752 {
12753 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
12754 rcStrict = VINF_EM_RESET;
12755 break;
12756 }
12757
12758 case IEMXCPTRAISE_CPU_HANG:
12759 {
12760 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
12761 rcStrict = VERR_EM_GUEST_CPU_HANG;
12762 break;
12763 }
12764
12765 default:
12766 {
12767 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
12768 rcStrict = VERR_VMX_IPE_2;
12769 break;
12770 }
12771 }
12772 }
12773 else if ( VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo)
12774 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitIntInfo)
12775 && uExitVector != X86_XCPT_DF
12776 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
12777 {
12778 /*
12779 * Execution of IRET caused this fault when NMI blocking was in effect (i.e we're in the guest NMI handler).
12780 * We need to set the block-by-NMI field so that NMIs remain blocked until the IRET execution is restarted.
12781 * See Intel spec. 30.7.1.2 "Resuming guest software after handling an exception".
12782 */
12783 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
12784 {
12785 Log4Func(("Setting VMCPU_FF_BLOCK_NMIS. fValid=%RTbool uExitReason=%u\n",
12786 VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason));
12787 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
12788 }
12789 }
12790
12791 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
12792 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
12793 return rcStrict;
12794}
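/*
 * Illustrative sketch (not built, hypothetical Ex* names): a grossly simplified
 * version of the recursion rules consulted above (the real code defers to
 * IEMEvaluateRecursiveXcpt). Per the architectural tables, a #PF while
 * delivering a #PF or a contributory exception escalates to #DF, two
 * contributory exceptions escalate to #DF, and a fault while delivering #DF
 * ends in a triple fault; everything else is delivered as a new event.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>

typedef enum EXRAISE { EXRAISE_NEW_EVENT = 0, EXRAISE_DOUBLE_FAULT, EXRAISE_TRIPLE_FAULT } EXRAISE;

static bool exIsContributory(uint8_t uVector)
{
    return uVector == 0  /* #DE */ || uVector == 10 /* #TS */ || uVector == 11 /* #NP */
        || uVector == 12 /* #SS */ || uVector == 13 /* #GP */;
}

static EXRAISE exClassifyRecursion(uint8_t uPrevVector, uint8_t uNewVector)
{
    if (uPrevVector == 8 /* #DF */)                             /* Fault while delivering #DF. */
        return EXRAISE_TRIPLE_FAULT;
    if (   uPrevVector == 14 /* #PF */
        && (uNewVector == 14 || exIsContributory(uNewVector)))  /* #PF -> #PF/contributory. */
        return EXRAISE_DOUBLE_FAULT;
    if (exIsContributory(uPrevVector) && exIsContributory(uNewVector))
        return EXRAISE_DOUBLE_FAULT;
    return EXRAISE_NEW_EVENT;
}
#endif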
12795
12796
12797/** @name VM-exit handlers.
12798 * @{
12799 */
12800/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
12801/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
12802/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
12803
12804/**
12805 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
12806 */
12807HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12808{
12809 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12810 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
12811 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
12812 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
12813 return VINF_SUCCESS;
12814 return VINF_EM_RAW_INTERRUPT;
12815}
12816
12817
12818/**
12819 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
12820 */
12821HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12822{
12823 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12824 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
12825
12826 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
12827 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
12828 AssertRCReturn(rc, rc);
12829
12830 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
12831 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
12832 && uIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
12833 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
12834
12835 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
12836 {
12837 /*
12838 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
12839 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
12840 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
12841 *
12842 * [1] -- See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
12843 * [2] -- See Intel spec. 27.5.5 "Updating Non-Register State".
12844 */
12845 VMXDispatchHostNmi();
12846 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
12847 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
12848 return VINF_SUCCESS;
12849 }
12850
12851 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
12852 VBOXSTRICTRC rcStrictRc1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
12853 if (RT_UNLIKELY(rcStrictRc1 == VINF_SUCCESS))
12854 { /* likely */ }
12855 else
12856 {
12857 if (rcStrictRc1 == VINF_HM_DOUBLE_FAULT)
12858 rcStrictRc1 = VINF_SUCCESS;
12859 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
12860 return rcStrictRc1;
12861 }
12862
12863 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
12864 uint32_t const uVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
12865 switch (uIntType)
12866 {
12867 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT: /* Privileged software exception. (#DB from ICEBP) */
12868 Assert(uVector == X86_XCPT_DB);
12869 RT_FALL_THRU();
12870 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
12871 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
12872 RT_FALL_THRU();
12873 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
12874 {
12875 /*
12876 * If there's any exception caused as a result of event injection, the resulting
12877 * secondary/final exception will be pending and we shall continue guest execution
12878 * after injecting the event. The page-fault case is complicated and we manually
12879 * handle any currently pending event in hmR0VmxExitXcptPF.
12880 */
12881 if (!pVCpu->hm.s.Event.fPending)
12882 { /* likely */ }
12883 else if (uVector != X86_XCPT_PF)
12884 {
12885 rc = VINF_SUCCESS;
12886 break;
12887 }
12888
12889 switch (uVector)
12890 {
12891 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pVmxTransient); break;
12892 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pVmxTransient); break;
12893 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pVmxTransient); break;
12894 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pVmxTransient); break;
12895 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pVmxTransient); break;
12896 case X86_XCPT_AC: rc = hmR0VmxExitXcptAC(pVCpu, pVmxTransient); break;
12897
12898 case X86_XCPT_NM: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
12899 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
12900 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
12901 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
12902 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
12903 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
12904 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
12905 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
12906 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
12907 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
12908 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
12909 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
12910 case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS);
12911 rc = hmR0VmxExitXcptGeneric(pVCpu, pVmxTransient); break;
12912 default:
12913 {
12914 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
12915 if (pVmcsInfo->RealMode.fRealOnV86Active)
12916 {
12917 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
12918 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
12919 Assert(CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx));
12920
12921 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
12922 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
12923 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12924 AssertRCReturn(rc, rc);
12925 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
12926 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode,
12927 0 /* GCPtrFaultAddress */);
12928 }
12929 else
12930 {
12931 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
12932 pVCpu->hm.s.u32HMError = uVector;
12933 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
12934 }
12935 break;
12936 }
12937 }
12938 break;
12939 }
12940
12941 default:
12942 {
12943 pVCpu->hm.s.u32HMError = uExitIntInfo;
12944 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
12945 AssertMsgFailed(("Unexpected interruption info %#x\n", VMX_EXIT_INT_INFO_TYPE(uExitIntInfo)));
12946 break;
12947 }
12948 }
12949 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
12950 return rc;
12951}
12952
12953
12954/**
12955 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
12956 */
12957HMVMX_EXIT_NSRC_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12958{
12959 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12960
12961     /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
12962 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
12963 int rc = hmR0VmxClearIntWindowExitVmcs(pVmcsInfo);
12964 AssertRCReturn(rc, rc);
12965
12966 /* Evaluate and deliver pending events and resume guest execution. */
12967 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
12968 return VINF_SUCCESS;
12969}
12970
12971
12972/**
12973 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
12974 */
12975HMVMX_EXIT_NSRC_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
12976{
12977 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
12978
12979 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
12980 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
12981 {
12982 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
12983 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
12984 }
12985
12986 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS));
12987
12988 /*
12989 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
12990 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
12991 */
12992 uint32_t fIntrState;
12993 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
12994 AssertRCReturn(rc, rc);
12995 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
12996 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
12997 {
12998 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
12999 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
13000
13001 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
13002 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
13003 AssertRCReturn(rc, rc);
13004 }
13005
13006     /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
13007 rc = hmR0VmxClearNmiWindowExitVmcs(pVmcsInfo);
13008 AssertRCReturn(rc, rc);
13009
13010 /* Evaluate and deliver pending events and resume guest execution. */
13011 return VINF_SUCCESS;
13012}
13013
13014
13015/**
13016 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
13017 */
13018HMVMX_EXIT_NSRC_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13019{
13020 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13021 return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
13022}
13023
13024
13025/**
13026 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
13027 */
13028HMVMX_EXIT_NSRC_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13029{
13030 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13031 return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
13032}
13033
13034
13035/**
13036 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
13037 */
13038HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13039{
13040 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13041
13042 /*
13043 * Get the state we need and update the exit history entry.
13044 */
13045 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13046 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13047 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
13048 AssertRCReturn(rc, rc);
13049
13050 VBOXSTRICTRC rcStrict;
13051 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
13052 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
13053 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
13054 if (!pExitRec)
13055 {
13056 /*
13057 * Regular CPUID instruction execution.
13058 */
13059 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbInstr);
13060 if (rcStrict == VINF_SUCCESS)
13061 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13062 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13063 {
13064 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13065 rcStrict = VINF_SUCCESS;
13066 }
13067 }
13068 else
13069 {
13070 /*
13071 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
13072 */
13073 int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
13074 AssertRCReturn(rc2, rc2);
13075
13076 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
13077 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
13078
13079 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
13080 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
13081
13082 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
13083 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
13084 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
13085 }
13086 return rcStrict;
13087}
13088
13089
13090/**
13091 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
13092 */
13093HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13094{
13095 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13096
13097 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13098 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
13099 AssertRCReturn(rc, rc);
13100
13101 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
13102 return VINF_EM_RAW_EMULATE_INSTR;
13103
13104 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
13105 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
13106}
13107
13108
13109/**
13110 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
13111 */
13112HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13113{
13114 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13115
13116 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13117 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
13118 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13119 AssertRCReturn(rc, rc);
13120
13121 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbInstr);
13122 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13123 {
13124 /* If we get a spurious VM-exit when TSC offsetting is enabled,
13125 we must reset offsetting on VM-entry. See @bugref{6634}. */
13126 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
13127 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
13128 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13129 }
13130 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13131 {
13132 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13133 rcStrict = VINF_SUCCESS;
13134 }
13135 return rcStrict;
13136}
13137
13138
13139/**
13140 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
13141 */
13142HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13143{
13144 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13145
13146 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13147 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
13148 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13149 AssertRCReturn(rc, rc);
13150
13151 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbInstr);
13152 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13153 {
13154 /* If we get a spurious VM-exit when TSC offsetting is enabled,
13155 we must reset offsetting on VM-reentry. See @bugref{6634}. */
13156 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
13157 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
13158 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13159 }
13160 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13161 {
13162 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13163 rcStrict = VINF_SUCCESS;
13164 }
13165 return rcStrict;
13166}
13167
13168
13169/**
13170 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
13171 */
13172HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13173{
13174 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13175
13176 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13177 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
13178 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
13179 AssertRCReturn(rc, rc);
13180
13181 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
13182 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
13183 if (RT_LIKELY(rc == VINF_SUCCESS))
13184 {
13185 rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
13186 Assert(pVmxTransient->cbInstr == 2);
13187 }
13188 else
13189 {
13190 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
13191 rc = VERR_EM_INTERPRETER;
13192 }
13193 return rc;
13194}
13195
13196
13197/**
13198 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
13199 */
13200HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13201{
13202 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13203
13204 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
13205 if (EMAreHypercallInstructionsEnabled(pVCpu))
13206 {
13207 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13208 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
13209 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
13210 AssertRCReturn(rc, rc);
13211
13212 /* Perform the hypercall. */
13213 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
13214 if (rcStrict == VINF_SUCCESS)
13215 {
13216 rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
13217 AssertRCReturn(rc, rc);
13218 }
13219 else
13220 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
13221 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
13222 || RT_FAILURE(rcStrict));
13223
13224         /* If the hypercall changes anything other than the guest's general-purpose registers,
13225            we would need to reload the changed guest-state bits here before VM-entry. */
13226 }
13227 else
13228 Log4Func(("Hypercalls not enabled\n"));
13229
13230 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
13231 if (RT_FAILURE(rcStrict))
13232 {
13233 hmR0VmxSetPendingXcptUD(pVCpu);
13234 rcStrict = VINF_SUCCESS;
13235 }
13236
13237 return rcStrict;
13238}
13239
13240
13241/**
13242 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
13243 */
13244HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13245{
13246 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13247 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging || pVCpu->hm.s.fUsingDebugLoop);
13248
13249 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13250 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
13251 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13252 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
13253 AssertRCReturn(rc, rc);
13254
13255 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbInstr, pVmxTransient->uExitQual);
13256
13257 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
13258 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13259 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13260 {
13261 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13262 rcStrict = VINF_SUCCESS;
13263 }
13264 else
13265         AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
13266 VBOXSTRICTRC_VAL(rcStrict)));
13267 return rcStrict;
13268}
13269
13270
13271/**
13272 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
13273 */
13274HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13275{
13276 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13277
13278 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13279 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
13280 AssertRCReturn(rc, rc);
13281
13282 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
13283 rc = EMInterpretMonitor(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
13284 if (RT_LIKELY(rc == VINF_SUCCESS))
13285 rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
13286 else
13287 {
13288 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
13289 rc = VERR_EM_INTERPRETER;
13290 }
13291 return rc;
13292}
13293
13294
13295/**
13296 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
13297 */
13298HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13299{
13300 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13301
13302 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13303 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
13304 AssertRCReturn(rc, rc);
13305
13306 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
13307 VBOXSTRICTRC rc2 = EMInterpretMWait(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
13308 rc = VBOXSTRICTRC_VAL(rc2);
13309 if (RT_LIKELY( rc == VINF_SUCCESS
13310 || rc == VINF_EM_HALT))
13311 {
13312 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
13313 AssertRCReturn(rc3, rc3);
13314
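13315         /* If MWAIT would halt the vCPU but EM determines the wait should continue (e.g. a wake-up
13316            condition is already pending), resume guest execution instead of going to ring-3 to halt. */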
13315 if ( rc == VINF_EM_HALT
13316 && EMMonitorWaitShouldContinue(pVCpu, pCtx))
13317 rc = VINF_SUCCESS;
13318 }
13319 else
13320 {
13321 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
13322 rc = VERR_EM_INTERPRETER;
13323 }
13324 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
13325 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
13326 return rc;
13327}
13328
13329
13330/**
13331 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
13332 */
13333HMVMX_EXIT_NSRC_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13334{
13335 /*
13336 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root
13337 * mode. In theory, we should never get this VM-exit. This can happen only if dual-monitor
13338 * treatment of SMI and VMX is enabled, which can (only?) be done by executing VMCALL in
13339 * VMX root operation. If we get here, something funny is going on.
13340 *
13341 * See Intel spec. 33.15.5 "Enabling the Dual-Monitor Treatment".
13342 */
13343 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13344 AssertMsgFailed(("Unexpected RSM VM-exit\n"));
13345 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
13346}
13347
13348
13349/**
13350 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
13351 */
13352HMVMX_EXIT_NSRC_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13353{
13354 /*
13355 * This can only happen if we support dual-monitor treatment of SMI, which can be activated
13356 * by executing VMCALL in VMX root operation. Only an STM (SMM transfer monitor) would get
13357 * this VM-exit when we (the executive monitor) execute a VMCALL in VMX root mode or receive
13358 * an SMI. If we get here, something funny is going on.
13359 *
13360 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
13361 * See Intel spec. 25.3 "Other Causes of VM-Exits"
13362 */
13363 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13364 AssertMsgFailed(("Unexpected SMI VM-exit\n"));
13365 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
13366}
13367
13368
13369/**
13370 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
13371 */
13372HMVMX_EXIT_NSRC_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13373{
13374 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
13375 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13376 AssertMsgFailed(("Unexpected IO SMI VM-exit\n"));
13377 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
13378}
13379
13380
13381/**
13382 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
13383 */
13384HMVMX_EXIT_NSRC_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13385{
13386 /*
13387 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used.
13388 * We don't make use of it as our guests don't have direct access to the host LAPIC.
13389 * See Intel spec. 25.3 "Other Causes of VM-exits".
13390 */
13391 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13392 AssertMsgFailed(("Unexpected SIPI VM-exit\n"));
13393 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
13394}
13395
13396
13397/**
13398 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
13399 * VM-exit.
13400 */
13401HMVMX_EXIT_NSRC_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13402{
13403 /*
13404      * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
13405      * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery" and Intel spec. 29.3 "VMX Instructions" for "VMXON".
13406      *
13407      * They are -NOT- blocked in VMX non-root operation, so we can, in theory, still get these VM-exits.
13408      * See Intel spec. 23.8 "Restrictions on VMX Operation".
13409 */
13410 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13411 return VINF_SUCCESS;
13412}
13413
13414
13415/**
13416 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
13417 * VM-exit.
13418 */
13419HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13420{
13421 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13422 return VINF_EM_RESET;
13423}
13424
13425
13426/**
13427 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
13428 */
13429HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13430{
13431 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13432
13433 int rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
13434 AssertRCReturn(rc, rc);
13435
13436 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
13437 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
13438 rc = VINF_SUCCESS;
13439 else
13440 rc = VINF_EM_HALT;
13441
13442 if (rc != VINF_SUCCESS)
13443 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
13444 return rc;
13445}
13446
13447
13448/**
13449 * VM-exit handler for instructions that result in a \#UD exception delivered to
13450 * the guest.
13451 */
13452HMVMX_EXIT_NSRC_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13453{
13454 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13455 hmR0VmxSetPendingXcptUD(pVCpu);
13456 return VINF_SUCCESS;
13457}
13458
13459
13460/**
13461 * VM-exit handler for expiry of the VMX-preemption timer.
13462 */
13463HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13464{
13465 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13466
13467 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
13468 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
13469
13470     /* If there are any timer events pending, fall back to ring-3; otherwise resume guest execution. */
13471 PVM pVM = pVCpu->CTX_SUFF(pVM);
13472 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
13473 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
13474 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
13475}
13476
13477
13478/**
13479 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
13480 */
13481HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13482{
13483 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13484
13485 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13486 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13487 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
13488 AssertRCReturn(rc, rc);
13489
13490 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbInstr);
13491 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
13492 : HM_CHANGED_RAISED_XCPT_MASK);
13493
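13494     /* Re-evaluate whether XCR0 needs to be swapped on VM-entry/VM-exit: only when the guest has
13495        CR4.OSXSAVE set and its XCR0 differs from the host's current XCR0. */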
13494 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
13495 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
13496
13497 return rcStrict;
13498}
13499
13500
13501/**
13502 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
13503 */
13504HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13505{
13506 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13507 /** @todo Use VM-exit instruction information. */
13508 return VERR_EM_INTERPRETER;
13509}
13510
13511
13512/**
13513 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
13514 * Error VM-exit.
13515 */
13516HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13517{
13518 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13519 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
13520 AssertRCReturn(rc, rc);
13521
13522 rc = hmR0VmxCheckVmcsCtls(pVCpu, pVmcsInfo);
13523 if (RT_FAILURE(rc))
13524 return rc;
13525
13526 uint32_t const uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pVmcsInfo);
13527 NOREF(uInvalidReason);
13528
13529#ifdef VBOX_STRICT
13530 uint32_t fIntrState;
13531 RTHCUINTREG uHCReg;
13532 uint64_t u64Val;
13533 uint32_t u32Val;
13534 rc = hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
13535 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
13536 rc |= hmR0VmxReadEntryInstrLenVmcs(pVmxTransient);
13537 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
13538 AssertRCReturn(rc, rc);
13539
13540 Log4(("uInvalidReason %u\n", uInvalidReason));
13541 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
13542 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
13543 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
13544 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
13545
13546 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
13547 Log4(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
13548 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
13549 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
13550 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
13551     Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW            %#RHr\n", uHCReg));
13552 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
13553 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
13554 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
13555 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
13556 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
13557 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
13558
13559 hmR0DumpRegs(pVCpu);
13560#endif
13561
13562 return VERR_VMX_INVALID_GUEST_STATE;
13563}
13564
13565
13566/**
13567 * VM-exit handler for VM-entry failure due to an MSR-load
13568 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
13569 */
13570HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13571{
13572 AssertMsgFailed(("Unexpected MSR-load exit\n"));
13573 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
13574}
13575
13576
13577/**
13578 * VM-exit handler for VM-entry failure due to a machine-check event
13579 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
13580 */
13581HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13582{
13583 AssertMsgFailed(("Unexpected machine-check event exit\n"));
13584 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
13585}
13586
13587
13588/**
13589  * VM-exit handler for all undefined reasons. Should never ever happen... in
13590 * theory.
13591 */
13592HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13593{
13594 RT_NOREF2(pVCpu, pVmxTransient);
13595 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d\n", pVmxTransient->uExitReason));
13596 return VERR_VMX_UNDEFINED_EXIT_CODE;
13597}
13598
13599
13600/**
13601 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
13602 * (VMX_EXIT_GDTR_IDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
13603 * Conditional VM-exit.
13604 */
13605HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13606{
13607 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13608
13609     /* By default, we don't enable VMX_PROC_CTLS2_DESC_TABLE_EXIT. */
13610 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
13611 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13612 if (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_DESC_TABLE_EXIT)
13613 return VERR_EM_INTERPRETER;
13614 AssertMsgFailed(("Unexpected XDTR access\n"));
13615 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
13616}
13617
13618
13619/**
13620 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
13621 */
13622HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13623{
13624 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13625
13626 /* By default, we don't enable VMX_PROC_CTLS2_RDRAND_EXIT. */
13627 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13628 if (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_RDRAND_EXIT)
13629 return VERR_EM_INTERPRETER;
13630 AssertMsgFailed(("Unexpected RDRAND exit\n"));
13631 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
13632}
13633
13634
13635/**
13636 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
13637 */
13638HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13639{
13640 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13641
13642     /** @todo Optimize this: We currently drag in the whole MSR state
13643      *        (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
13644      *        MSRs required. That would require changes to IEM and possibly CPUM too.
13645      *        (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
13646 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13647 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
13648 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
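13649     /* The FS and GS base MSRs are not part of CPUMCTX_EXTRN_ALL_MSRS; import the full segment
13650        registers for those (see the corresponding remark in hmR0VmxExitWrmsr()). */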
13649 switch (idMsr)
13650 {
13651 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
13652 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
13653 }
13654
13655 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13656 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fImport);
13657 AssertRCReturn(rc, rc);
13658
13659 Log4Func(("ecx=%#RX32\n", idMsr));
13660
13661#ifdef VBOX_STRICT
13662 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
13663 {
13664 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
13665 && idMsr != MSR_K6_EFER)
13666 {
13667 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
13668 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
13669 }
13670 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
13671 {
13672 Assert(pVmcsInfo->pvMsrBitmap);
13673 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
13674 if (fMsrpm & VMXMSRPM_ALLOW_RD)
13675 {
13676 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
13677 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
13678 }
13679 }
13680 }
13681#endif
13682
13683 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbInstr);
13684 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
13685 if (rcStrict == VINF_SUCCESS)
13686 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
13687 | HM_CHANGED_GUEST_RAX | HM_CHANGED_GUEST_RDX);
13688 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13689 {
13690 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13691 rcStrict = VINF_SUCCESS;
13692 }
13693 else
13694 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ, ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
13695
13696 return rcStrict;
13697}
13698
13699
13700/**
13701 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
13702 */
13703HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13704{
13705 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13706
13707     /** @todo Optimize this: We currently drag in the whole MSR state
13708      * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get the
13709      * MSRs required. That would require changes to IEM and possibly CPUM too.
13710      * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
13711 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
13712 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
13713
13714 /*
13715 * The FS and GS base MSRs are not part of the above all-MSRs mask.
13716      * Although we don't need to fetch the base (it will be overwritten shortly), while
13717      * loading the guest state we would also load the entire segment register, including the
13718      * limit and attributes, and thus we need to import them here.
13719 */
13720 switch (idMsr)
13721 {
13722 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
13723 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
13724 }
13725
13726 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13727 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13728 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fImport);
13729 AssertRCReturn(rc, rc);
13730
13731 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
13732
13733 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbInstr);
13734 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
13735
13736 if (rcStrict == VINF_SUCCESS)
13737 {
13738 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13739
13740 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
13741 if ( idMsr == MSR_IA32_APICBASE
13742 || ( idMsr >= MSR_IA32_X2APIC_START
13743 && idMsr <= MSR_IA32_X2APIC_END))
13744 {
13745 /*
13746              * We've already saved the APIC-related guest-state (TPR) in the post-run phase.
13747 * When full APIC register virtualization is implemented we'll have to make
13748 * sure APIC state is saved from the VMCS before IEM changes it.
13749 */
13750 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
13751 }
13752 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
13753 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
13754 else if (idMsr == MSR_K6_EFER)
13755 {
13756 /*
13757 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
13758 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
13759 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
13760 */
13761 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
13762 }
13763
13764 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */
13765 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
13766 {
13767 switch (idMsr)
13768 {
13769 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
13770 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
13771 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
13772 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS); break;
13773 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS); break;
13774 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
13775 default:
13776 {
13777 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
13778 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
13779 else if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
13780 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
13781 break;
13782 }
13783 }
13784 }
13785#ifdef VBOX_STRICT
13786 else
13787 {
13788 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
13789 switch (idMsr)
13790 {
13791 case MSR_IA32_SYSENTER_CS:
13792 case MSR_IA32_SYSENTER_EIP:
13793 case MSR_IA32_SYSENTER_ESP:
13794 case MSR_K8_FS_BASE:
13795 case MSR_K8_GS_BASE:
13796 {
13797 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
13798 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
13799 }
13800
13801             /* Writes to MSRs in the auto-load/store area or to swapped MSRs shouldn't cause VM-exits when MSR-bitmaps are used. */
13802 default:
13803 {
13804 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
13805 {
13806 /* EFER MSR writes are always intercepted. */
13807 if (idMsr != MSR_K6_EFER)
13808 {
13809 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
13810 idMsr));
13811 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
13812 }
13813 }
13814
13815 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
13816 {
13817 Assert(pVmcsInfo->pvMsrBitmap);
13818 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
13819 if (fMsrpm & VMXMSRPM_ALLOW_WR)
13820 {
13821 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
13822 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
13823 }
13824 }
13825 break;
13826 }
13827 }
13828 }
13829#endif /* VBOX_STRICT */
13830 }
13831 else if (rcStrict == VINF_IEM_RAISED_XCPT)
13832 {
13833 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13834 rcStrict = VINF_SUCCESS;
13835 }
13836 else
13837 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE, ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
13838
13839 return rcStrict;
13840}
13841
13842
13843/**
13844 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
13845 */
13846HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13847{
13848 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13849 /** @todo The guest has likely hit a contended spinlock. We might want to
13850      *        poke or schedule a different guest VCPU. */
13851 return VINF_EM_RAW_INTERRUPT;
13852}
13853
13854
13855/**
13856 * VM-exit handler for when the TPR value is lowered below the specified
13857 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
13858 */
13859HMVMX_EXIT_NSRC_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13860{
13861 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13862 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
13863
13864 /*
13865 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
13866 * We'll re-evaluate pending interrupts and inject them before the next VM
13867 * entry so we can just continue execution here.
13868 */
13869 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
13870 return VINF_SUCCESS;
13871}
13872
13873
13874/**
13875 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
13876 * VM-exit.
13877 *
13878 * @retval VINF_SUCCESS when guest execution can continue.
13879 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
13880  * @retval VERR_EM_INTERPRETER when something unexpected happened; fall back to the
13881  *         interpreter.
13882 */
13883HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
13884{
13885 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13886 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
13887
13888 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13889 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
13890 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13891 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
13892 AssertRCReturn(rc, rc);
13893
13894 VBOXSTRICTRC rcStrict;
13895 PVM pVM = pVCpu->CTX_SUFF(pVM);
13896 RTGCUINTPTR const uExitQual = pVmxTransient->uExitQual;
13897 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
13898 switch (uAccessType)
13899 {
13900 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE: /* MOV to CRx */
13901 {
13902 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
13903 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_REGISTER(uExitQual),
13904 VMX_EXIT_QUAL_CRX_GENREG(uExitQual));
13905 AssertMsg( rcStrict == VINF_SUCCESS
13906 || rcStrict == VINF_IEM_RAISED_XCPT
13907 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13908
13909 switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQual))
13910 {
13911 case 0:
13912 {
13913 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
13914 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
13915 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
13916 Log4Func(("CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
13917
13918 /*
13919 * This is a kludge for handling switches back to real mode when we try to use
13920 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
13921 * deal with special selector values, so we have to return to ring-3 and run
13922 * there till the selector values are V86 mode compatible.
13923 *
13924 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
13925              * latter is an alias for VINF_IEM_RAISED_XCPT which is converted to VINF_SUCCESS
13926 * at the end of this function.
13927 */
13928 if ( rc == VINF_SUCCESS
13929 && !pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest
13930 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
13931 && (uOldCr0 & X86_CR0_PE)
13932 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
13933 {
13934 /** @todo check selectors rather than returning all the time. */
13935 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
13936 rcStrict = VINF_EM_RESCHEDULE_REM;
13937 }
13938 break;
13939 }
13940
13941 case 2:
13942 {
13943 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write);
13944                 /* Nothing to do here, CR2 is not part of the VMCS. */
13945 break;
13946 }
13947
13948 case 3:
13949 {
13950 Assert( !pVM->hm.s.fNestedPaging
13951 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
13952 || pVCpu->hm.s.fUsingDebugLoop);
13953 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
13954 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
13955 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
13956 Log4Func(("CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
13957 break;
13958 }
13959
13960 case 4:
13961 {
13962 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
13963 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
13964 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
13965 Log4Func(("CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
13966 pVCpu->cpum.GstCtx.cr4, pVCpu->hm.s.fLoadSaveGuestXcr0));
13967 break;
13968 }
13969
13970 case 8:
13971 {
13972 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
13973 Assert(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
13974 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
13975 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
13976 break;
13977 }
13978 default:
13979 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQual)));
13980 break;
13981 }
13982 break;
13983 }
13984
13985 case VMX_EXIT_QUAL_CRX_ACCESS_READ: /* MOV from CRx */
13986 {
13987 Assert( !pVM->hm.s.fNestedPaging
13988 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
13989 || pVCpu->hm.s.fUsingDebugLoop
13990 || VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 3);
13991 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
13992 Assert( VMX_EXIT_QUAL_CRX_REGISTER(uExitQual) != 8
13993 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
13994
13995 rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_GENREG(uExitQual),
13996 VMX_EXIT_QUAL_CRX_REGISTER(uExitQual));
13997 AssertMsg( rcStrict == VINF_SUCCESS
13998 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13999#ifdef VBOX_WITH_STATISTICS
14000 switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQual))
14001 {
14002 case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
14003 case 2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break;
14004 case 3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break;
14005 case 4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break;
14006 case 8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break;
14007 }
14008#endif
14009 Log4Func(("CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQual),
14010 VBOXSTRICTRC_VAL(rcStrict)));
14011 if (VMX_EXIT_QUAL_CRX_GENREG(uExitQual) == X86_GREG_xSP)
14012 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
14013 else
14014 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
14015 break;
14016 }
14017
14018 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
14019 {
14020 rcStrict = IEMExecDecodedClts(pVCpu, pVmxTransient->cbInstr);
14021 AssertMsg( rcStrict == VINF_SUCCESS
14022 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14023
14024 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
14025 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
14026 Log4Func(("CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
14027 break;
14028 }
14029
14030 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
14031 {
14032 /* Note! LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here. */
14033 rc = hmR0VmxReadGuestLinearAddrVmcs(pVCpu, pVmxTransient);
14034 AssertRCReturn(rc, rc);
14035 rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr, VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual),
14036 pVmxTransient->uGuestLinearAddr);
14037 AssertMsg( rcStrict == VINF_SUCCESS
14038 || rcStrict == VINF_IEM_RAISED_XCPT
14039 , ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14040
14041 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
14042 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
14043 Log4Func(("LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
14044 break;
14045 }
14046
14047 default:
14048 AssertMsgFailedReturn(("Invalid access-type in Mov CRx VM-exit qualification %#x\n", uAccessType),
14049 VERR_VMX_UNEXPECTED_EXCEPTION);
14050 }
14051
14052 Assert( (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
14053 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
14054 if (rcStrict == VINF_IEM_RAISED_XCPT)
14055 {
14056 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
14057 rcStrict = VINF_SUCCESS;
14058 }
14059
14060 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
14061 NOREF(pVM);
14062 return rcStrict;
14063}
14064
14065
14066/**
14067 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
14068 * VM-exit.
14069 */
14070HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14071{
14072 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14073 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
14074
14075 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14076 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14077 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
14078 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
14079 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
14080 | CPUMCTX_EXTRN_EFER);
14081     /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
14082 AssertRCReturn(rc, rc);
14083
14084     /* Refer to Intel spec. Table 27-5 "Exit Qualifications for I/O Instructions" for the format. */
14085 uint32_t uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
14086 uint8_t uIOWidth = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQual);
14087 bool fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
14088 bool fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
14089 bool fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
14090 bool fDbgStepping = pVCpu->hm.s.fSingleInstruction;
14091 AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_VMX_IPE_1);
14092
14093 /*
14094 * Update exit history to see if this exit can be optimized.
14095 */
14096 VBOXSTRICTRC rcStrict;
14097 PCEMEXITREC pExitRec = NULL;
14098 if ( !fGstStepping
14099 && !fDbgStepping)
14100 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
14101 !fIOString
14102 ? !fIOWrite
14103 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
14104 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
14105 : !fIOWrite
14106 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
14107 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
14108 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
14109 if (!pExitRec)
14110 {
14111 /* I/O operation lookup arrays. */
14112 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */
14113 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
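14114         /* Illustrative example (not in the original source): a 1-byte 'out dx, al' has uIOWidth=0,
14115            giving cbValue=1 and an AND mask of 0xff; width encoding 2 is invalid (see the AssertReturn above). */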
14114 uint32_t const cbValue = s_aIOSizes[uIOWidth];
14115 uint32_t const cbInstr = pVmxTransient->cbInstr;
14116 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
14117 PVM pVM = pVCpu->CTX_SUFF(pVM);
14118 if (fIOString)
14119 {
14120 /*
14121 * INS/OUTS - I/O String instruction.
14122 *
14123 * Use instruction-information if available, otherwise fall back on
14124 * interpreting the instruction.
14125 */
14126 Log4Func(("cs:rip=%#04x:%#RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
14127 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
14128 bool const fInsOutsInfo = RT_BF_GET(pVM->hm.s.vmx.Msrs.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
14129 if (fInsOutsInfo)
14130 {
14131 int rc2 = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
14132 AssertRCReturn(rc2, rc2);
14133 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
14134 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
14135 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
14136 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
14137 if (fIOWrite)
14138 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
14139 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
14140 else
14141 {
14142 /*
14143 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
14144                  * Hence the "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
14145 * See Intel Instruction spec. for "INS".
14146 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
14147 */
14148 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
14149 }
14150 }
14151 else
14152 rcStrict = IEMExecOne(pVCpu);
14153
14154 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
14155 fUpdateRipAlready = true;
14156 }
14157 else
14158 {
14159 /*
14160 * IN/OUT - I/O instruction.
14161 */
14162 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
14163 uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
14164 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
14165 if (fIOWrite)
14166 {
14167 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
14168 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
14169 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
14170 && !pCtx->eflags.Bits.u1TF)
14171 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
14172 }
14173 else
14174 {
14175 uint32_t u32Result = 0;
14176 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
14177 if (IOM_SUCCESS(rcStrict))
14178 {
14179 /* Save result of I/O IN instr. in AL/AX/EAX. */
14180 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
14181 }
14182 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
14183 && !pCtx->eflags.Bits.u1TF)
14184 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
14185 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
14186 }
14187 }
14188
14189 if (IOM_SUCCESS(rcStrict))
14190 {
14191 if (!fUpdateRipAlready)
14192 {
14193 hmR0VmxAdvanceGuestRipBy(pVCpu, cbInstr);
14194 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
14195 }
14196
14197 /*
14198              * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru
14199              * meditation while booting a Fedora 17 64-bit guest.
14200 *
14201 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
14202 */
14203 if (fIOString)
14204 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
14205
14206 /*
14207 * If any I/O breakpoints are armed, we need to check if one triggered
14208 * and take appropriate action.
14209 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
14210 */
14211 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
14212 AssertRCReturn(rc, rc);
14213
14214 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
14215 * execution engines about whether hyper BPs and such are pending. */
14216 uint32_t const uDr7 = pCtx->dr[7];
14217 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
14218 && X86_DR7_ANY_RW_IO(uDr7)
14219 && (pCtx->cr4 & X86_CR4_DE))
14220 || DBGFBpIsHwIoArmed(pVM)))
14221 {
14222 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
14223
14224                 /* We're playing with the host CPU state here, so make sure we don't preempt or longjmp. */
14225 VMMRZCallRing3Disable(pVCpu);
14226 HM_DISABLE_PREEMPT(pVCpu);
14227
14228 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
14229
14230 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
14231 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
14232 {
14233 /* Raise #DB. */
14234 if (fIsGuestDbgActive)
14235 ASMSetDR6(pCtx->dr[6]);
14236 if (pCtx->dr[7] != uDr7)
14237 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_DR7;
14238
14239 hmR0VmxSetPendingXcptDB(pVCpu);
14240 }
14241                 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST];
14242                    however, we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as a backup. */
14243 else if ( rcStrict2 != VINF_SUCCESS
14244 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
14245 rcStrict = rcStrict2;
14246 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
14247
14248 HM_RESTORE_PREEMPT();
14249 VMMRZCallRing3Enable(pVCpu);
14250 }
14251 }
14252
14253#ifdef VBOX_STRICT
14254 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
14255 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
14256 Assert(!fIOWrite);
14257 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
14258 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14259 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
14260 Assert(fIOWrite);
14261 else
14262 {
14263# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
14264 * statuses, that the VMM device and some others may return. See
14265 * IOM_SUCCESS() for guidance. */
14266 AssertMsg( RT_FAILURE(rcStrict)
14267 || rcStrict == VINF_SUCCESS
14268 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14269 || rcStrict == VINF_EM_DBG_BREAKPOINT
14270 || rcStrict == VINF_EM_RAW_GUEST_TRAP
14271 || rcStrict == VINF_EM_RAW_TO_R3
14272 || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14273# endif
14274 }
14275#endif
14276 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
14277 }
14278 else
14279 {
14280 /*
14281 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
14282 */
14283 int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
14284 AssertRCReturn(rc2, rc2);
14285 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead
14286 : fIOWrite ? &pVCpu->hm.s.StatExitIOStringWrite : &pVCpu->hm.s.StatExitIOStringRead);
14287 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
14288 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
14289 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
14290 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOWidth));
14291
14292 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
14293 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
14294
14295 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
14296 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
14297 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
14298 }
14299 return rcStrict;
14300}
14301
14302
14303/**
14304 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
14305 * VM-exit.
14306 */
14307HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14308{
14309 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14310
14311 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
14312 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
14313 AssertRCReturn(rc, rc);
14314 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
14315 {
14316 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
14317 AssertRCReturn(rc, rc);
14318 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
14319 {
14320 uint32_t uErrCode;
14321 RTGCUINTPTR GCPtrFaultAddress;
14322 uint32_t const uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
14323 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
14324 bool const fErrorCodeValid = VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo);
14325 if (fErrorCodeValid)
14326 {
14327 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
14328 AssertRCReturn(rc, rc);
14329 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
14330 }
14331 else
14332 uErrCode = 0;
14333
14334 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
14335 && uVector == X86_XCPT_PF)
14336 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
14337 else
14338 GCPtrFaultAddress = 0;
14339
14340 rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
14341 AssertRCReturn(rc, rc);
14342
14343 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
14344 pVmxTransient->cbInstr, uErrCode, GCPtrFaultAddress);
14345
14346 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", uIntType, uVector));
14347 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
14348 return VINF_EM_RAW_INJECT_TRPM_EVENT;
14349 }
14350 }
14351
14352 /* Fall back to the interpreter to emulate the task-switch. */
14353 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
14354 return VERR_EM_INTERPRETER;
14355}
14356
14357
14358/**
14359 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
14360 */
14361HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14362{
14363 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14364
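/* Annotation (not in the original source): the monitor trap flag causes a VM-exit after exactly
   one guest instruction.  Clearing the control below switches single-stepping back off, and
   returning VINF_EM_DBG_STEPPED lets the debug loop / DBGF report the completed step. */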
14365 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14366 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
14367 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
14368 AssertRCReturn(rc, rc);
14369 return VINF_EM_DBG_STEPPED;
14370}
14371
14372
14373/**
14374 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
14375 */
14376HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14377{
14378 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14379 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
14380
14381 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
14382 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
14383 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
14384 {
14385 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
14386 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
14387 {
14388 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingInterpret);
14389 return VINF_EM_RAW_INJECT_TRPM_EVENT;
14390 }
14391 }
14392 else
14393 {
14394 if (rcStrict1 == VINF_HM_DOUBLE_FAULT)
14395 rcStrict1 = VINF_SUCCESS;
14396 return rcStrict1;
14397 }
14398
14399 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
14400 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14401 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
14402 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
14403 AssertRCReturn(rc, rc);
14404
14405 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
14406 uint32_t uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
14407 VBOXSTRICTRC rcStrict2;
14408 switch (uAccessType)
14409 {
14410 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
14411 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
14412 {
14413 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
14414 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
14415 ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
14416
14417 RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
14418 GCPhys &= PAGE_BASE_GC_MASK;
14419 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
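/* Annotation (illustrative, not in the original source): with the guest APIC base at the usual
   0xfee00000, a linear access to offset 0x300 (the xAPIC ICR low register) ends up here as
   GCPhys = 0xfee00300, which IOMMMIOPhysHandler below routes to the virtual APIC. */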
14420 PVM pVM = pVCpu->CTX_SUFF(pVM);
14421 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
14422 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
14423
14424 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14425 rcStrict2 = IOMMMIOPhysHandler(pVM, pVCpu,
14426 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW,
14427 CPUMCTX2CORE(pCtx), GCPhys);
14428 Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
14429 if ( rcStrict2 == VINF_SUCCESS
14430 || rcStrict2 == VERR_PAGE_TABLE_NOT_PRESENT
14431 || rcStrict2 == VERR_PAGE_NOT_PRESENT)
14432 {
14433 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
14434 | HM_CHANGED_GUEST_APIC_TPR);
14435 rcStrict2 = VINF_SUCCESS;
14436 }
14437 break;
14438 }
14439
14440 default:
14441 Log4Func(("uAccessType=%#x\n", uAccessType));
14442 rcStrict2 = VINF_EM_RAW_EMULATE_INSTR;
14443 break;
14444 }
14445
14446 if (rcStrict2 != VINF_SUCCESS)
14447 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchApicAccessToR3);
14448 return rcStrict2;
14449}
14450
14451
14452/**
14453 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
14454 * VM-exit.
14455 */
14456HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14457{
14458 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14459
14460 /* We should -not- get this VM-exit if the guest's debug registers were active. */
14461 if (pVmxTransient->fWasGuestDebugStateActive)
14462 {
14463 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
14464 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
14465 }
14466
14467 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14468 if ( !pVCpu->hm.s.fSingleInstruction
14469 && !pVmxTransient->fWasHyperDebugStateActive)
14470 {
14471 Assert(!DBGFIsStepping(pVCpu));
14472 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
14473
14474 /* Don't intercept MOV DRx any more. */
14475 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
14476 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
14477 AssertRCReturn(rc, rc);
14478
14479 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
14480 VMMRZCallRing3Disable(pVCpu);
14481 HM_DISABLE_PREEMPT(pVCpu);
14482
14483 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
14484 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
14485 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
14486
14487 HM_RESTORE_PREEMPT();
14488 VMMRZCallRing3Enable(pVCpu);
14489
14490#ifdef VBOX_WITH_STATISTICS
14491 rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
14492 AssertRCReturn(rc, rc);
14493 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
14494 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
14495 else
14496 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
14497#endif
14498 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
14499 return VINF_SUCCESS;
14500 }
14501
14502 /*
14503 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode(), which requires the EFER MSR and CS.
14504 * The EFER MSR is always up-to-date.
14505 * Update the segment registers and DR7 from the CPU.
14506 */
14507 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14508 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
14509 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
14510 AssertRCReturn(rc, rc);
14511 Log4Func(("cs:rip=%#04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
14512
14513 PVM pVM = pVCpu->CTX_SUFF(pVM);
14514 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
14515 {
14516 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
14517 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
14518 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
14519 if (RT_SUCCESS(rc))
14520 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
14521 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
14522 }
14523 else
14524 {
14525 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
14526 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
14527 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
14528 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
14529 }
14530
14531 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
14532 if (RT_SUCCESS(rc))
14533 {
14534 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
14535 AssertRCReturn(rc2, rc2);
14536 return VINF_SUCCESS;
14537 }
14538 return rc;
14539}
14540
14541
14542/**
14543 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
14544 * Conditional VM-exit.
14545 */
14546HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14547{
14548 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14549 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
14550
14551 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
14552 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
14553 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
14554 {
14555 /* If event delivery causes an EPT misconfig (MMIO), go back to instruction emulation as otherwise
14556 injecting the original pending event would most likely cause the same EPT misconfig VM-exit. */
14557 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
14558 {
14559 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingInterpret);
14560 return VINF_EM_RAW_INJECT_TRPM_EVENT;
14561 }
14562 }
14563 else
14564 {
14565 if (rcStrict1 == VINF_HM_DOUBLE_FAULT)
14566 rcStrict1 = VINF_SUCCESS;
14567 return rcStrict1;
14568 }
14569
14570 /*
14571 * Get sufficient state and update the exit history entry.
14572 */
14573 RTGCPHYS GCPhys;
14574 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14575 int rc = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys);
14576 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
14577 AssertRCReturn(rc, rc);
14578
14579 VBOXSTRICTRC rcStrict;
14580 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
14581 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
14582 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
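/* Annotation (not in the original source): a NULL exit record means this RIP has not been flagged
   as a frequent MMIO exit, so the access is handled directly in the branch below; otherwise
   EMHistoryExec re-executes/probes it with the full guest state imported in the else branch. */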
14583 if (!pExitRec)
14584 {
14585 /*
14586 * If we succeed, resume guest execution.
14587 * If we fail in interpreting the instruction because we couldn't get the guest physical address
14588 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
14589 * in the host TLB), resume execution, which would cause a guest page fault and let the guest handle this
14590 * weird case. See @bugref{6043}.
14591 */
14592 PVM pVM = pVCpu->CTX_SUFF(pVM);
14593 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14594 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
14595 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
14596 if ( rcStrict == VINF_SUCCESS
14597 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
14598 || rcStrict == VERR_PAGE_NOT_PRESENT)
14599 {
14600 /* Successfully handled MMIO operation. */
14601 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
14602 | HM_CHANGED_GUEST_APIC_TPR);
14603 rcStrict = VINF_SUCCESS;
14604 }
14605 }
14606 else
14607 {
14608 /*
14609 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
14610 */
14611 int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
14612 AssertRCReturn(rc2, rc2);
14613
14614 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
14615 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
14616
14617 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
14618 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
14619
14620 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
14621 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
14622 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
14623 }
14624 return VBOXSTRICTRC_TODO(rcStrict);
14625}
14626
14627
14628/**
14629 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
14630 * VM-exit.
14631 */
14632HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14633{
14634 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14635 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
14636
14637 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
14638 VBOXSTRICTRC rcStrict1 = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
14639 if (RT_LIKELY(rcStrict1 == VINF_SUCCESS))
14640 {
14641 /* In the unlikely case that the EPT violation happened as a result of delivering an event, log it. */
14642 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
14643 Log4Func(("EPT violation with an event pending u64IntInfo=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo));
14644 }
14645 else
14646 {
14647 if (rcStrict1 == VINF_HM_DOUBLE_FAULT)
14648 rcStrict1 = VINF_SUCCESS;
14649 return rcStrict1;
14650 }
14651
14652 RTGCPHYS GCPhys;
14653 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14654 int rc = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &GCPhys);
14655 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
14656 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
14657 AssertRCReturn(rc, rc);
14658
14659 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
14660 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQual));
14661
14662 RTGCUINT uErrorCode = 0;
14663 if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_INSTR_FETCH)
14664 uErrorCode |= X86_TRAP_PF_ID;
14665 if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_DATA_WRITE)
14666 uErrorCode |= X86_TRAP_PF_RW;
14667 if (pVmxTransient->uExitQual & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT)
14668 uErrorCode |= X86_TRAP_PF_P;
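/* Annotation (not in the original source): the EPT-violation qualification bits are translated
   above into a #PF-style error code so that PGMR0Trap0eHandlerNestedPaging can reuse the regular
   page-fault plumbing: instruction fetch -> X86_TRAP_PF_ID, write access -> X86_TRAP_PF_RW and a
   present translation -> X86_TRAP_PF_P. */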
14669
14670 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
14671
14672
14673 /* Handle the page-fault trap for the nested shadow table. */
14674 PVM pVM = pVCpu->CTX_SUFF(pVM);
14675 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14676
14677 Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x cs:rip=%#04x:%#RX64\n", pVmxTransient->uExitQual, GCPhys, uErrorCode,
14678 pCtx->cs.Sel, pCtx->rip));
14679
14680 VBOXSTRICTRC rcStrict2 = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
14681 TRPMResetTrap(pVCpu);
14682
14683 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
14684 if ( rcStrict2 == VINF_SUCCESS
14685 || rcStrict2 == VERR_PAGE_TABLE_NOT_PRESENT
14686 || rcStrict2 == VERR_PAGE_NOT_PRESENT)
14687 {
14688 /* Successfully synced our nested page tables. */
14689 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
14690 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
14691 return VINF_SUCCESS;
14692 }
14693
14694 Log4Func(("EPT return to ring-3 rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));
14695 return rcStrict2;
14696}
14697
14698/** @} */
14699
14700/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
14701/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit exception handlers =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
14702/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
14703
14704/**
14705 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
14706 */
14707static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14708{
14709 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14710 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
14711
14712 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
14713 AssertRCReturn(rc, rc);
14714
14715 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
14716 {
14717 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
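/* Annotation (not in the original source): with CR0.NE clear the guest expects the legacy,
   external-interrupt style of x87 error reporting (FERR# routed as IRQ 13 by the chipset),
   which is what the PDMIsaSetIrq call below emulates. */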
14718 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
14719
14720 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
14721 * provides the VM-exit instruction length. If this causes problems later,
14722 * disassemble the instruction like it's done on AMD-V. */
14723 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
14724 AssertRCReturn(rc2, rc2);
14725 return rc;
14726 }
14727
14728 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
14729 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
14730 return rc;
14731}
14732
14733
14734/**
14735 * VM-exit exception handler for \#BP (Breakpoint exception).
14736 */
14737static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14738{
14739 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14740 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
14741
14742 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
14743 AssertRCReturn(rc, rc);
14744
14745 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14746 rc = DBGFRZTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
14747 if (rc == VINF_EM_RAW_GUEST_TRAP)
14748 {
14749 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
14750 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
14751 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
14752 AssertRCReturn(rc, rc);
14753
14754 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
14755 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
14756 }
14757
14758 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
14759 return rc;
14760}
14761
14762
14763/**
14764 * VM-exit exception handler for \#AC (alignment check exception).
14765 */
14766static int hmR0VmxExitXcptAC(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14767{
14768 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14769
14770 /*
14771 * Re-inject it. We'll detect any nesting before getting here.
14772 */
14773 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
14774 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
14775 AssertRCReturn(rc, rc);
14776 Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);
14777
14778 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
14779 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
14780 return VINF_SUCCESS;
14781}
14782
14783
14784/**
14785 * VM-exit exception handler for \#DB (Debug exception).
14786 */
14787static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14788{
14789 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14790 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
14791
14792 /*
14793 * Get the DR6-like values from the VM-exit qualification and pass them to DBGF
14794 * for processing.
14795 */
14796 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
14797
14798 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
14799 uint64_t const uDR6 = X86_DR6_INIT_VAL
14800 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
14801 | X86_DR6_BD | X86_DR6_BS));
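/* Annotation (not in the original source): the low bits of the exit qualification mirror the DR6
   breakpoint-condition bits, so e.g. a data breakpoint armed in DR0 shows up here as X86_DR6_B0
   only, while a single-step trap sets X86_DR6_BS; X86_DR6_INIT_VAL supplies the bits that always
   read as one. */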
14802
14803 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14804 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
14805 Log6Func(("rc=%Rrc\n", rc));
14806 if (rc == VINF_EM_RAW_GUEST_TRAP)
14807 {
14808 /*
14809 * The exception was for the guest. Update DR6, DR7.GD and
14810 * IA32_DEBUGCTL.LBR before forwarding it.
14811 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
14812 */
14813 VMMRZCallRing3Disable(pVCpu);
14814 HM_DISABLE_PREEMPT(pVCpu);
14815
14816 pCtx->dr[6] &= ~X86_DR6_B_MASK;
14817 pCtx->dr[6] |= uDR6;
14818 if (CPUMIsGuestDebugStateActive(pVCpu))
14819 ASMSetDR6(pCtx->dr[6]);
14820
14821 HM_RESTORE_PREEMPT();
14822 VMMRZCallRing3Enable(pVCpu);
14823
14824 rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
14825 AssertRCReturn(rc, rc);
14826
14827 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
14828 pCtx->dr[7] &= ~X86_DR7_GD;
14829
14830 /* Paranoia. */
14831 pCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
14832 pCtx->dr[7] |= X86_DR7_RA1_MASK;
14833
14834 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pCtx->dr[7]);
14835 AssertRCReturn(rc, rc);
14836
14837 /*
14838 * Raise #DB in the guest.
14839 *
14840 * It is important to reflect exactly what the VM-exit gave us (preserving the
14841 * interruption-type) rather than use hmR0VmxSetPendingXcptDB() as the #DB could've
14842 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
14843 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
14844 *
14845 * Intel re-documented ICEBP/INT1 in May 2018; it was previously documented only as part of
14846 * the Intel 386. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
14847 */
14848 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
14849 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
14850 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
14851 AssertRCReturn(rc, rc);
14852 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
14853 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
14854 return VINF_SUCCESS;
14855 }
14856
14857 /*
14858 * Not a guest trap, must be a hypervisor related debug event then.
14859 * Update DR6 in case someone is interested in it.
14860 */
14861 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
14862 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
14863 CPUMSetHyperDR6(pVCpu, uDR6);
14864
14865 return rc;
14866}
14867
14868
14869/**
14870 * Hacks its way around the lovely mesa driver's backdoor accesses.
14871 *
14872 * @sa hmR0SvmHandleMesaDrvGp.
14873 */
14874static int hmR0VmxHandleMesaDrvGp(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
14875{
14876 LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
14877 RT_NOREF(pCtx);
14878
14879 /* For now we'll just skip the instruction. */
14880 return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
14881}
14882
14883
14884/**
14885 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
14886 * backdoor logging w/o checking what it is running inside.
14887 *
14888 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
14889 * backdoor port and magic numbers loaded in registers.
14890 *
14891 * @returns true if it is, false if it isn't.
14892 * @sa hmR0SvmIsMesaDrvGp.
14893 */
14894DECLINLINE(bool) hmR0VmxIsMesaDrvGp(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
14895{
14896 /* 0xed: IN eAX,dx */
14897 uint8_t abInstr[1];
14898 if (pVmxTransient->cbInstr != sizeof(abInstr))
14899 return false;
14900
14901 /* Check that it is #GP(0). */
14902 if (pVmxTransient->uExitIntErrorCode != 0)
14903 return false;
14904
14905 /* Check magic and port. */
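/* Annotation (not in the original source): 0x564d5868 is ASCII "VMXh" and port 0x5658 is "VX",
   i.e. the classic VMware backdoor protocol the mesa driver pokes for its logging; see also the
   AMD-V twin hmR0SvmIsMesaDrvGp. */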
14906 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
14907 /*Log(("hmR0VmxIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
14908 if (pCtx->rax != UINT32_C(0x564d5868))
14909 return false;
14910 if (pCtx->dx != UINT32_C(0x5658))
14911 return false;
14912
14913 /* Flat ring-3 CS. */
14914 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
14915 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
14916 /*Log(("hmR0VmxIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
14917 if (pCtx->cs.Attr.n.u2Dpl != 3)
14918 return false;
14919 if (pCtx->cs.u64Base != 0)
14920 return false;
14921
14922 /* Check opcode. */
14923 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
14924 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
14925 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
14926 /*Log(("hmR0VmxIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
14927 if (RT_FAILURE(rc))
14928 return false;
14929 if (abInstr[0] != 0xed)
14930 return false;
14931
14932 return true;
14933}
14934
14935
14936/**
14937 * VM-exit exception handler for \#GP (General-protection exception).
14938 *
14939 * @remarks Requires pVmxTransient->uExitIntInfo to be up-to-date.
14940 */
14941static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
14942{
14943 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14944 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
14945
14946 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14947 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14948 if (pVmcsInfo->RealMode.fRealOnV86Active)
14949 { /* likely */ }
14950 else
14951 {
14952#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
14953 Assert(pVCpu->hm.s.fUsingDebugLoop || pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv);
14954#endif
14955 /* If the guest is not in real-mode or we have unrestricted guest execution support, reflect #GP to the guest. */
14956 int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
14957 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
14958 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
14959 rc |= hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
14960 AssertRCReturn(rc, rc);
14961 Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
14962 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
14963
14964 if ( !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv
14965 || !hmR0VmxIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
14966 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
14967 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
14968 else
14969 rc = hmR0VmxHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
14970 return rc;
14971 }
14972
14973 Assert(CPUMIsGuestInRealModeEx(pCtx));
14974 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
14975
14976 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
14977 AssertRCReturn(rc, rc);
14978
14979 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
14980 if (rcStrict == VINF_SUCCESS)
14981 {
14982 if (!CPUMIsGuestInRealModeEx(pCtx))
14983 {
14984 /*
14985 * The guest is no longer in real-mode, check if we can continue executing the
14986 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
14987 */
14988 pVmcsInfo->RealMode.fRealOnV86Active = false;
14989 if (HMCanExecuteVmxGuest(pVCpu, pCtx))
14990 {
14991 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
14992 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
14993 }
14994 else
14995 {
14996 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
14997 rcStrict = VINF_EM_RESCHEDULE;
14998 }
14999 }
15000 else
15001 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
15002 }
15003 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15004 {
15005 rcStrict = VINF_SUCCESS;
15006 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15007 }
15008 return VBOXSTRICTRC_VAL(rcStrict);
15009}
15010
15011
15012/**
15013 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
15014 * the exception reported in the VMX transient structure back into the VM.
15015 *
15016 * @remarks Requires uExitIntInfo in the VMX transient structure to be
15017 * up-to-date.
15018 */
15019static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15020{
15021 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15022#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
15023 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
15024 AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVmcsInfo->RealMode.fRealOnV86Active,
15025 ("uVector=%#x u32XcptBitmap=%#X32\n",
15026 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
15027 NOREF(pVmcsInfo);
15028#endif
15029
15030 /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
15031 hmR0VmxCheckExitDueToEventDelivery(). */
15032 int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
15033 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15034 AssertRCReturn(rc, rc);
15035 Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO);
15036
15037#ifdef DEBUG_ramshankar
15038 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
15039 Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n",
15040 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pCtx->cs.Sel, pCtx->rip));
15041#endif
15042
15043 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,
15044 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
15045 return VINF_SUCCESS;
15046}
15047
15048
15049/**
15050 * VM-exit exception handler for \#PF (Page-fault exception).
15051 */
15052static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15053{
15054 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15055 PVM pVM = pVCpu->CTX_SUFF(pVM);
15056 int rc = hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
15057 rc |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
15058 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
15059 AssertRCReturn(rc, rc);
15060
15061 if (!pVM->hm.s.fNestedPaging)
15062 { /* likely */ }
15063 else
15064 {
15065#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF)
15066 Assert(pVCpu->hm.s.fUsingDebugLoop);
15067#endif
15068 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
15069 if (RT_LIKELY(!pVmxTransient->fVectoringDoublePF))
15070 {
15071 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
15072 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
15073 }
15074 else
15075 {
15076 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
15077 hmR0VmxSetPendingXcptDF(pVCpu);
15078 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
15079 }
15080 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
15081 return rc;
15082 }
15083
15084 /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
15085 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
15086 if (pVmxTransient->fVectoringPF)
15087 {
15088 Assert(pVCpu->hm.s.Event.fPending);
15089 return VINF_EM_RAW_INJECT_TRPM_EVENT;
15090 }
15091
15092 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
15093 rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
15094 AssertRCReturn(rc, rc);
15095
15096 Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQual, pCtx->cs.Sel,
15097 pCtx->rip, pVmxTransient->uExitIntErrorCode, pCtx->cr3));
15098
15099 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
15100 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
15101
15102 Log4Func(("#PF: rc=%Rrc\n", rc));
15103 if (rc == VINF_SUCCESS)
15104 {
15105 /*
15106 * This is typically a shadow page table sync or a MMIO instruction. But we may have
15107 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
15108 */
15109 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
15110 TRPMResetTrap(pVCpu);
15111 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
15112 return rc;
15113 }
15114
15115 if (rc == VINF_EM_RAW_GUEST_TRAP)
15116 {
15117 if (!pVmxTransient->fVectoringDoublePF)
15118 {
15119 /* It's a guest page fault and needs to be reflected to the guest. */
15120 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
15121 TRPMResetTrap(pVCpu);
15122 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
15123 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
15124 uGstErrorCode, pVmxTransient->uExitQual);
15125 }
15126 else
15127 {
15128 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
15129 TRPMResetTrap(pVCpu);
15130 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
15131 hmR0VmxSetPendingXcptDF(pVCpu);
15132 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
15133 }
15134
15135 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
15136 return VINF_SUCCESS;
15137 }
15138
15139 TRPMResetTrap(pVCpu);
15140 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
15141 return rc;
15142}
15143
15144#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15145/** @name VMX instruction handlers.
15146 * @{
15147 */
15148/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
15149/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VMX instructions VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
15150/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
15151
15152/**
15153 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
15154 */
15155HMVMX_EXIT_DECL hmR0VmxExitVmclear(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15156{
15157 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15158
15159 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15160 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
15161 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
15162 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
15163 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
15164 AssertRCReturn(rc, rc);
15165
15166 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15167
15168 VMXVEXITINFO ExitInfo;
15169 RT_ZERO(ExitInfo);
15170 ExitInfo.uReason = pVmxTransient->uExitReason;
15171 ExitInfo.u64Qual = pVmxTransient->uExitQual;
15172 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
15173 ExitInfo.cbInstr = pVmxTransient->cbInstr;
15174 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
15175
15176 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
15177 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15178 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
15179 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15180 {
15181 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15182 rcStrict = VINF_SUCCESS;
15183 }
15184 return rcStrict;
15185}
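/* Annotation (not in the original source): the remaining VMX-instruction handlers below follow the
   same shape as VMCLEAR above: read the instruction length/info and exit qualification, import just
   enough guest state for IEM to decode the memory operand, fill a VMXVEXITINFO, call the matching
   IEMExecDecodedXxx worker, and then mark the registers IEM may have modified as dirty in
   fCtxChanged (treating VINF_IEM_RAISED_XCPT as success plus HM_CHANGED_RAISED_XCPT_MASK).
   VMLAUNCH/VMRESUME differ: they import the entire state and return VINF_VMX_VMLAUNCH_VMRESUME. */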
15186
15187
15188/**
15189 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
15190 */
15191HMVMX_EXIT_DECL hmR0VmxExitVmlaunch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15192{
15193 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15194
15195 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH,
15196 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
15197 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15198 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
15199 AssertRCReturn(rc, rc);
15200
15201 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15202
15203 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbInstr, VMXINSTRID_VMLAUNCH);
15204 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15205 {
15206 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
15207 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
15208 }
15209 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
15210 return rcStrict;
15211}
15212
15213
15214/**
15215 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
15216 */
15217HMVMX_EXIT_DECL hmR0VmxExitVmptrld(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15218{
15219 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15220
15221 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15222 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
15223 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
15224 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
15225 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
15226 AssertRCReturn(rc, rc);
15227
15228 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15229
15230 VMXVEXITINFO ExitInfo;
15231 RT_ZERO(ExitInfo);
15232 ExitInfo.uReason = pVmxTransient->uExitReason;
15233 ExitInfo.u64Qual = pVmxTransient->uExitQual;
15234 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
15235 ExitInfo.cbInstr = pVmxTransient->cbInstr;
15236 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
15237
15238 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
15239 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15240 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
15241 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15242 {
15243 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15244 rcStrict = VINF_SUCCESS;
15245 }
15246 return rcStrict;
15247}
15248
15249
15250/**
15251 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
15252 */
15253HMVMX_EXIT_DECL hmR0VmxExitVmptrst(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15254{
15255 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15256
15257 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15258 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
15259 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
15260 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
15261 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
15262 AssertRCReturn(rc, rc);
15263
15264 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15265
15266 VMXVEXITINFO ExitInfo;
15267 RT_ZERO(ExitInfo);
15268 ExitInfo.uReason = pVmxTransient->uExitReason;
15269 ExitInfo.u64Qual = pVmxTransient->uExitQual;
15270 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
15271 ExitInfo.cbInstr = pVmxTransient->cbInstr;
15272 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
15273
15274 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
15275 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15276 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
15277 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15278 {
15279 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15280 rcStrict = VINF_SUCCESS;
15281 }
15282 return rcStrict;
15283}
15284
15285
15286/**
15287 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Unconditional VM-exit.
15288 */
15289HMVMX_EXIT_DECL hmR0VmxExitVmread(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15290{
15291 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15292
15293 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15294 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
15295 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
15296 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
15297 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
15298 AssertRCReturn(rc, rc);
15299
15300 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15301
15302 VMXVEXITINFO ExitInfo;
15303 RT_ZERO(ExitInfo);
15304 ExitInfo.uReason = pVmxTransient->uExitReason;
15305 ExitInfo.u64Qual = pVmxTransient->uExitQual;
15306 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
15307 ExitInfo.cbInstr = pVmxTransient->cbInstr;
15308 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
15309 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
15310
15311 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
15312 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15313 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
15314 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15315 {
15316 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15317 rcStrict = VINF_SUCCESS;
15318 }
15319 return rcStrict;
15320}
15321
15322
15323/**
15324 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
15325 */
15326HMVMX_EXIT_DECL hmR0VmxExitVmresume(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15327{
15328 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15329
15330 /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME,
15331 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
15332 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15333 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
15334 AssertRCReturn(rc, rc);
15335
15336 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15337
15338 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbInstr, VMXINSTRID_VMRESUME);
15339 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15340 {
15341 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
15342 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
15343 }
15344 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
15345 return rcStrict;
15346}
15347
15348
15349/**
15350 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Unconditional VM-exit.
15351 */
15352HMVMX_EXIT_DECL hmR0VmxExitVmwrite(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15353{
15354 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15355
15356 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15357 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
15358 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
15359 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
15360 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
15361 AssertRCReturn(rc, rc);
15362
15363 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15364
15365 VMXVEXITINFO ExitInfo;
15366 RT_ZERO(ExitInfo);
15367 ExitInfo.uReason = pVmxTransient->uExitReason;
15368 ExitInfo.u64Qual = pVmxTransient->uExitQual;
15369 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
15370 ExitInfo.cbInstr = pVmxTransient->cbInstr;
15371 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
15372 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
15373
15374 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
15375 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15376 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
15377 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15378 {
15379 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15380 rcStrict = VINF_SUCCESS;
15381 }
15382 return rcStrict;
15383}
15384
15385
15386/**
15387 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
15388 */
15389HMVMX_EXIT_DECL hmR0VmxExitVmxoff(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15390{
15391 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15392
15393 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15394 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
15395 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
15396 AssertRCReturn(rc, rc);
15397
15398 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15399
15400 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbInstr);
15401 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15402 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
15403 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15404 {
15405 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15406 rcStrict = VINF_SUCCESS;
15407 }
15408 return rcStrict;
15409}
15410
15411
15412/**
15413 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
15414 */
15415HMVMX_EXIT_DECL hmR0VmxExitVmxon(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
15416{
15417 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15418
15419 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15420 rc |= hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
15421 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
15422 rc |= hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
15423 rc |= hmR0VmxReadExitQualVmcs(pVCpu, pVmxTransient);
15424 AssertRCReturn(rc, rc);
15425
15426 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
15427
15428 VMXVEXITINFO ExitInfo;
15429 RT_ZERO(ExitInfo);
15430 ExitInfo.uReason = pVmxTransient->uExitReason;
15431 ExitInfo.u64Qual = pVmxTransient->uExitQual;
15432 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
15433 ExitInfo.cbInstr = pVmxTransient->cbInstr;
15434 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
15435
15436 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
15437 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15438 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
15439 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15440 {
15441 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15442 rcStrict = VINF_SUCCESS;
15443 }
15444 return rcStrict;
15445}
15446
15447/** @} */
15448#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
15449