VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@92679

Last change on this file since 92679 was 92626, checked in by vboxsync, 3 years ago

VMM: Nested VMX: bugref:10092 Adjust PGM APIs and translate nested-guest CR3 prior to mapping them when switching mode and other places.

1/* $Id: HMVMXR0.cpp 92626 2021-11-29 12:32:58Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_HM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/x86.h>
25#include <iprt/asm-amd64-x86.h>
26#include <iprt/thread.h>
27#include <iprt/mem.h>
28#include <iprt/mp.h>
29
30#include <VBox/vmm/pdmapi.h>
31#include <VBox/vmm/dbgf.h>
32#include <VBox/vmm/iem.h>
33#include <VBox/vmm/iom.h>
34#include <VBox/vmm/tm.h>
35#include <VBox/vmm/em.h>
36#include <VBox/vmm/gim.h>
37#include <VBox/vmm/apic.h>
38#include "HMInternal.h"
39#include <VBox/vmm/vmcc.h>
40#include <VBox/vmm/hmvmxinline.h>
41#include "HMVMXR0.h"
42#include "dtrace/VBoxVMM.h"
43
44#ifdef DEBUG_ramshankar
45# define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS
46# define HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
47# define HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE
48# define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
49# define HMVMX_ALWAYS_CLEAN_TRANSIENT
50# define HMVMX_ALWAYS_CHECK_GUEST_STATE
51# define HMVMX_ALWAYS_TRAP_ALL_XCPTS
52# define HMVMX_ALWAYS_TRAP_PF
53# define HMVMX_ALWAYS_FLUSH_TLB
54# define HMVMX_ALWAYS_SWAP_EFER
55#endif
56
57
58/*********************************************************************************************************************************
59* Defined Constants And Macros *
60*********************************************************************************************************************************/
61/** Use the function table. */
62#define HMVMX_USE_FUNCTION_TABLE
63
64/** Determine which tagged-TLB flush handler to use. */
65#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
66#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
67#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
68#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
69
70/**
71 * Flags to skip redundant reads of some common VMCS fields that are not part of
72 * the guest-CPU or VCPU state but are needed while handling VM-exits.
73 */
74#define HMVMX_READ_IDT_VECTORING_INFO RT_BIT_32(0)
75#define HMVMX_READ_IDT_VECTORING_ERROR_CODE RT_BIT_32(1)
76#define HMVMX_READ_EXIT_QUALIFICATION RT_BIT_32(2)
77#define HMVMX_READ_EXIT_INSTR_LEN RT_BIT_32(3)
78#define HMVMX_READ_EXIT_INTERRUPTION_INFO RT_BIT_32(4)
79#define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE RT_BIT_32(5)
80#define HMVMX_READ_EXIT_INSTR_INFO RT_BIT_32(6)
81#define HMVMX_READ_GUEST_LINEAR_ADDR RT_BIT_32(7)
82#define HMVMX_READ_GUEST_PHYSICAL_ADDR RT_BIT_32(8)
83#define HMVMX_READ_GUEST_PENDING_DBG_XCPTS RT_BIT_32(9)
84
85/** All the VMCS fields required for processing of exception/NMI VM-exits. */
86#define HMVMX_READ_XCPT_INFO ( HMVMX_READ_EXIT_INTERRUPTION_INFO \
87 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE \
88 | HMVMX_READ_EXIT_INSTR_LEN \
89 | HMVMX_READ_IDT_VECTORING_INFO \
90 | HMVMX_READ_IDT_VECTORING_ERROR_CODE)
91
92/** Assert that all the given fields have been read from the VMCS. */
93#ifdef VBOX_STRICT
94# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) \
95 do { \
96 uint32_t const fVmcsFieldRead = ASMAtomicUoReadU32(&(a_pVmxTransient)->fVmcsFieldsRead); \
97 Assert((fVmcsFieldRead & (a_fReadFields)) == (a_fReadFields)); \
98 } while (0)
99#else
100# define HMVMX_ASSERT_READ(a_pVmxTransient, a_fReadFields) do { } while (0)
101#endif
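/*
 * Usage sketch (illustrative only): a VM-exit handler reads the fields it needs
 * via the hmR0VmxReadXxxVmcs helpers further down, each of which sets the
 * corresponding HMVMX_READ_XXX bit in fVmcsFieldsRead, and can then assert its
 * expectations:
 *
 *     hmR0VmxReadExitQualVmcs(pVmxTransient);
 *     hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
 *     HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION | HMVMX_READ_EXIT_INSTR_LEN);
 */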
102
103/**
104 * Subset of the guest-CPU state that is kept by VMX R0 code while executing the
105 * guest using hardware-assisted VMX.
106 *
107 * This excludes state like GPRs (other than RSP), which are always
108 * swapped and restored across the world-switch, and also registers like the
109 * EFER MSR, which cannot be modified by the guest without causing a VM-exit.
110 */
111#define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \
112 | CPUMCTX_EXTRN_RFLAGS \
113 | CPUMCTX_EXTRN_RSP \
114 | CPUMCTX_EXTRN_SREG_MASK \
115 | CPUMCTX_EXTRN_TABLE_MASK \
116 | CPUMCTX_EXTRN_KERNEL_GS_BASE \
117 | CPUMCTX_EXTRN_SYSCALL_MSRS \
118 | CPUMCTX_EXTRN_SYSENTER_MSRS \
119 | CPUMCTX_EXTRN_TSC_AUX \
120 | CPUMCTX_EXTRN_OTHER_MSRS \
121 | CPUMCTX_EXTRN_CR0 \
122 | CPUMCTX_EXTRN_CR3 \
123 | CPUMCTX_EXTRN_CR4 \
124 | CPUMCTX_EXTRN_DR7 \
125 | CPUMCTX_EXTRN_HWVIRT \
126 | CPUMCTX_EXTRN_INHIBIT_INT \
127 | CPUMCTX_EXTRN_INHIBIT_NMI)
128
129/**
130 * Exception bitmap mask for real-mode guests (real-on-v86).
131 *
132 * We need to intercept all exceptions manually, except that:
133 * - \#AC and \#DB are always intercepted anyway to prevent the CPU from
134 * deadlocking due to bugs in Intel CPUs.
135 * - \#PF need not be intercepted even in real-mode if we have nested paging
136 * support.
137 */
138#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) /* always: | RT_BIT(X86_XCPT_DB) */ | RT_BIT(X86_XCPT_NMI) \
139 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
140 | RT_BIT(X86_XCPT_UD) | RT_BIT(X86_XCPT_NM) | RT_BIT(X86_XCPT_DF) \
141 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
142 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
143 | RT_BIT(X86_XCPT_MF) /* always: | RT_BIT(X86_XCPT_AC) */ | RT_BIT(X86_XCPT_MC) \
144 | RT_BIT(X86_XCPT_XF))
145
146/** Maximum VM-instruction error number. */
147#define HMVMX_INSTR_ERROR_MAX 28
148
149/** Profiling macro. */
150#ifdef HM_PROFILE_EXIT_DISPATCH
151# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
152# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
153#else
154# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
155# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
156#endif
157
158/** Assert that preemption is disabled or covered by thread-context hooks. */
159#define HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu) Assert( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
160 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD))
161
162/** Assert that we haven't migrated CPUs when thread-context hooks are not
163 * used. */
164#define HMVMX_ASSERT_CPU_SAFE(a_pVCpu) AssertMsg( VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
165 || (a_pVCpu)->hmr0.s.idEnteredCpu == RTMpCpuId(), \
166 ("Illegal migration! Entered on CPU %u Current %u\n", \
167 (a_pVCpu)->hmr0.s.idEnteredCpu, RTMpCpuId()))
168
169/** Asserts that the given CPUMCTX_EXTRN_XXX bits are present in the guest-CPU
170 * context. */
171#define HMVMX_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
172 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
173 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
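/*
 * Usage sketch (illustrative only): code that reads guest CR0/CR4 from the
 * CPU context first asserts that those bits have been imported from the VMCS
 * (i.e. are no longer marked external in fExtrn):
 *
 *     HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4);
 */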
174
175/** Log the VM-exit reason with an easily visible marker to identify it in a
176 * potential sea of logging data. */
177#define HMVMX_LOG_EXIT(a_pVCpu, a_uExitReason) \
178 do { \
179 Log4(("VM-exit: vcpu[%RU32] %85s -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (a_pVCpu)->idCpu, \
180 HMGetVmxExitName(a_uExitReason))); \
181 } while (0) \
182
183
184/*********************************************************************************************************************************
185* Structures and Typedefs *
186*********************************************************************************************************************************/
187/**
188 * VMX per-VCPU transient state.
189 *
190 * A state structure for holding miscellaneous information across the
191 * transition into and out of VMX non-root operation.
192 *
193 * Note: The members are ordered and aligned such that the most
194 * frequently used ones (in the guest execution loop) fall within
195 * the first cache line.
196 */
197typedef struct VMXTRANSIENT
198{
199 /** Mask of currently read VMCS fields; HMVMX_READ_XXX. */
200 uint32_t fVmcsFieldsRead;
201 /** The guest's TPR value used for TPR shadowing. */
202 uint8_t u8GuestTpr;
203 uint8_t abAlignment0[3];
204
205 /** Whether the VM-exit was caused by a page-fault during delivery of an
206 * external interrupt or NMI. */
207 bool fVectoringPF;
208 /** Whether the VM-exit was caused by a page-fault during delivery of a
209 * contributory exception or a page-fault. */
210 bool fVectoringDoublePF;
211 /** Whether the VM-entry failed or not. */
212 bool fVMEntryFailed;
213 /** Whether the TSC_AUX MSR needs to be removed from the auto-load/store MSR
214 * area after VM-exit. */
215 bool fRemoveTscAuxMsr;
216 /** Whether TSC-offsetting and VMX-preemption timer was updated before VM-entry. */
217 bool fUpdatedTscOffsettingAndPreemptTimer;
218 /** Whether we are currently executing a nested-guest. */
219 bool fIsNestedGuest;
220 /** Whether the guest debug state was active at the time of VM-exit. */
221 bool fWasGuestDebugStateActive;
222 /** Whether the hyper debug state was active at the time of VM-exit. */
223 bool fWasHyperDebugStateActive;
224
225 /** The basic VM-exit reason. */
226 uint32_t uExitReason;
227 /** The VM-exit interruption error code. */
228 uint32_t uExitIntErrorCode;
229
230 /** The host's rflags/eflags. */
231 RTCCUINTREG fEFlags;
232
233 /** The VM-exit qualification. */
234 uint64_t uExitQual;
235
236 /** The VMCS info. object. */
237 PVMXVMCSINFO pVmcsInfo;
238
239 /** The VM-exit interruption-information field. */
240 uint32_t uExitIntInfo;
241 /** The VM-exit instruction-length field. */
242 uint32_t cbExitInstr;
243
244 /** The VM-exit instruction-information field. */
245 VMXEXITINSTRINFO ExitInstrInfo;
246 /** IDT-vectoring information field. */
247 uint32_t uIdtVectoringInfo;
248
249 /** IDT-vectoring error code. */
250 uint32_t uIdtVectoringErrorCode;
251 uint32_t u32Alignment0;
252
253 /** The Guest-linear address. */
254 uint64_t uGuestLinearAddr;
255
256 /** The Guest-physical address. */
257 uint64_t uGuestPhysicalAddr;
258
259 /** The Guest pending-debug exceptions. */
260 uint64_t uGuestPendingDbgXcpts;
261
262 /** The VM-entry interruption-information field. */
263 uint32_t uEntryIntInfo;
264 /** The VM-entry exception error code field. */
265 uint32_t uEntryXcptErrorCode;
266
267 /** The VM-entry instruction length field. */
268 uint32_t cbEntryInstr;
269} VMXTRANSIENT;
270AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
271AssertCompileMemberAlignment(VMXTRANSIENT, fVmcsFieldsRead, 8);
272AssertCompileMemberAlignment(VMXTRANSIENT, fVectoringPF, 8);
273AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, 8);
274AssertCompileMemberAlignment(VMXTRANSIENT, fEFlags, 8);
275AssertCompileMemberAlignment(VMXTRANSIENT, uExitQual, 8);
276AssertCompileMemberAlignment(VMXTRANSIENT, pVmcsInfo, 8);
277AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntInfo, 8);
278AssertCompileMemberAlignment(VMXTRANSIENT, ExitInstrInfo, 8);
279AssertCompileMemberAlignment(VMXTRANSIENT, uIdtVectoringErrorCode, 8);
280AssertCompileMemberAlignment(VMXTRANSIENT, uGuestLinearAddr, 8);
281AssertCompileMemberAlignment(VMXTRANSIENT, uGuestPhysicalAddr, 8);
282AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntInfo, 8);
283AssertCompileMemberAlignment(VMXTRANSIENT, cbEntryInstr, 8);
284/** Pointer to VMX transient state. */
285typedef VMXTRANSIENT *PVMXTRANSIENT;
286/** Pointer to a const VMX transient state. */
287typedef const VMXTRANSIENT *PCVMXTRANSIENT;
288
289/**
290 * VMX page allocation information.
291 */
292typedef struct
293{
294 uint32_t fValid; /**< Whether to allocate this page (e.g., based on a CPU feature). */
295 uint32_t uPadding0; /**< Padding to ensure the array of these structs is aligned to a multiple of 8. */
296 PRTHCPHYS pHCPhys; /**< Where to store the host-physical address of the allocation. */
297 PRTR0PTR ppVirt; /**< Where to store the host-virtual address of the allocation. */
298} VMXPAGEALLOCINFO;
299/** Pointer to VMX page-allocation info. */
300typedef VMXPAGEALLOCINFO *PVMXPAGEALLOCINFO;
301/** Pointer to a const VMX page-allocation info. */
302typedef const VMXPAGEALLOCINFO *PCVMXPAGEALLOCINFO;
303AssertCompileSizeAlignment(VMXPAGEALLOCINFO, 8);
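/*
 * Usage sketch (illustrative only; exact structure field names may differ from
 * the allocation code further down): the VMCS setup code builds an array of
 * these entries and allocates only the pages whose fValid flag is set, e.g.:
 *
 *     VMXPAGEALLOCINFO aAllocInfo[] =
 *     {   // fValid, uPadding0, pHCPhys,                ppVirt
 *         { true,    0,         &pVmcsInfo->HCPhysVmcs, &pVmcsInfo->pvVmcs },
 *     };
 */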
304
305/**
306 * Memory operand read or write access.
307 */
308typedef enum VMXMEMACCESS
309{
310 VMXMEMACCESS_READ = 0,
311 VMXMEMACCESS_WRITE = 1
312} VMXMEMACCESS;
313
314/**
315 * VMX VM-exit handler.
316 *
317 * @returns Strict VBox status code (i.e. informational status codes too).
318 * @param pVCpu The cross context virtual CPU structure.
319 * @param pVmxTransient The VMX-transient structure.
320 */
321#ifndef HMVMX_USE_FUNCTION_TABLE
322typedef VBOXSTRICTRC FNVMXEXITHANDLER(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
323#else
324typedef DECLCALLBACKTYPE(VBOXSTRICTRC, FNVMXEXITHANDLER,(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient));
325/** Pointer to VM-exit handler. */
326typedef FNVMXEXITHANDLER *PFNVMXEXITHANDLER;
327#endif
328
329/**
330 * VMX VM-exit handler, non-strict status code.
331 *
332 * This is generally the same as FNVMXEXITHANDLER; the NSRC bit is just FYI.
333 *
334 * @returns VBox status code, no informational status code returned.
335 * @param pVCpu The cross context virtual CPU structure.
336 * @param pVmxTransient The VMX-transient structure.
337 *
338 * @remarks This is not used on anything returning VERR_EM_INTERPRETER as the
339 * use of that status code will be replaced with VINF_EM_SOMETHING
340 * later when switching over to IEM.
341 */
342#ifndef HMVMX_USE_FUNCTION_TABLE
343typedef int FNVMXEXITHANDLERNSRC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
344#else
345typedef FNVMXEXITHANDLER FNVMXEXITHANDLERNSRC;
346#endif
347
348
349/*********************************************************************************************************************************
350* Internal Functions *
351*********************************************************************************************************************************/
352#ifndef HMVMX_USE_FUNCTION_TABLE
353DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
354# define HMVMX_EXIT_DECL DECLINLINE(VBOXSTRICTRC)
355# define HMVMX_EXIT_NSRC_DECL DECLINLINE(int)
356#else
357# define HMVMX_EXIT_DECL static DECLCALLBACK(VBOXSTRICTRC)
358# define HMVMX_EXIT_NSRC_DECL HMVMX_EXIT_DECL
359#endif
360#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
361DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);
362#endif
363
364static int hmR0VmxImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat);
365
366/** @name VM-exit handler prototypes.
367 * @{
368 */
369static FNVMXEXITHANDLER hmR0VmxExitXcptOrNmi;
370static FNVMXEXITHANDLER hmR0VmxExitExtInt;
371static FNVMXEXITHANDLER hmR0VmxExitTripleFault;
372static FNVMXEXITHANDLERNSRC hmR0VmxExitIntWindow;
373static FNVMXEXITHANDLERNSRC hmR0VmxExitNmiWindow;
374static FNVMXEXITHANDLER hmR0VmxExitTaskSwitch;
375static FNVMXEXITHANDLER hmR0VmxExitCpuid;
376static FNVMXEXITHANDLER hmR0VmxExitGetsec;
377static FNVMXEXITHANDLER hmR0VmxExitHlt;
378static FNVMXEXITHANDLERNSRC hmR0VmxExitInvd;
379static FNVMXEXITHANDLER hmR0VmxExitInvlpg;
380static FNVMXEXITHANDLER hmR0VmxExitRdpmc;
381static FNVMXEXITHANDLER hmR0VmxExitVmcall;
382#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
383static FNVMXEXITHANDLER hmR0VmxExitVmclear;
384static FNVMXEXITHANDLER hmR0VmxExitVmlaunch;
385static FNVMXEXITHANDLER hmR0VmxExitVmptrld;
386static FNVMXEXITHANDLER hmR0VmxExitVmptrst;
387static FNVMXEXITHANDLER hmR0VmxExitVmread;
388static FNVMXEXITHANDLER hmR0VmxExitVmresume;
389static FNVMXEXITHANDLER hmR0VmxExitVmwrite;
390static FNVMXEXITHANDLER hmR0VmxExitVmxoff;
391static FNVMXEXITHANDLER hmR0VmxExitVmxon;
392static FNVMXEXITHANDLER hmR0VmxExitInvvpid;
393#endif
394static FNVMXEXITHANDLER hmR0VmxExitRdtsc;
395static FNVMXEXITHANDLER hmR0VmxExitMovCRx;
396static FNVMXEXITHANDLER hmR0VmxExitMovDRx;
397static FNVMXEXITHANDLER hmR0VmxExitIoInstr;
398static FNVMXEXITHANDLER hmR0VmxExitRdmsr;
399static FNVMXEXITHANDLER hmR0VmxExitWrmsr;
400static FNVMXEXITHANDLER hmR0VmxExitMwait;
401static FNVMXEXITHANDLER hmR0VmxExitMtf;
402static FNVMXEXITHANDLER hmR0VmxExitMonitor;
403static FNVMXEXITHANDLER hmR0VmxExitPause;
404static FNVMXEXITHANDLERNSRC hmR0VmxExitTprBelowThreshold;
405static FNVMXEXITHANDLER hmR0VmxExitApicAccess;
406static FNVMXEXITHANDLER hmR0VmxExitEptViolation;
407static FNVMXEXITHANDLER hmR0VmxExitEptMisconfig;
408static FNVMXEXITHANDLER hmR0VmxExitRdtscp;
409static FNVMXEXITHANDLER hmR0VmxExitPreemptTimer;
410static FNVMXEXITHANDLERNSRC hmR0VmxExitWbinvd;
411static FNVMXEXITHANDLER hmR0VmxExitXsetbv;
412static FNVMXEXITHANDLER hmR0VmxExitInvpcid;
413static FNVMXEXITHANDLERNSRC hmR0VmxExitSetPendingXcptUD;
414static FNVMXEXITHANDLERNSRC hmR0VmxExitErrInvalidGuestState;
415static FNVMXEXITHANDLERNSRC hmR0VmxExitErrUnexpected;
416/** @} */
417
418#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
419/** @name Nested-guest VM-exit handler prototypes.
420 * @{
421 */
422static FNVMXEXITHANDLER hmR0VmxExitXcptOrNmiNested;
423static FNVMXEXITHANDLER hmR0VmxExitTripleFaultNested;
424static FNVMXEXITHANDLERNSRC hmR0VmxExitIntWindowNested;
425static FNVMXEXITHANDLERNSRC hmR0VmxExitNmiWindowNested;
426static FNVMXEXITHANDLER hmR0VmxExitTaskSwitchNested;
427static FNVMXEXITHANDLER hmR0VmxExitHltNested;
428static FNVMXEXITHANDLER hmR0VmxExitInvlpgNested;
429static FNVMXEXITHANDLER hmR0VmxExitRdpmcNested;
430static FNVMXEXITHANDLER hmR0VmxExitVmreadVmwriteNested;
431static FNVMXEXITHANDLER hmR0VmxExitRdtscNested;
432static FNVMXEXITHANDLER hmR0VmxExitMovCRxNested;
433static FNVMXEXITHANDLER hmR0VmxExitMovDRxNested;
434static FNVMXEXITHANDLER hmR0VmxExitIoInstrNested;
435static FNVMXEXITHANDLER hmR0VmxExitRdmsrNested;
436static FNVMXEXITHANDLER hmR0VmxExitWrmsrNested;
437static FNVMXEXITHANDLER hmR0VmxExitMwaitNested;
438static FNVMXEXITHANDLER hmR0VmxExitMtfNested;
439static FNVMXEXITHANDLER hmR0VmxExitMonitorNested;
440static FNVMXEXITHANDLER hmR0VmxExitPauseNested;
441static FNVMXEXITHANDLERNSRC hmR0VmxExitTprBelowThresholdNested;
442static FNVMXEXITHANDLER hmR0VmxExitApicAccessNested;
443static FNVMXEXITHANDLER hmR0VmxExitApicWriteNested;
444static FNVMXEXITHANDLER hmR0VmxExitVirtEoiNested;
445static FNVMXEXITHANDLER hmR0VmxExitRdtscpNested;
446static FNVMXEXITHANDLERNSRC hmR0VmxExitWbinvdNested;
447static FNVMXEXITHANDLER hmR0VmxExitInvpcidNested;
448static FNVMXEXITHANDLERNSRC hmR0VmxExitErrInvalidGuestStateNested;
449static FNVMXEXITHANDLER hmR0VmxExitInstrNested;
450static FNVMXEXITHANDLER hmR0VmxExitInstrWithInfoNested;
451/** @} */
452#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
453
454
455/*********************************************************************************************************************************
456* Global Variables *
457*********************************************************************************************************************************/
458#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
459/**
460 * Array of all VMCS fields.
461 * Any fields added to the VT-x spec. should be added here.
462 *
463 * Currently only used to derive shadow VMCS fields for hardware-assisted execution
464 * of nested-guests.
465 */
466static const uint32_t g_aVmcsFields[] =
467{
468 /* 16-bit control fields. */
469 VMX_VMCS16_VPID,
470 VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR,
471 VMX_VMCS16_EPTP_INDEX,
472
473 /* 16-bit guest-state fields. */
474 VMX_VMCS16_GUEST_ES_SEL,
475 VMX_VMCS16_GUEST_CS_SEL,
476 VMX_VMCS16_GUEST_SS_SEL,
477 VMX_VMCS16_GUEST_DS_SEL,
478 VMX_VMCS16_GUEST_FS_SEL,
479 VMX_VMCS16_GUEST_GS_SEL,
480 VMX_VMCS16_GUEST_LDTR_SEL,
481 VMX_VMCS16_GUEST_TR_SEL,
482 VMX_VMCS16_GUEST_INTR_STATUS,
483 VMX_VMCS16_GUEST_PML_INDEX,
484
485 /* 16-bit host-state fields. */
486 VMX_VMCS16_HOST_ES_SEL,
487 VMX_VMCS16_HOST_CS_SEL,
488 VMX_VMCS16_HOST_SS_SEL,
489 VMX_VMCS16_HOST_DS_SEL,
490 VMX_VMCS16_HOST_FS_SEL,
491 VMX_VMCS16_HOST_GS_SEL,
492 VMX_VMCS16_HOST_TR_SEL,
493
494 /* 64-bit control fields. */
495 VMX_VMCS64_CTRL_IO_BITMAP_A_FULL,
496 VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH,
497 VMX_VMCS64_CTRL_IO_BITMAP_B_FULL,
498 VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH,
499 VMX_VMCS64_CTRL_MSR_BITMAP_FULL,
500 VMX_VMCS64_CTRL_MSR_BITMAP_HIGH,
501 VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL,
502 VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH,
503 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,
504 VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH,
505 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL,
506 VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH,
507 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL,
508 VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH,
509 VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL,
510 VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH,
511 VMX_VMCS64_CTRL_TSC_OFFSET_FULL,
512 VMX_VMCS64_CTRL_TSC_OFFSET_HIGH,
513 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL,
514 VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH,
515 VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL,
516 VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH,
517 VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL,
518 VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH,
519 VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL,
520 VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH,
521 VMX_VMCS64_CTRL_EPTP_FULL,
522 VMX_VMCS64_CTRL_EPTP_HIGH,
523 VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL,
524 VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH,
525 VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL,
526 VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH,
527 VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL,
528 VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH,
529 VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL,
530 VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH,
531 VMX_VMCS64_CTRL_EPTP_LIST_FULL,
532 VMX_VMCS64_CTRL_EPTP_LIST_HIGH,
533 VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL,
534 VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH,
535 VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL,
536 VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH,
537 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_FULL,
538 VMX_VMCS64_CTRL_VE_XCPT_INFO_ADDR_HIGH,
539 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL,
540 VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH,
541 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL,
542 VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH,
543 VMX_VMCS64_CTRL_SPPTP_FULL,
544 VMX_VMCS64_CTRL_SPPTP_HIGH,
545 VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL,
546 VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH,
547 VMX_VMCS64_CTRL_PROC_EXEC3_FULL,
548 VMX_VMCS64_CTRL_PROC_EXEC3_HIGH,
549 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_FULL,
550 VMX_VMCS64_CTRL_ENCLV_EXITING_BITMAP_HIGH,
551
552 /* 64-bit read-only data fields. */
553 VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL,
554 VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH,
555
556 /* 64-bit guest-state fields. */
557 VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL,
558 VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH,
559 VMX_VMCS64_GUEST_DEBUGCTL_FULL,
560 VMX_VMCS64_GUEST_DEBUGCTL_HIGH,
561 VMX_VMCS64_GUEST_PAT_FULL,
562 VMX_VMCS64_GUEST_PAT_HIGH,
563 VMX_VMCS64_GUEST_EFER_FULL,
564 VMX_VMCS64_GUEST_EFER_HIGH,
565 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL,
566 VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH,
567 VMX_VMCS64_GUEST_PDPTE0_FULL,
568 VMX_VMCS64_GUEST_PDPTE0_HIGH,
569 VMX_VMCS64_GUEST_PDPTE1_FULL,
570 VMX_VMCS64_GUEST_PDPTE1_HIGH,
571 VMX_VMCS64_GUEST_PDPTE2_FULL,
572 VMX_VMCS64_GUEST_PDPTE2_HIGH,
573 VMX_VMCS64_GUEST_PDPTE3_FULL,
574 VMX_VMCS64_GUEST_PDPTE3_HIGH,
575 VMX_VMCS64_GUEST_BNDCFGS_FULL,
576 VMX_VMCS64_GUEST_BNDCFGS_HIGH,
577 VMX_VMCS64_GUEST_RTIT_CTL_FULL,
578 VMX_VMCS64_GUEST_RTIT_CTL_HIGH,
579 VMX_VMCS64_GUEST_PKRS_FULL,
580 VMX_VMCS64_GUEST_PKRS_HIGH,
581
582 /* 64-bit host-state fields. */
583 VMX_VMCS64_HOST_PAT_FULL,
584 VMX_VMCS64_HOST_PAT_HIGH,
585 VMX_VMCS64_HOST_EFER_FULL,
586 VMX_VMCS64_HOST_EFER_HIGH,
587 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL,
588 VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH,
589 VMX_VMCS64_HOST_PKRS_FULL,
590 VMX_VMCS64_HOST_PKRS_HIGH,
591
592 /* 32-bit control fields. */
593 VMX_VMCS32_CTRL_PIN_EXEC,
594 VMX_VMCS32_CTRL_PROC_EXEC,
595 VMX_VMCS32_CTRL_EXCEPTION_BITMAP,
596 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK,
597 VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH,
598 VMX_VMCS32_CTRL_CR3_TARGET_COUNT,
599 VMX_VMCS32_CTRL_EXIT,
600 VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT,
601 VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,
602 VMX_VMCS32_CTRL_ENTRY,
603 VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT,
604 VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO,
605 VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE,
606 VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH,
607 VMX_VMCS32_CTRL_TPR_THRESHOLD,
608 VMX_VMCS32_CTRL_PROC_EXEC2,
609 VMX_VMCS32_CTRL_PLE_GAP,
610 VMX_VMCS32_CTRL_PLE_WINDOW,
611
612 /* 32-bit read-only data fields. */
613 VMX_VMCS32_RO_VM_INSTR_ERROR,
614 VMX_VMCS32_RO_EXIT_REASON,
615 VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO,
616 VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE,
617 VMX_VMCS32_RO_IDT_VECTORING_INFO,
618 VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE,
619 VMX_VMCS32_RO_EXIT_INSTR_LENGTH,
620 VMX_VMCS32_RO_EXIT_INSTR_INFO,
621
622 /* 32-bit guest-state fields. */
623 VMX_VMCS32_GUEST_ES_LIMIT,
624 VMX_VMCS32_GUEST_CS_LIMIT,
625 VMX_VMCS32_GUEST_SS_LIMIT,
626 VMX_VMCS32_GUEST_DS_LIMIT,
627 VMX_VMCS32_GUEST_FS_LIMIT,
628 VMX_VMCS32_GUEST_GS_LIMIT,
629 VMX_VMCS32_GUEST_LDTR_LIMIT,
630 VMX_VMCS32_GUEST_TR_LIMIT,
631 VMX_VMCS32_GUEST_GDTR_LIMIT,
632 VMX_VMCS32_GUEST_IDTR_LIMIT,
633 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
634 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
635 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
636 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
637 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
638 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS,
639 VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS,
640 VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS,
641 VMX_VMCS32_GUEST_INT_STATE,
642 VMX_VMCS32_GUEST_ACTIVITY_STATE,
643 VMX_VMCS32_GUEST_SMBASE,
644 VMX_VMCS32_GUEST_SYSENTER_CS,
645 VMX_VMCS32_PREEMPT_TIMER_VALUE,
646
647 /* 32-bit host-state fields. */
648 VMX_VMCS32_HOST_SYSENTER_CS,
649
650 /* Natural-width control fields. */
651 VMX_VMCS_CTRL_CR0_MASK,
652 VMX_VMCS_CTRL_CR4_MASK,
653 VMX_VMCS_CTRL_CR0_READ_SHADOW,
654 VMX_VMCS_CTRL_CR4_READ_SHADOW,
655 VMX_VMCS_CTRL_CR3_TARGET_VAL0,
656 VMX_VMCS_CTRL_CR3_TARGET_VAL1,
657 VMX_VMCS_CTRL_CR3_TARGET_VAL2,
658 VMX_VMCS_CTRL_CR3_TARGET_VAL3,
659
660 /* Natural-width read-only data fields. */
661 VMX_VMCS_RO_EXIT_QUALIFICATION,
662 VMX_VMCS_RO_IO_RCX,
663 VMX_VMCS_RO_IO_RSI,
664 VMX_VMCS_RO_IO_RDI,
665 VMX_VMCS_RO_IO_RIP,
666 VMX_VMCS_RO_GUEST_LINEAR_ADDR,
667
668 /* Natural-width guest-state fields. */
669 VMX_VMCS_GUEST_CR0,
670 VMX_VMCS_GUEST_CR3,
671 VMX_VMCS_GUEST_CR4,
672 VMX_VMCS_GUEST_ES_BASE,
673 VMX_VMCS_GUEST_CS_BASE,
674 VMX_VMCS_GUEST_SS_BASE,
675 VMX_VMCS_GUEST_DS_BASE,
676 VMX_VMCS_GUEST_FS_BASE,
677 VMX_VMCS_GUEST_GS_BASE,
678 VMX_VMCS_GUEST_LDTR_BASE,
679 VMX_VMCS_GUEST_TR_BASE,
680 VMX_VMCS_GUEST_GDTR_BASE,
681 VMX_VMCS_GUEST_IDTR_BASE,
682 VMX_VMCS_GUEST_DR7,
683 VMX_VMCS_GUEST_RSP,
684 VMX_VMCS_GUEST_RIP,
685 VMX_VMCS_GUEST_RFLAGS,
686 VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS,
687 VMX_VMCS_GUEST_SYSENTER_ESP,
688 VMX_VMCS_GUEST_SYSENTER_EIP,
689 VMX_VMCS_GUEST_S_CET,
690 VMX_VMCS_GUEST_SSP,
691 VMX_VMCS_GUEST_INTR_SSP_TABLE_ADDR,
692
693 /* Natural-width host-state fields. */
694 VMX_VMCS_HOST_CR0,
695 VMX_VMCS_HOST_CR3,
696 VMX_VMCS_HOST_CR4,
697 VMX_VMCS_HOST_FS_BASE,
698 VMX_VMCS_HOST_GS_BASE,
699 VMX_VMCS_HOST_TR_BASE,
700 VMX_VMCS_HOST_GDTR_BASE,
701 VMX_VMCS_HOST_IDTR_BASE,
702 VMX_VMCS_HOST_SYSENTER_ESP,
703 VMX_VMCS_HOST_SYSENTER_EIP,
704 VMX_VMCS_HOST_RSP,
705 VMX_VMCS_HOST_RIP,
706 VMX_VMCS_HOST_S_CET,
707 VMX_VMCS_HOST_SSP,
708 VMX_VMCS_HOST_INTR_SSP_TABLE_ADDR
709};
710#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
711
712#ifdef VBOX_STRICT
713static const uint32_t g_aVmcsSegBase[] =
714{
715 VMX_VMCS_GUEST_ES_BASE,
716 VMX_VMCS_GUEST_CS_BASE,
717 VMX_VMCS_GUEST_SS_BASE,
718 VMX_VMCS_GUEST_DS_BASE,
719 VMX_VMCS_GUEST_FS_BASE,
720 VMX_VMCS_GUEST_GS_BASE
721};
722static const uint32_t g_aVmcsSegSel[] =
723{
724 VMX_VMCS16_GUEST_ES_SEL,
725 VMX_VMCS16_GUEST_CS_SEL,
726 VMX_VMCS16_GUEST_SS_SEL,
727 VMX_VMCS16_GUEST_DS_SEL,
728 VMX_VMCS16_GUEST_FS_SEL,
729 VMX_VMCS16_GUEST_GS_SEL
730};
731static const uint32_t g_aVmcsSegLimit[] =
732{
733 VMX_VMCS32_GUEST_ES_LIMIT,
734 VMX_VMCS32_GUEST_CS_LIMIT,
735 VMX_VMCS32_GUEST_SS_LIMIT,
736 VMX_VMCS32_GUEST_DS_LIMIT,
737 VMX_VMCS32_GUEST_FS_LIMIT,
738 VMX_VMCS32_GUEST_GS_LIMIT
739};
740static const uint32_t g_aVmcsSegAttr[] =
741{
742 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS,
743 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS,
744 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS,
745 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS,
746 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS,
747 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS
748};
749AssertCompile(RT_ELEMENTS(g_aVmcsSegSel) == X86_SREG_COUNT);
750AssertCompile(RT_ELEMENTS(g_aVmcsSegLimit) == X86_SREG_COUNT);
751AssertCompile(RT_ELEMENTS(g_aVmcsSegBase) == X86_SREG_COUNT);
752AssertCompile(RT_ELEMENTS(g_aVmcsSegAttr) == X86_SREG_COUNT);
753#endif /* VBOX_STRICT */
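/*
 * Usage sketch (illustrative only): the arrays above are indexed by
 * X86_SREG_XXX so strict builds can look up the VMCS field for a given segment
 * register, e.g. g_aVmcsSegBase[X86_SREG_CS] yields VMX_VMCS_GUEST_CS_BASE.
 */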
754
755#ifdef HMVMX_USE_FUNCTION_TABLE
756/**
757 * VMX_EXIT dispatch table.
758 */
759static const struct CLANG11NOTHROWWEIRDNESS { PFNVMXEXITHANDLER pfn; } g_aVMExitHandlers[VMX_EXIT_MAX + 1] =
760{
761 /* 0 VMX_EXIT_XCPT_OR_NMI */ { hmR0VmxExitXcptOrNmi },
762 /* 1 VMX_EXIT_EXT_INT */ { hmR0VmxExitExtInt },
763 /* 2 VMX_EXIT_TRIPLE_FAULT */ { hmR0VmxExitTripleFault },
764 /* 3 VMX_EXIT_INIT_SIGNAL */ { hmR0VmxExitErrUnexpected },
765 /* 4 VMX_EXIT_SIPI */ { hmR0VmxExitErrUnexpected },
766 /* 5 VMX_EXIT_IO_SMI */ { hmR0VmxExitErrUnexpected },
767 /* 6 VMX_EXIT_SMI */ { hmR0VmxExitErrUnexpected },
768 /* 7 VMX_EXIT_INT_WINDOW */ { hmR0VmxExitIntWindow },
769 /* 8 VMX_EXIT_NMI_WINDOW */ { hmR0VmxExitNmiWindow },
770 /* 9 VMX_EXIT_TASK_SWITCH */ { hmR0VmxExitTaskSwitch },
771 /* 10 VMX_EXIT_CPUID */ { hmR0VmxExitCpuid },
772 /* 11 VMX_EXIT_GETSEC */ { hmR0VmxExitGetsec },
773 /* 12 VMX_EXIT_HLT */ { hmR0VmxExitHlt },
774 /* 13 VMX_EXIT_INVD */ { hmR0VmxExitInvd },
775 /* 14 VMX_EXIT_INVLPG */ { hmR0VmxExitInvlpg },
776 /* 15 VMX_EXIT_RDPMC */ { hmR0VmxExitRdpmc },
777 /* 16 VMX_EXIT_RDTSC */ { hmR0VmxExitRdtsc },
778 /* 17 VMX_EXIT_RSM */ { hmR0VmxExitErrUnexpected },
779 /* 18 VMX_EXIT_VMCALL */ { hmR0VmxExitVmcall },
780#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
781 /* 19 VMX_EXIT_VMCLEAR */ { hmR0VmxExitVmclear },
782 /* 20 VMX_EXIT_VMLAUNCH */ { hmR0VmxExitVmlaunch },
783 /* 21 VMX_EXIT_VMPTRLD */ { hmR0VmxExitVmptrld },
784 /* 22 VMX_EXIT_VMPTRST */ { hmR0VmxExitVmptrst },
785 /* 23 VMX_EXIT_VMREAD */ { hmR0VmxExitVmread },
786 /* 24 VMX_EXIT_VMRESUME */ { hmR0VmxExitVmresume },
787 /* 25 VMX_EXIT_VMWRITE */ { hmR0VmxExitVmwrite },
788 /* 26 VMX_EXIT_VMXOFF */ { hmR0VmxExitVmxoff },
789 /* 27 VMX_EXIT_VMXON */ { hmR0VmxExitVmxon },
790#else
791 /* 19 VMX_EXIT_VMCLEAR */ { hmR0VmxExitSetPendingXcptUD },
792 /* 20 VMX_EXIT_VMLAUNCH */ { hmR0VmxExitSetPendingXcptUD },
793 /* 21 VMX_EXIT_VMPTRLD */ { hmR0VmxExitSetPendingXcptUD },
794 /* 22 VMX_EXIT_VMPTRST */ { hmR0VmxExitSetPendingXcptUD },
795 /* 23 VMX_EXIT_VMREAD */ { hmR0VmxExitSetPendingXcptUD },
796 /* 24 VMX_EXIT_VMRESUME */ { hmR0VmxExitSetPendingXcptUD },
797 /* 25 VMX_EXIT_VMWRITE */ { hmR0VmxExitSetPendingXcptUD },
798 /* 26 VMX_EXIT_VMXOFF */ { hmR0VmxExitSetPendingXcptUD },
799 /* 27 VMX_EXIT_VMXON */ { hmR0VmxExitSetPendingXcptUD },
800#endif
801 /* 28 VMX_EXIT_MOV_CRX */ { hmR0VmxExitMovCRx },
802 /* 29 VMX_EXIT_MOV_DRX */ { hmR0VmxExitMovDRx },
803 /* 30 VMX_EXIT_IO_INSTR */ { hmR0VmxExitIoInstr },
804 /* 31 VMX_EXIT_RDMSR */ { hmR0VmxExitRdmsr },
805 /* 32 VMX_EXIT_WRMSR */ { hmR0VmxExitWrmsr },
806 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ { hmR0VmxExitErrInvalidGuestState },
807 /* 34 VMX_EXIT_ERR_MSR_LOAD */ { hmR0VmxExitErrUnexpected },
808 /* 35 UNDEFINED */ { hmR0VmxExitErrUnexpected },
809 /* 36 VMX_EXIT_MWAIT */ { hmR0VmxExitMwait },
810 /* 37 VMX_EXIT_MTF */ { hmR0VmxExitMtf },
811 /* 38 UNDEFINED */ { hmR0VmxExitErrUnexpected },
812 /* 39 VMX_EXIT_MONITOR */ { hmR0VmxExitMonitor },
813 /* 40 VMX_EXIT_PAUSE */ { hmR0VmxExitPause },
814 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ { hmR0VmxExitErrUnexpected },
815 /* 42 UNDEFINED */ { hmR0VmxExitErrUnexpected },
816 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ { hmR0VmxExitTprBelowThreshold },
817 /* 44 VMX_EXIT_APIC_ACCESS */ { hmR0VmxExitApicAccess },
818 /* 45 VMX_EXIT_VIRTUALIZED_EOI */ { hmR0VmxExitErrUnexpected },
819 /* 46 VMX_EXIT_GDTR_IDTR_ACCESS */ { hmR0VmxExitErrUnexpected },
820 /* 47 VMX_EXIT_LDTR_TR_ACCESS */ { hmR0VmxExitErrUnexpected },
821 /* 48 VMX_EXIT_EPT_VIOLATION */ { hmR0VmxExitEptViolation },
822 /* 49 VMX_EXIT_EPT_MISCONFIG */ { hmR0VmxExitEptMisconfig },
823 /* 50 VMX_EXIT_INVEPT */ { hmR0VmxExitSetPendingXcptUD },
824 /* 51 VMX_EXIT_RDTSCP */ { hmR0VmxExitRdtscp },
825 /* 52 VMX_EXIT_PREEMPT_TIMER */ { hmR0VmxExitPreemptTimer },
826#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
827 /* 53 VMX_EXIT_INVVPID */ { hmR0VmxExitInvvpid },
828#else
829 /* 53 VMX_EXIT_INVVPID */ { hmR0VmxExitSetPendingXcptUD },
830#endif
831 /* 54 VMX_EXIT_WBINVD */ { hmR0VmxExitWbinvd },
832 /* 55 VMX_EXIT_XSETBV */ { hmR0VmxExitXsetbv },
833 /* 56 VMX_EXIT_APIC_WRITE */ { hmR0VmxExitErrUnexpected },
834 /* 57 VMX_EXIT_RDRAND */ { hmR0VmxExitErrUnexpected },
835 /* 58 VMX_EXIT_INVPCID */ { hmR0VmxExitInvpcid },
836 /* 59 VMX_EXIT_VMFUNC */ { hmR0VmxExitErrUnexpected },
837 /* 60 VMX_EXIT_ENCLS */ { hmR0VmxExitErrUnexpected },
838 /* 61 VMX_EXIT_RDSEED */ { hmR0VmxExitErrUnexpected },
839 /* 62 VMX_EXIT_PML_FULL */ { hmR0VmxExitErrUnexpected },
840 /* 63 VMX_EXIT_XSAVES */ { hmR0VmxExitErrUnexpected },
841 /* 64 VMX_EXIT_XRSTORS */ { hmR0VmxExitErrUnexpected },
842 /* 65 UNDEFINED */ { hmR0VmxExitErrUnexpected },
843 /* 66 VMX_EXIT_SPP_EVENT */ { hmR0VmxExitErrUnexpected },
844 /* 67 VMX_EXIT_UMWAIT */ { hmR0VmxExitErrUnexpected },
845 /* 68 VMX_EXIT_TPAUSE */ { hmR0VmxExitErrUnexpected },
846 /* 69 VMX_EXIT_LOADIWKEY */ { hmR0VmxExitErrUnexpected },
847};
848#endif /* HMVMX_USE_FUNCTION_TABLE */
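/*
 * Usage sketch (illustrative only; the actual dispatch is done by the run-loop
 * code further down): with the function table enabled, a VM-exit is dispatched
 * roughly as
 *
 *     if (pVmxTransient->uExitReason <= VMX_EXIT_MAX)
 *         rcStrict = g_aVMExitHandlers[pVmxTransient->uExitReason].pfn(pVCpu, pVmxTransient);
 */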
849
850#if defined(VBOX_STRICT) && defined(LOG_ENABLED)
851static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
852{
853 /* 0 */ "(Not Used)",
854 /* 1 */ "VMCALL executed in VMX root operation.",
855 /* 2 */ "VMCLEAR with invalid physical address.",
856 /* 3 */ "VMCLEAR with VMXON pointer.",
857 /* 4 */ "VMLAUNCH with non-clear VMCS.",
858 /* 5 */ "VMRESUME with non-launched VMCS.",
859 /* 6 */ "VMRESUME after VMXOFF.",
860 /* 7 */ "VM-entry with invalid control fields.",
861 /* 8 */ "VM-entry with invalid host state fields.",
862 /* 9 */ "VMPTRLD with invalid physical address.",
863 /* 10 */ "VMPTRLD with VMXON pointer.",
864 /* 11 */ "VMPTRLD with incorrect revision identifier.",
865 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
866 /* 13 */ "VMWRITE to read-only VMCS component.",
867 /* 14 */ "(Not Used)",
868 /* 15 */ "VMXON executed in VMX root operation.",
869 /* 16 */ "VM-entry with invalid executive-VMCS pointer.",
870 /* 17 */ "VM-entry with non-launched executive VMCS.",
871 /* 18 */ "VM-entry with executive-VMCS pointer not VMXON pointer.",
872 /* 19 */ "VMCALL with non-clear VMCS.",
873 /* 20 */ "VMCALL with invalid VM-exit control fields.",
874 /* 21 */ "(Not Used)",
875 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
876 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
877 /* 24 */ "VMCALL with invalid SMM-monitor features.",
878 /* 25 */ "VM-entry with invalid VM-execution control fields in executive VMCS.",
879 /* 26 */ "VM-entry with events blocked by MOV SS.",
880 /* 27 */ "(Not Used)",
881 /* 28 */ "Invalid operand to INVEPT/INVVPID."
882};
883#endif /* VBOX_STRICT && LOG_ENABLED */
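/*
 * Usage sketch (illustrative only): strict+logging builds read
 * VMX_VMCS32_RO_VM_INSTR_ERROR after a failed VMX instruction and, when the
 * value is within HMVMX_INSTR_ERROR_MAX, log g_apszVmxInstrErrors[uInstrError]
 * alongside the raw error number.
 */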
884
885
886/**
887 * Checks if the given MSR is part of the lastbranch-from-IP MSR stack.
888 * @returns @c true if it's part of LBR stack, @c false otherwise.
889 *
890 * @param pVM The cross context VM structure.
891 * @param idMsr The MSR.
892 * @param pidxMsr Where to store the index of the MSR in the LBR MSR array.
893 * Optional, can be NULL.
894 *
895 * @remarks Must only be called when LBR is enabled.
896 */
897DECL_FORCE_INLINE(bool) hmR0VmxIsLbrBranchFromMsr(PCVMCC pVM, uint32_t idMsr, uint32_t *pidxMsr)
898{
899 Assert(pVM->hmr0.s.vmx.fLbr);
900 Assert(pVM->hmr0.s.vmx.idLbrFromIpMsrFirst);
901 uint32_t const cLbrStack = pVM->hmr0.s.vmx.idLbrFromIpMsrLast - pVM->hmr0.s.vmx.idLbrFromIpMsrFirst + 1;
902 uint32_t const idxMsr = idMsr - pVM->hmr0.s.vmx.idLbrFromIpMsrFirst;
903 if (idxMsr < cLbrStack)
904 {
905 if (pidxMsr)
906 *pidxMsr = idxMsr;
907 return true;
908 }
909 return false;
910}
911
912
913/**
914 * Checks if the given MSR is part of the lastbranch-to-IP MSR stack.
915 * @returns @c true if it's part of LBR stack, @c false otherwise.
916 *
917 * @param pVM The cross context VM structure.
918 * @param idMsr The MSR.
919 * @param pidxMsr Where to store the index of the MSR in the LBR MSR array.
920 * Optional, can be NULL.
921 *
922 * @remarks Must only be called when LBR is enabled and when lastbranch-to-IP MSRs
923 * are supported by the CPU (see hmR0VmxSetupLbrMsrRange).
924 */
925DECL_FORCE_INLINE(bool) hmR0VmxIsLbrBranchToMsr(PCVMCC pVM, uint32_t idMsr, uint32_t *pidxMsr)
926{
927 Assert(pVM->hmr0.s.vmx.fLbr);
928 if (pVM->hmr0.s.vmx.idLbrToIpMsrFirst)
929 {
930 uint32_t const cLbrStack = pVM->hmr0.s.vmx.idLbrToIpMsrLast - pVM->hmr0.s.vmx.idLbrToIpMsrFirst + 1;
931 uint32_t const idxMsr = idMsr - pVM->hmr0.s.vmx.idLbrToIpMsrFirst;
932 if (idxMsr < cLbrStack)
933 {
934 if (pidxMsr)
935 *pidxMsr = idxMsr;
936 return true;
937 }
938 }
939 return false;
940}
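/*
 * Usage sketch (illustrative only): an MSR access path can use the two helpers
 * above to decide whether a given MSR belongs to the LBR from-IP or to-IP
 * stack and, if so, where it sits in the per-VCPU LBR arrays:
 *
 *     uint32_t idxMsr;
 *     if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxMsr))
 *     {
 *         // idxMsr indexes the saved from-IP values for this MSR.
 *     }
 */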
941
942
943/**
944 * Gets the CR0 guest/host mask.
945 *
946 * These bits typically do not change through the lifetime of a VM. Any bit set in
947 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
948 * by the guest.
949 *
950 * @returns The CR0 guest/host mask.
951 * @param pVCpu The cross context virtual CPU structure.
952 */
953static uint64_t hmR0VmxGetFixedCr0Mask(PCVMCPUCC pVCpu)
954{
955 /*
956 * Modifications by the guest to CR0 bits that VT-x ignores saving/restoring
957 * (CD, ET, NW) and to CR0 bits that we require for shadow paging (PG) must cause VM-exits.
958 *
959 * Furthermore, modifications to any bits that are reserved/unspecified currently
960 * by the Intel spec. must also cause a VM-exit. This prevents unpredictable behavior
961 * when future CPUs specify and use currently reserved/unspecified bits.
962 */
963 /** @todo Avoid intercepting CR0.PE with unrestricted guest execution. Fix PGM
964 * enmGuestMode to be in-sync with the current mode. See @bugref{6398}
965 * and @bugref{6944}. */
966 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
967 return ( X86_CR0_PE
968 | X86_CR0_NE
969 | (pVM->hmr0.s.fNestedPaging ? 0 : X86_CR0_WP)
970 | X86_CR0_PG
971 | VMX_EXIT_HOST_CR0_IGNORE_MASK);
972}
973
974
975/**
976 * Gets the CR4 guest/host mask.
977 *
978 * These bits typically do not change through the lifetime of a VM. Any bit set in
979 * this mask is owned by the host/hypervisor and would cause a VM-exit when modified
980 * by the guest.
981 *
982 * @returns The CR4 guest/host mask.
983 * @param pVCpu The cross context virtual CPU structure.
984 */
985static uint64_t hmR0VmxGetFixedCr4Mask(PCVMCPUCC pVCpu)
986{
987 /*
988 * We construct a mask of all CR4 bits that the guest can modify without causing
989 * a VM-exit. Then invert this mask to obtain all CR4 bits that should cause
990 * a VM-exit when the guest attempts to modify them when executing using
991 * hardware-assisted VMX.
992 *
993 * When a feature is not exposed to the guest (and may be present on the host),
994 * we want to intercept guest modifications to the bit so we can emulate proper
995 * behavior (e.g., #GP).
996 *
997 * Furthermore, only modifications to those bits that don't require immediate
998 * emulation are allowed. For example, PCIDE is excluded because its behavior
999 * depends on CR3, which might not always be the guest value while executing
1000 * using hardware-assisted VMX.
1001 */
1002 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
1003 bool const fFsGsBase = pVM->cpum.ro.GuestFeatures.fFsGsBase;
1004 bool const fXSaveRstor = pVM->cpum.ro.GuestFeatures.fXSaveRstor;
1005 bool const fFxSaveRstor = pVM->cpum.ro.GuestFeatures.fFxSaveRstor;
1006
1007 /*
1008 * Paranoia.
1009 * Ensure features exposed to the guest are present on the host.
1010 */
1011 Assert(!fFsGsBase || pVM->cpum.ro.HostFeatures.fFsGsBase);
1012 Assert(!fXSaveRstor || pVM->cpum.ro.HostFeatures.fXSaveRstor);
1013 Assert(!fFxSaveRstor || pVM->cpum.ro.HostFeatures.fFxSaveRstor);
1014
1015 uint64_t const fGstMask = ( X86_CR4_PVI
1016 | X86_CR4_TSD
1017 | X86_CR4_DE
1018 | X86_CR4_MCE
1019 | X86_CR4_PCE
1020 | X86_CR4_OSXMMEEXCPT
1021 | (fFsGsBase ? X86_CR4_FSGSBASE : 0)
1022 | (fXSaveRstor ? X86_CR4_OSXSAVE : 0)
1023 | (fFxSaveRstor ? X86_CR4_OSFXSR : 0));
1024 return ~fGstMask;
1025}
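/*
 * Usage sketch (illustrative only; the real VMCS writes happen in the CR0/CR4
 * export code further down): the masks returned by the two functions above are
 * written to the guest/host mask fields so that guest writes to host-owned
 * bits cause VM-exits:
 *
 *     uint64_t const fCr0Mask = hmR0VmxGetFixedCr0Mask(pVCpu);
 *     uint64_t const fCr4Mask = hmR0VmxGetFixedCr4Mask(pVCpu);
 *     rc = VMXWriteVmcsNw(VMX_VMCS_CTRL_CR0_MASK, fCr0Mask);   AssertRC(rc);
 *     rc = VMXWriteVmcsNw(VMX_VMCS_CTRL_CR4_MASK, fCr4Mask);   AssertRC(rc);
 */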
1026
1027
1028/**
1029 * Gets the active (in use) VMCS info. object for the specified VCPU.
1030 *
1031 * This is either the guest or nested-guest VMCS info. and need not necessarily
1032 * pertain to the "current" VMCS (in the VMX definition of the term). For instance,
1033 * if the VM-entry failed due to an invalid guest state, we may have "cleared" the
1034 * current VMCS while returning to ring-3. However, the VMCS info. object for that
1035 * VMCS would still be active and returned here so that we could dump the VMCS
1036 * fields to ring-3 for diagnostics. This function is thus only used to
1037 * distinguish between the nested-guest and guest VMCS.
1038 *
1039 * @returns The active VMCS information.
1040 * @param pVCpu The cross context virtual CPU structure.
1041 *
1042 * @thread EMT.
1043 * @remarks This function may be called with preemption or interrupts disabled!
1044 */
1045DECLINLINE(PVMXVMCSINFO) hmGetVmxActiveVmcsInfo(PVMCPUCC pVCpu)
1046{
1047 if (!pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs)
1048 return &pVCpu->hmr0.s.vmx.VmcsInfo;
1049 return &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1050}
1051
1052
1053/**
1054 * Returns whether the VM-exit MSR-store area differs from the VM-exit MSR-load
1055 * area.
1056 *
1057 * @returns @c true if it's different, @c false otherwise.
1058 * @param pVmcsInfo The VMCS info. object.
1059 */
1060DECL_FORCE_INLINE(bool) hmR0VmxIsSeparateExitMsrStoreAreaVmcs(PCVMXVMCSINFO pVmcsInfo)
1061{
1062 return RT_BOOL( pVmcsInfo->pvGuestMsrStore != pVmcsInfo->pvGuestMsrLoad
1063 && pVmcsInfo->pvGuestMsrStore);
1064}
1065
1066
1067/**
1068 * Sets the given Processor-based VM-execution controls.
1069 *
1070 * @param pVmxTransient The VMX-transient structure.
1071 * @param uProcCtls The Processor-based VM-execution controls to set.
1072 */
1073static void hmR0VmxSetProcCtlsVmcs(PVMXTRANSIENT pVmxTransient, uint32_t uProcCtls)
1074{
1075 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1076 if ((pVmcsInfo->u32ProcCtls & uProcCtls) != uProcCtls)
1077 {
1078 pVmcsInfo->u32ProcCtls |= uProcCtls;
1079 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
1080 AssertRC(rc);
1081 }
1082}
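/*
 * Usage sketch (illustrative only; the exact control flag name is assumed to
 * follow the VMX header naming): enabling interrupt-window exiting, which the
 * Remove counterpart below undoes again:
 *
 *     hmR0VmxSetProcCtlsVmcs(pVmxTransient, VMX_PROC_CTLS_INT_WINDOW_EXIT);
 */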
1083
1084
1085/**
1086 * Removes the given Processor-based VM-execution controls.
1087 *
1088 * @param pVCpu The cross context virtual CPU structure.
1089 * @param pVmxTransient The VMX-transient structure.
1090 * @param uProcCtls The Processor-based VM-execution controls to remove.
1091 *
1092 * @remarks When executing a nested-guest, this will not remove any of the specified
1093 * controls if the nested hypervisor has set any one of them.
1094 */
1095static void hmR0VmxRemoveProcCtlsVmcs(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uProcCtls)
1096{
1097 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1098 if (pVmcsInfo->u32ProcCtls & uProcCtls)
1099 {
1100#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1101 if ( !pVmxTransient->fIsNestedGuest
1102 || !CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uProcCtls))
1103#else
1104 NOREF(pVCpu);
1105 if (!pVmxTransient->fIsNestedGuest)
1106#endif
1107 {
1108 pVmcsInfo->u32ProcCtls &= ~uProcCtls;
1109 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
1110 AssertRC(rc);
1111 }
1112 }
1113}
1114
1115
1116/**
1117 * Sets the TSC offset for the current VMCS.
1118 *
1119 * @param pVmcsInfo The VMCS info. object.
1120 * @param uTscOffset The TSC offset to set.
1121 */
1122static void hmR0VmxSetTscOffsetVmcs(PVMXVMCSINFO pVmcsInfo, uint64_t uTscOffset)
1123{
1124 if (pVmcsInfo->u64TscOffset != uTscOffset)
1125 {
1126 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, uTscOffset);
1127 AssertRC(rc);
1128 pVmcsInfo->u64TscOffset = uTscOffset;
1129 }
1130}
1131
1132
1133/**
1134 * Adds one or more exceptions to the exception bitmap and commits it to the current
1135 * VMCS.
1136 *
1137 * @param pVmxTransient The VMX-transient structure.
1138 * @param uXcptMask The exception(s) to add.
1139 */
1140static void hmR0VmxAddXcptInterceptMask(PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
1141{
1142 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1143 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
1144 if ((uXcptBitmap & uXcptMask) != uXcptMask)
1145 {
1146 uXcptBitmap |= uXcptMask;
1147 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
1148 AssertRC(rc);
1149 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
1150 }
1151}
1152
1153
1154/**
1155 * Adds an exception to the exception bitmap and commits it to the current VMCS.
1156 *
1157 * @param pVmxTransient The VMX-transient structure.
1158 * @param uXcpt The exception to add.
1159 */
1160static void hmR0VmxAddXcptIntercept(PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
1161{
1162 Assert(uXcpt <= X86_XCPT_LAST);
1163 hmR0VmxAddXcptInterceptMask(pVmxTransient, RT_BIT_32(uXcpt));
1164}
1165
1166
1167/**
1168 * Removes one or more exceptions from the exception bitmap and commits it to the
1169 * current VMCS.
1170 *
1171 * This takes care of not removing the exception intercept if a nested-guest
1172 * requires the exception to be intercepted.
1173 *
1174 * @returns VBox status code.
1175 * @param pVCpu The cross context virtual CPU structure.
1176 * @param pVmxTransient The VMX-transient structure.
1177 * @param uXcptMask The exception(s) to remove.
1178 */
1179static int hmR0VmxRemoveXcptInterceptMask(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t uXcptMask)
1180{
1181 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1182 uint32_t u32XcptBitmap = pVmcsInfo->u32XcptBitmap;
1183 if (u32XcptBitmap & uXcptMask)
1184 {
1185#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1186 if (!pVmxTransient->fIsNestedGuest)
1187 { /* likely */ }
1188 else
1189 uXcptMask &= ~pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
1190#endif
1191#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
1192 uXcptMask &= ~( RT_BIT(X86_XCPT_BP)
1193 | RT_BIT(X86_XCPT_DE)
1194 | RT_BIT(X86_XCPT_NM)
1195 | RT_BIT(X86_XCPT_TS)
1196 | RT_BIT(X86_XCPT_UD)
1197 | RT_BIT(X86_XCPT_NP)
1198 | RT_BIT(X86_XCPT_SS)
1199 | RT_BIT(X86_XCPT_GP)
1200 | RT_BIT(X86_XCPT_PF)
1201 | RT_BIT(X86_XCPT_MF));
1202#elif defined(HMVMX_ALWAYS_TRAP_PF)
1203 uXcptMask &= ~RT_BIT(X86_XCPT_PF);
1204#endif
1205 if (uXcptMask)
1206 {
1207 /* Validate we are not removing any essential exception intercepts. */
1208 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || !(uXcptMask & RT_BIT(X86_XCPT_PF)));
1209 NOREF(pVCpu);
1210 Assert(!(uXcptMask & RT_BIT(X86_XCPT_DB)));
1211 Assert(!(uXcptMask & RT_BIT(X86_XCPT_AC)));
1212
1213 /* Remove it from the exception bitmap. */
1214 u32XcptBitmap &= ~uXcptMask;
1215
1216 /* Commit and update the cache if necessary. */
1217 if (pVmcsInfo->u32XcptBitmap != u32XcptBitmap)
1218 {
1219 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
1220 AssertRC(rc);
1221 pVmcsInfo->u32XcptBitmap = u32XcptBitmap;
1222 }
1223 }
1224 }
1225 return VINF_SUCCESS;
1226}
1227
1228
1229/**
1230 * Removes an exception from the exception bitmap and commits it to the current
1231 * VMCS.
1232 *
1233 * @returns VBox status code.
1234 * @param pVCpu The cross context virtual CPU structure.
1235 * @param pVmxTransient The VMX-transient structure.
1236 * @param uXcpt The exception to remove.
1237 */
1238static int hmR0VmxRemoveXcptIntercept(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint8_t uXcpt)
1239{
1240 return hmR0VmxRemoveXcptInterceptMask(pVCpu, pVmxTransient, RT_BIT(uXcpt));
1241}
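/*
 * Usage sketch (illustrative only): intercepts for an individual exception can
 * be toggled at runtime, e.g. to start and later stop trapping #GP:
 *
 *     hmR0VmxAddXcptIntercept(pVmxTransient, X86_XCPT_GP);
 *     ...
 *     rc = hmR0VmxRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_GP);
 */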
1242
1243
1244/**
1245 * Loads the VMCS specified by the VMCS info. object.
1246 *
1247 * @returns VBox status code.
1248 * @param pVmcsInfo The VMCS info. object.
1249 *
1250 * @remarks Can be called with interrupts disabled.
1251 */
1252static int hmR0VmxLoadVmcs(PVMXVMCSINFO pVmcsInfo)
1253{
1254 Assert(pVmcsInfo->HCPhysVmcs != 0 && pVmcsInfo->HCPhysVmcs != NIL_RTHCPHYS);
1255 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1256
1257 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysVmcs);
1258 if (RT_SUCCESS(rc))
1259 pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
1260 return rc;
1261}
1262
1263
1264/**
1265 * Clears the VMCS specified by the VMCS info. object.
1266 *
1267 * @returns VBox status code.
1268 * @param pVmcsInfo The VMCS info. object.
1269 *
1270 * @remarks Can be called with interrupts disabled.
1271 */
1272static int hmR0VmxClearVmcs(PVMXVMCSINFO pVmcsInfo)
1273{
1274 Assert(pVmcsInfo->HCPhysVmcs != 0 && pVmcsInfo->HCPhysVmcs != NIL_RTHCPHYS);
1275 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1276
1277 int rc = VMXClearVmcs(pVmcsInfo->HCPhysVmcs);
1278 if (RT_SUCCESS(rc))
1279 pVmcsInfo->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
1280 return rc;
1281}
1282
1283
1284#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1285/**
1286 * Loads the shadow VMCS specified by the VMCS info. object.
1287 *
1288 * @returns VBox status code.
1289 * @param pVmcsInfo The VMCS info. object.
1290 *
1291 * @remarks Can be called with interrupts disabled.
1292 */
1293static int hmR0VmxLoadShadowVmcs(PVMXVMCSINFO pVmcsInfo)
1294{
1295 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1296 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1297
1298 int rc = VMXLoadVmcs(pVmcsInfo->HCPhysShadowVmcs);
1299 if (RT_SUCCESS(rc))
1300 pVmcsInfo->fShadowVmcsState |= VMX_V_VMCS_LAUNCH_STATE_CURRENT;
1301 return rc;
1302}
1303
1304
1305/**
1306 * Clears the shadow VMCS specified by the VMCS info. object.
1307 *
1308 * @returns VBox status code.
1309 * @param pVmcsInfo The VMCS info. object.
1310 *
1311 * @remarks Can be called with interrupts disabled.
1312 */
1313static int hmR0VmxClearShadowVmcs(PVMXVMCSINFO pVmcsInfo)
1314{
1315 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1316 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
1317
1318 int rc = VMXClearVmcs(pVmcsInfo->HCPhysShadowVmcs);
1319 if (RT_SUCCESS(rc))
1320 pVmcsInfo->fShadowVmcsState = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
1321 return rc;
1322}
1323
1324
1325/**
1326 * Switches from and to the specified VMCSes.
1327 *
1328 * @returns VBox status code.
1329 * @param pVmcsInfoFrom The VMCS info. object we are switching from.
1330 * @param pVmcsInfoTo The VMCS info. object we are switching to.
1331 *
1332 * @remarks Called with interrupts disabled.
1333 */
1334static int hmR0VmxSwitchVmcs(PVMXVMCSINFO pVmcsInfoFrom, PVMXVMCSINFO pVmcsInfoTo)
1335{
1336 /*
1337 * Clear the VMCS we are switching out if it has not already been cleared.
1338 * This will sync any CPU internal data back to the VMCS.
1339 */
1340 if (pVmcsInfoFrom->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1341 {
1342 int rc = hmR0VmxClearVmcs(pVmcsInfoFrom);
1343 if (RT_SUCCESS(rc))
1344 {
1345 /*
1346 * The shadow VMCS, if any, would not be active at this point since we
1347 * would have cleared it while importing the virtual hardware-virtualization
1348 * state as part of the VMLAUNCH/VMRESUME VM-exit. Hence, there's no need to
1349 * clear the shadow VMCS here, just assert for safety.
1350 */
1351 Assert(!pVmcsInfoFrom->pvShadowVmcs || pVmcsInfoFrom->fShadowVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR);
1352 }
1353 else
1354 return rc;
1355 }
1356
1357 /*
1358 * Clear the VMCS we are switching to if it has not already been cleared.
1359 * This will initialize the VMCS launch state to "clear", which is required for loading it.
1360 *
1361 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
1362 */
1363 if (pVmcsInfoTo->fVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
1364 {
1365 int rc = hmR0VmxClearVmcs(pVmcsInfoTo);
1366 if (RT_SUCCESS(rc))
1367 { /* likely */ }
1368 else
1369 return rc;
1370 }
1371
1372 /*
1373 * Finally, load the VMCS we are switching to.
1374 */
1375 return hmR0VmxLoadVmcs(pVmcsInfoTo);
1376}
1377
1378
1379/**
1380 * Switches between the guest VMCS and the nested-guest VMCS as specified by the
1381 * caller.
1382 *
1383 * @returns VBox status code.
1384 * @param pVCpu The cross context virtual CPU structure.
1385 * @param fSwitchToNstGstVmcs Whether to switch to the nested-guest VMCS (pass
1386 * true) or guest VMCS (pass false).
1387 */
1388static int hmR0VmxSwitchToGstOrNstGstVmcs(PVMCPUCC pVCpu, bool fSwitchToNstGstVmcs)
1389{
1390 /* Ensure we have synced everything from the guest-CPU context to the VMCS before switching. */
1391 HMVMX_CPUMCTX_ASSERT(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
1392
1393 PVMXVMCSINFO pVmcsInfoFrom;
1394 PVMXVMCSINFO pVmcsInfoTo;
1395 if (fSwitchToNstGstVmcs)
1396 {
1397 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfo;
1398 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1399 }
1400 else
1401 {
1402 pVmcsInfoFrom = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
1403 pVmcsInfoTo = &pVCpu->hmr0.s.vmx.VmcsInfo;
1404 }
1405
1406 /*
1407 * Disable interrupts to prevent being preempted while we switch the current VMCS as the
1408 * preemption hook code path acquires the current VMCS.
1409 */
1410 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1411
1412 int rc = hmR0VmxSwitchVmcs(pVmcsInfoFrom, pVmcsInfoTo);
1413 if (RT_SUCCESS(rc))
1414 {
1415 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fSwitchToNstGstVmcs;
1416 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fSwitchToNstGstVmcs;
1417
1418 /*
1419 * If we are switching to a VMCS that was executed on a different host CPU or was
1420 * never executed before, flag that we need to export the host state before executing
1421 * guest/nested-guest code using hardware-assisted VMX.
1422 *
1423 * This could probably be done in a preemptible context since the preemption hook
1424 * will flag the necessary change in host context. However, since preemption is
1425 * already disabled and to avoid making assumptions about host specific code in
1426 * RTMpCpuId when called with preemption enabled, we'll do this while preemption is
1427 * disabled.
1428 */
1429 if (pVmcsInfoTo->idHostCpuState == RTMpCpuId())
1430 { /* likely */ }
1431 else
1432 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE);
1433
1434 ASMSetFlags(fEFlags);
1435
1436 /*
1437 * We use different VM-exit MSR-store areas for the guest and nested-guest. Hence,
1438 * flag that we need to update the host MSR values there. Even if we decide in the
1439 * future to share the VM-exit MSR-store area page between the guest and nested-guest,
1440 * if its content differs, we would have to update the host MSRs anyway.
1441 */
1442 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
1443 }
1444 else
1445 ASMSetFlags(fEFlags);
1446 return rc;
1447}
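/*
 * Usage sketch (illustrative only): the nested-guest code paths call this when
 * entering or leaving hardware-assisted execution of the nested-guest, passing
 * true to switch to the nested-guest VMCS and false to switch back:
 *
 *     int rc = hmR0VmxSwitchToGstOrNstGstVmcs(pVCpu, true);
 */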
1448#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1449
1450
1451/**
1452 * Updates the VM's last error record.
1453 *
1454 * If there was a VMX instruction error, reads the error data from the VMCS and
1455 * updates VCPU's last error record as well.
1456 *
1457 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1458 * Can be NULL if @a rc is not VERR_VMX_UNABLE_TO_START_VM or
1459 * VERR_VMX_INVALID_VMCS_FIELD.
1460 * @param rc The error code.
1461 */
1462static void hmR0VmxUpdateErrorRecord(PVMCPUCC pVCpu, int rc)
1463{
1464 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
1465 || rc == VERR_VMX_UNABLE_TO_START_VM)
1466 {
1467 AssertPtrReturnVoid(pVCpu);
1468 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
1469 }
1470 pVCpu->CTX_SUFF(pVM)->hm.s.ForR3.rcInit = rc;
1471}
1472
1473
1474#ifdef VBOX_STRICT
1475/**
1476 * Reads the VM-entry interruption-information field from the VMCS into the VMX
1477 * transient structure.
1478 *
1479 * @param pVmxTransient The VMX-transient structure.
1480 */
1481DECLINLINE(void) hmR0VmxReadEntryIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
1482{
1483 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntInfo);
1484 AssertRC(rc);
1485}
1486
1487
1488/**
1489 * Reads the VM-entry exception error code field from the VMCS into
1490 * the VMX transient structure.
1491 *
1492 * @param pVmxTransient The VMX-transient structure.
1493 */
1494DECLINLINE(void) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
1495{
1496 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
1497 AssertRC(rc);
1498}
1499
1500
1501/**
1502 * Reads the VM-entry instruction length field from the VMCS into
1503 * the VMX transient structure.
1504 *
1505 * @param pVmxTransient The VMX-transient structure.
1506 */
1507DECLINLINE(void) hmR0VmxReadEntryInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
1508{
1509 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
1510 AssertRC(rc);
1511}
1512#endif /* VBOX_STRICT */
1513
1514
1515/**
1516 * Reads the VM-exit interruption-information field from the VMCS into the VMX
1517 * transient structure.
1518 *
1519 * @param pVmxTransient The VMX-transient structure.
1520 */
1521DECLINLINE(void) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
1522{
1523 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO))
1524 {
1525 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1526 AssertRC(rc);
1527 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO;
1528 }
1529}
1530
1531
1532/**
1533 * Reads the VM-exit interruption error code from the VMCS into the VMX
1534 * transient structure.
1535 *
1536 * @param pVmxTransient The VMX-transient structure.
1537 */
1538DECLINLINE(void) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
1539{
1540 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE))
1541 {
1542 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1543 AssertRC(rc);
1544 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE;
1545 }
1546}
1547
1548
1549/**
1550 * Reads the VM-exit instruction length field from the VMCS into the VMX
1551 * transient structure.
1552 *
1553 * @param pVmxTransient The VMX-transient structure.
1554 */
1555DECLINLINE(void) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
1556{
1557 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN))
1558 {
1559 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1560 AssertRC(rc);
1561 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN;
1562 }
1563}
1564
1565
1566/**
1567 * Reads the VM-exit instruction-information field from the VMCS into
1568 * the VMX transient structure.
1569 *
1570 * @param pVmxTransient The VMX-transient structure.
1571 */
1572DECLINLINE(void) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient)
1573{
1574 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO))
1575 {
1576 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1577 AssertRC(rc);
1578 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO;
1579 }
1580}
1581
1582
1583/**
1584 * Reads the Exit Qualification from the VMCS into the VMX transient structure.
1585 *
1586 * @param pVmxTransient The VMX-transient structure.
1587 */
1588DECLINLINE(void) hmR0VmxReadExitQualVmcs(PVMXTRANSIENT pVmxTransient)
1589{
1590 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION))
1591 {
1592 int rc = VMXReadVmcsNw(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1593 AssertRC(rc);
1594 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION;
1595 }
1596}
1597
1598
1599/**
1600 * Reads the Guest-linear address from the VMCS into the VMX transient structure.
1601 *
1602 * @param pVmxTransient The VMX-transient structure.
1603 */
1604DECLINLINE(void) hmR0VmxReadGuestLinearAddrVmcs(PVMXTRANSIENT pVmxTransient)
1605{
1606 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_LINEAR_ADDR))
1607 {
1608 int rc = VMXReadVmcsNw(VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1609 AssertRC(rc);
1610 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_LINEAR_ADDR;
1611 }
1612}
1613
1614
1615/**
1616 * Reads the Guest-physical address from the VMCS into the VMX transient structure.
1617 *
1618 * @param pVmxTransient The VMX-transient structure.
1619 */
1620DECLINLINE(void) hmR0VmxReadGuestPhysicalAddrVmcs(PVMXTRANSIENT pVmxTransient)
1621{
1622 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PHYSICAL_ADDR))
1623 {
1624 int rc = VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1625 AssertRC(rc);
1626 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PHYSICAL_ADDR;
1627 }
1628}
1629
1630#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1631/**
1632 * Reads the Guest pending-debug exceptions from the VMCS into the VMX transient
1633 * structure.
1634 *
1635 * @param pVmxTransient The VMX-transient structure.
1636 */
1637DECLINLINE(void) hmR0VmxReadGuestPendingDbgXctps(PVMXTRANSIENT pVmxTransient)
1638{
1639 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_GUEST_PENDING_DBG_XCPTS))
1640 {
1641 int rc = VMXReadVmcsNw(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &pVmxTransient->uGuestPendingDbgXcpts);
1642 AssertRC(rc);
1643 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_GUEST_PENDING_DBG_XCPTS;
1644 }
1645}
1646#endif
1647
1648/**
1649 * Reads the IDT-vectoring information field from the VMCS into the VMX
1650 * transient structure.
1651 *
1652 * @param pVmxTransient The VMX-transient structure.
1653 *
1654 * @remarks No-long-jump zone!!!
1655 */
1656DECLINLINE(void) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
1657{
1658 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO))
1659 {
1660 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1661 AssertRC(rc);
1662 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO;
1663 }
1664}
1665
1666
1667/**
1668 * Reads the IDT-vectoring error code from the VMCS into the VMX
1669 * transient structure.
1670 *
1671 * @param pVmxTransient The VMX-transient structure.
1672 */
1673DECLINLINE(void) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
1674{
1675 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE))
1676 {
1677 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1678 AssertRC(rc);
1679 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE;
1680 }
1681}
1682
1683#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
1684/**
1685 * Reads all relevant read-only VMCS fields into the VMX transient structure.
1686 *
1687 * @param pVmxTransient The VMX-transient structure.
1688 */
1689static void hmR0VmxReadAllRoFieldsVmcs(PVMXTRANSIENT pVmxTransient)
1690{
1691 int rc = VMXReadVmcsNw(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQual);
1692 rc |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbExitInstr);
1693 rc |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
1694 rc |= VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo);
1695 rc |= VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
1696 rc |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo);
1697 rc |= VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode);
1698 rc |= VMXReadVmcsNw(VMX_VMCS_RO_GUEST_LINEAR_ADDR, &pVmxTransient->uGuestLinearAddr);
1699 rc |= VMXReadVmcs64(VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL, &pVmxTransient->uGuestPhysicalAddr);
1700 AssertRC(rc);
1701 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION
1702 | HMVMX_READ_EXIT_INSTR_LEN
1703 | HMVMX_READ_EXIT_INSTR_INFO
1704 | HMVMX_READ_IDT_VECTORING_INFO
1705 | HMVMX_READ_IDT_VECTORING_ERROR_CODE
1706 | HMVMX_READ_EXIT_INTERRUPTION_INFO
1707 | HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE
1708 | HMVMX_READ_GUEST_LINEAR_ADDR
1709 | HMVMX_READ_GUEST_PHYSICAL_ADDR;
1710}
1711#endif
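
/*
 * Illustrative sketch, kept out of the build with #if 0: typical use of the on-demand
 * read helpers above from a VM-exit handler. The first call of each helper performs
 * the VMREAD and sets the corresponding HMVMX_READ_XXX bit in fVmcsFieldsRead;
 * repeated calls within the same VM-exit are cheap no-ops. The function name is
 * hypothetical.
 */
#if 0
static void hmR0VmxSketchReadOnDemand(PVMXTRANSIENT pVmxTransient)
{
    hmR0VmxReadExitQualVmcs(pVmxTransient);     /* Performs the VMREAD and caches the result. */
    hmR0VmxReadExitInstrLenVmcs(pVmxTransient); /* Ditto for the VM-exit instruction length. */
    hmR0VmxReadExitQualVmcs(pVmxTransient);     /* Already cached; returns without a VMREAD. */
    Log4Func(("ExitQual=%#RX64 cbInstr=%u\n", pVmxTransient->uExitQual, pVmxTransient->cbExitInstr));
}
#endif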
1712
1713/**
1714 * Enters VMX root mode operation on the current CPU.
1715 *
1716 * @returns VBox status code.
1717 * @param pHostCpu The HM physical-CPU structure.
1718 * @param pVM The cross context VM structure. Can be
1719 * NULL, after a resume.
1720 * @param HCPhysCpuPage Physical address of the VMXON region.
1721 * @param pvCpuPage Pointer to the VMXON region.
1722 */
1723static int hmR0VmxEnterRootMode(PHMPHYSCPU pHostCpu, PVMCC pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
1724{
1725 Assert(pHostCpu);
1726 Assert(HCPhysCpuPage && HCPhysCpuPage != NIL_RTHCPHYS);
1727 Assert(RT_ALIGN_T(HCPhysCpuPage, _4K, RTHCPHYS) == HCPhysCpuPage);
1728 Assert(pvCpuPage);
1729 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1730
1731 if (pVM)
1732 {
1733 /* Write the VMCS revision identifier to the VMXON region. */
1734 *(uint32_t *)pvCpuPage = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID);
1735 }
1736
1737 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
1738 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1739
1740 /* Enable the VMX bit in CR4 if necessary. */
1741 RTCCUINTREG const uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, RTCCUINTREG_MAX);
1742
1743 /* Record whether VMXE was already enabled prior to us enabling it above. */
1744 pHostCpu->fVmxeAlreadyEnabled = RT_BOOL(uOldCr4 & X86_CR4_VMXE);
1745
1746 /* Enter VMX root mode. */
1747 int rc = VMXEnable(HCPhysCpuPage);
1748 if (RT_FAILURE(rc))
1749 {
1750 /* Restore CR4.VMXE if it was not set prior to our attempt to set it above. */
1751 if (!pHostCpu->fVmxeAlreadyEnabled)
1752 SUPR0ChangeCR4(0 /* fOrMask */, ~(uint64_t)X86_CR4_VMXE);
1753
1754 if (pVM)
1755 pVM->hm.s.ForR3.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
1756 }
1757
1758 /* Restore interrupts. */
1759 ASMSetFlags(fEFlags);
1760 return rc;
1761}
1762
1763
1764/**
1765 * Exits VMX root mode operation on the current CPU.
1766 *
1767 * @returns VBox status code.
1768 * @param pHostCpu The HM physical-CPU structure.
1769 */
1770static int hmR0VmxLeaveRootMode(PHMPHYSCPU pHostCpu)
1771{
1772 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1773
1774 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with CR4. */
1775 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
1776
1777 /* If we're for some reason not in VMX root mode, then don't leave it. */
1778 RTCCUINTREG const uHostCr4 = ASMGetCR4();
1779
1780 int rc;
1781 if (uHostCr4 & X86_CR4_VMXE)
1782 {
1783 /* Exit VMX root mode and clear the VMX bit in CR4. */
1784 VMXDisable();
1785
1786 /* Clear CR4.VMXE only if it was clear prior to us setting it. */
1787 if (!pHostCpu->fVmxeAlreadyEnabled)
1788 SUPR0ChangeCR4(0 /* fOrMask */, ~(uint64_t)X86_CR4_VMXE);
1789
1790 rc = VINF_SUCCESS;
1791 }
1792 else
1793 rc = VERR_VMX_NOT_IN_VMX_ROOT_MODE;
1794
1795 /* Restore interrupts. */
1796 ASMSetFlags(fEFlags);
1797 return rc;
1798}
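
/*
 * Illustrative sketch, kept out of the build with #if 0: how the two helpers above are
 * meant to be paired on a host CPU (presumably by the CPU enable/disable paths). The
 * function name is hypothetical and error handling is simplified.
 */
#if 0
static int hmR0VmxSketchRootModeLifecycle(PHMPHYSCPU pHostCpu, PVMCC pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
{
    /* Enter VMX root mode: write the VMCS revision ID to the VMXON region, set CR4.VMXE, VMXON. */
    int rc = hmR0VmxEnterRootMode(pHostCpu, pVM, HCPhysCpuPage, pvCpuPage);
    if (RT_SUCCESS(rc))
    {
        /* ... hardware-assisted VMX execution takes place while in root mode ... */

        /* Leave VMX root mode: VMXOFF and clear CR4.VMXE if we were the ones who set it. */
        rc = hmR0VmxLeaveRootMode(pHostCpu);
    }
    return rc;
}
#endif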
1799
1800
1801/**
1802 * Allocates pages as specified by an array of VMX page-allocation info
1803 * objects.
1804 *
1805 * The contents of the pages are zeroed after allocation.
1806 *
1807 * @returns VBox status code.
1808 * @param phMemObj Where to return the handle to the allocation.
1809 * @param paAllocInfo The pointer to the first element of the VMX
1810 * page-allocation info object array.
1811 * @param cEntries The number of elements in the @a paAllocInfo array.
1812 */
1813static int hmR0VmxPagesAllocZ(PRTR0MEMOBJ phMemObj, PVMXPAGEALLOCINFO paAllocInfo, uint32_t cEntries)
1814{
1815 *phMemObj = NIL_RTR0MEMOBJ;
1816
1817 /* Figure out how many pages to allocate. */
1818 uint32_t cPages = 0;
1819 for (uint32_t iPage = 0; iPage < cEntries; iPage++)
1820 cPages += !!paAllocInfo[iPage].fValid;
1821
1822 /* Allocate the pages. */
1823 if (cPages)
1824 {
1825 size_t const cbPages = cPages << PAGE_SHIFT;
1826 int rc = RTR0MemObjAllocPage(phMemObj, cbPages, false /* fExecutable */);
1827 if (RT_FAILURE(rc))
1828 return rc;
1829
1830 /* Zero the contents and assign each page to the corresponding VMX page-allocation entry. */
1831 void *pvFirstPage = RTR0MemObjAddress(*phMemObj);
1832 RT_BZERO(pvFirstPage, cbPages);
1833
1834 uint32_t iPage = 0;
1835 for (uint32_t i = 0; i < cEntries; i++)
1836 if (paAllocInfo[i].fValid)
1837 {
1838 RTHCPHYS const HCPhysPage = RTR0MemObjGetPagePhysAddr(*phMemObj, iPage);
1839 void *pvPage = (void *)((uintptr_t)pvFirstPage + (iPage << X86_PAGE_4K_SHIFT));
1840 Assert(HCPhysPage && HCPhysPage != NIL_RTHCPHYS);
1841 AssertPtr(pvPage);
1842
1843 Assert(paAllocInfo[i].pHCPhys);
1844 Assert(paAllocInfo[i].ppVirt);
1845 *paAllocInfo[i].pHCPhys = HCPhysPage;
1846 *paAllocInfo[i].ppVirt = pvPage;
1847
1848 /* Move to next page. */
1849 ++iPage;
1850 }
1851
1852 /* Make sure all valid (requested) pages have been assigned. */
1853 Assert(iPage == cPages);
1854 }
1855 return VINF_SUCCESS;
1856}
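
/*
 * Illustrative sketch, kept out of the build with #if 0: minimal use of
 * hmR0VmxPagesAllocZ() with one unconditionally valid entry and one skipped entry,
 * showing that only valid entries consume a backing page and have their addresses
 * filled in. The function name and local variables are hypothetical.
 */
#if 0
static int hmR0VmxSketchPagesAlloc(void)
{
    RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
    RTHCPHYS   HCPhysA = NIL_RTHCPHYS;
    RTR0PTR    pvA     = NIL_RTR0PTR;
    RTHCPHYS   HCPhysB = NIL_RTHCPHYS;
    RTR0PTR    pvB     = NIL_RTR0PTR;
    VMXPAGEALLOCINFO aAllocInfo[] =
    {
        { true,  0 /* Unused */, &HCPhysA, &pvA },
        { false, 0 /* Unused */, &HCPhysB, &pvB },   /* Skipped: consumes no page, stays NIL. */
    };

    /* A single zeroed page is allocated here; its addresses land in HCPhysA/pvA only. */
    int rc = hmR0VmxPagesAllocZ(&hMemObj, &aAllocInfo[0], RT_ELEMENTS(aAllocInfo));
    if (RT_SUCCESS(rc))
        hmR0VmxPagesFree(&hMemObj);
    return rc;
}
#endif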
1857
1858
1859/**
1860 * Frees pages allocated using hmR0VmxPagesAllocZ.
1861 *
1862 * @param phMemObj Pointer to the memory object handle. Will be set to
1863 * NIL.
1864 */
1865DECL_FORCE_INLINE(void) hmR0VmxPagesFree(PRTR0MEMOBJ phMemObj)
1866{
1867 /* We can cleanup wholesale since it's all one allocation. */
1868 if (*phMemObj != NIL_RTR0MEMOBJ)
1869 {
1870 RTR0MemObjFree(*phMemObj, true /* fFreeMappings */);
1871 *phMemObj = NIL_RTR0MEMOBJ;
1872 }
1873}
1874
1875
1876/**
1877 * Initializes a VMCS info. object.
1878 *
1879 * @param pVmcsInfo The VMCS info. object.
1880 * @param pVmcsInfoShared The VMCS info. object shared with ring-3.
1881 */
1882static void hmR0VmxVmcsInfoInit(PVMXVMCSINFO pVmcsInfo, PVMXVMCSINFOSHARED pVmcsInfoShared)
1883{
1884 RT_ZERO(*pVmcsInfo);
1885 RT_ZERO(*pVmcsInfoShared);
1886
1887 pVmcsInfo->pShared = pVmcsInfoShared;
1888 Assert(pVmcsInfo->hMemObj == NIL_RTR0MEMOBJ);
1889 pVmcsInfo->HCPhysVmcs = NIL_RTHCPHYS;
1890 pVmcsInfo->HCPhysShadowVmcs = NIL_RTHCPHYS;
1891 pVmcsInfo->HCPhysMsrBitmap = NIL_RTHCPHYS;
1892 pVmcsInfo->HCPhysGuestMsrLoad = NIL_RTHCPHYS;
1893 pVmcsInfo->HCPhysGuestMsrStore = NIL_RTHCPHYS;
1894 pVmcsInfo->HCPhysHostMsrLoad = NIL_RTHCPHYS;
1895 pVmcsInfo->HCPhysVirtApic = NIL_RTHCPHYS;
1896 pVmcsInfo->HCPhysEPTP = NIL_RTHCPHYS;
1897 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
1898 pVmcsInfo->idHostCpuState = NIL_RTCPUID;
1899 pVmcsInfo->idHostCpuExec = NIL_RTCPUID;
1900}
1901
1902
1903/**
1904 * Frees the VT-x structures for a VMCS info. object.
1905 *
1906 * @param pVmcsInfo The VMCS info. object.
1907 * @param pVmcsInfoShared The VMCS info. object shared with ring-3.
1908 */
1909static void hmR0VmxVmcsInfoFree(PVMXVMCSINFO pVmcsInfo, PVMXVMCSINFOSHARED pVmcsInfoShared)
1910{
1911 hmR0VmxPagesFree(&pVmcsInfo->hMemObj);
1912 hmR0VmxVmcsInfoInit(pVmcsInfo, pVmcsInfoShared);
1913}
1914
1915
1916/**
1917 * Allocates the VT-x structures for a VMCS info. object.
1918 *
1919 * @returns VBox status code.
1920 * @param pVCpu The cross context virtual CPU structure.
1921 * @param pVmcsInfo The VMCS info. object.
1922 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
1923 *
1924 * @remarks The caller is expected to take care of any and all allocation failures.
1925 * This function will not perform any cleanup for failures half-way
1926 * through.
1927 */
1928static int hmR0VmxAllocVmcsInfo(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
1929{
1930 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1931
1932 bool const fMsrBitmaps = RT_BOOL(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS);
1933 bool const fShadowVmcs = !fIsNstGstVmcs ? pVM->hmr0.s.vmx.fUseVmcsShadowing : pVM->cpum.ro.GuestFeatures.fVmxVmcsShadowing;
1934 Assert(!pVM->cpum.ro.GuestFeatures.fVmxVmcsShadowing); /* VMCS shadowing is not yet exposed to the guest. */
1935 VMXPAGEALLOCINFO aAllocInfo[] =
1936 {
1937 { true, 0 /* Unused */, &pVmcsInfo->HCPhysVmcs, &pVmcsInfo->pvVmcs },
1938 { true, 0 /* Unused */, &pVmcsInfo->HCPhysGuestMsrLoad, &pVmcsInfo->pvGuestMsrLoad },
1939 { true, 0 /* Unused */, &pVmcsInfo->HCPhysHostMsrLoad, &pVmcsInfo->pvHostMsrLoad },
1940 { fMsrBitmaps, 0 /* Unused */, &pVmcsInfo->HCPhysMsrBitmap, &pVmcsInfo->pvMsrBitmap },
1941 { fShadowVmcs, 0 /* Unused */, &pVmcsInfo->HCPhysShadowVmcs, &pVmcsInfo->pvShadowVmcs },
1942 };
1943
1944 int rc = hmR0VmxPagesAllocZ(&pVmcsInfo->hMemObj, &aAllocInfo[0], RT_ELEMENTS(aAllocInfo));
1945 if (RT_FAILURE(rc))
1946 return rc;
1947
1948 /*
1949 * We use the same page for the VM-entry MSR-load and VM-exit MSR-store areas,
1950 * because they contain a symmetric list of guest MSRs to load on VM-entry and store on VM-exit.
1951 */
1952 AssertCompile(RT_ELEMENTS(aAllocInfo) > 0);
1953 Assert(pVmcsInfo->HCPhysGuestMsrLoad != NIL_RTHCPHYS);
1954 pVmcsInfo->pvGuestMsrStore = pVmcsInfo->pvGuestMsrLoad;
1955 pVmcsInfo->HCPhysGuestMsrStore = pVmcsInfo->HCPhysGuestMsrLoad;
1956
1957 /*
1958 * Get the virtual-APIC page rather than allocating it again.
1959 */
1960 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW)
1961 {
1962 if (!fIsNstGstVmcs)
1963 {
1964 if (PDMHasApic(pVM))
1965 {
1966 rc = APICGetApicPageForCpu(pVCpu, &pVmcsInfo->HCPhysVirtApic, (PRTR0PTR)&pVmcsInfo->pbVirtApic, NULL /*pR3Ptr*/);
1967 if (RT_FAILURE(rc))
1968 return rc;
1969 Assert(pVmcsInfo->pbVirtApic);
1970 Assert(pVmcsInfo->HCPhysVirtApic && pVmcsInfo->HCPhysVirtApic != NIL_RTHCPHYS);
1971 }
1972 }
1973 else
1974 {
1975 pVmcsInfo->pbVirtApic = &pVCpu->cpum.GstCtx.hwvirt.vmx.abVirtApicPage[0];
1976 pVmcsInfo->HCPhysVirtApic = GVMMR0ConvertGVMPtr2HCPhys(pVM, pVmcsInfo->pbVirtApic);
1977 Assert(pVmcsInfo->HCPhysVirtApic && pVmcsInfo->HCPhysVirtApic != NIL_RTHCPHYS);
1978 }
1979 }
1980
1981 return VINF_SUCCESS;
1982}
1983
1984
1985/**
1986 * Frees all VT-x structures for the VM.
1987 *
1989 * @param pVM The cross context VM structure.
1990 */
1991static void hmR0VmxStructsFree(PVMCC pVM)
1992{
1993 hmR0VmxPagesFree(&pVM->hmr0.s.vmx.hMemObj);
1994#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1995 if (pVM->hmr0.s.vmx.fUseVmcsShadowing)
1996 {
1997 RTMemFree(pVM->hmr0.s.vmx.paShadowVmcsFields);
1998 pVM->hmr0.s.vmx.paShadowVmcsFields = NULL;
1999 RTMemFree(pVM->hmr0.s.vmx.paShadowVmcsRoFields);
2000 pVM->hmr0.s.vmx.paShadowVmcsRoFields = NULL;
2001 }
2002#endif
2003
2004 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2005 {
2006 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
2007 hmR0VmxVmcsInfoFree(&pVCpu->hmr0.s.vmx.VmcsInfo, &pVCpu->hm.s.vmx.VmcsInfo);
2008#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2009 if (pVM->cpum.ro.GuestFeatures.fVmx)
2010 hmR0VmxVmcsInfoFree(&pVCpu->hmr0.s.vmx.VmcsInfoNstGst, &pVCpu->hm.s.vmx.VmcsInfoNstGst);
2011#endif
2012 }
2013}
2014
2015
2016/**
2017 * Allocates all VT-x structures for the VM.
2018 *
2019 * @returns IPRT status code.
2020 * @param pVM The cross context VM structure.
2021 *
2022 * @remarks This function will clean up on memory allocation failures.
2023 */
2024static int hmR0VmxStructsAlloc(PVMCC pVM)
2025{
2026 /*
2027 * Sanity check the VMCS size reported by the CPU as we assume 4KB allocations.
2028 * The VMCS size cannot be more than 4096 bytes.
2029 *
2030 * See Intel spec. Appendix A.1 "Basic VMX Information".
2031 */
2032 uint32_t const cbVmcs = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_SIZE);
2033 if (cbVmcs <= X86_PAGE_4K_SIZE)
2034 { /* likely */ }
2035 else
2036 {
2037 VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_INVALID_VMCS_SIZE;
2038 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2039 }
2040
2041 /*
2042 * Allocate per-VM VT-x structures.
2043 */
2044 bool const fVirtApicAccess = RT_BOOL(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
2045 bool const fUseVmcsShadowing = pVM->hmr0.s.vmx.fUseVmcsShadowing;
2046 VMXPAGEALLOCINFO aAllocInfo[] =
2047 {
2048 { fVirtApicAccess, 0 /* Unused */, &pVM->hmr0.s.vmx.HCPhysApicAccess, (PRTR0PTR)&pVM->hmr0.s.vmx.pbApicAccess },
2049 { fUseVmcsShadowing, 0 /* Unused */, &pVM->hmr0.s.vmx.HCPhysVmreadBitmap, &pVM->hmr0.s.vmx.pvVmreadBitmap },
2050 { fUseVmcsShadowing, 0 /* Unused */, &pVM->hmr0.s.vmx.HCPhysVmwriteBitmap, &pVM->hmr0.s.vmx.pvVmwriteBitmap },
2051#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2052 { true, 0 /* Unused */, &pVM->hmr0.s.vmx.HCPhysScratch, (PRTR0PTR)&pVM->hmr0.s.vmx.pbScratch },
2053#endif
2054 };
2055
2056 int rc = hmR0VmxPagesAllocZ(&pVM->hmr0.s.vmx.hMemObj, &aAllocInfo[0], RT_ELEMENTS(aAllocInfo));
2057 if (RT_SUCCESS(rc))
2058 {
2059#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2060 /* Allocate the shadow VMCS-fields array. */
2061 if (fUseVmcsShadowing)
2062 {
2063 Assert(!pVM->hmr0.s.vmx.cShadowVmcsFields);
2064 Assert(!pVM->hmr0.s.vmx.cShadowVmcsRoFields);
2065 pVM->hmr0.s.vmx.paShadowVmcsFields = (uint32_t *)RTMemAllocZ(sizeof(g_aVmcsFields));
2066 pVM->hmr0.s.vmx.paShadowVmcsRoFields = (uint32_t *)RTMemAllocZ(sizeof(g_aVmcsFields));
2067 if (!pVM->hmr0.s.vmx.paShadowVmcsFields || !pVM->hmr0.s.vmx.paShadowVmcsRoFields)
2068 rc = VERR_NO_MEMORY;
2069 }
2070#endif
2071
2072 /*
2073 * Allocate per-VCPU VT-x structures.
2074 */
2075 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus && RT_SUCCESS(rc); idCpu++)
2076 {
2077 /* Allocate the guest VMCS structures. */
2078 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
2079 rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hmr0.s.vmx.VmcsInfo, false /* fIsNstGstVmcs */);
2080
2081#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2082 /* Allocate the nested-guest VMCS structures, when the VMX feature is exposed to the guest. */
2083 if (pVM->cpum.ro.GuestFeatures.fVmx && RT_SUCCESS(rc))
2084 rc = hmR0VmxAllocVmcsInfo(pVCpu, &pVCpu->hmr0.s.vmx.VmcsInfoNstGst, true /* fIsNstGstVmcs */);
2085#endif
2086 }
2087 if (RT_SUCCESS(rc))
2088 return VINF_SUCCESS;
2089 }
2090 hmR0VmxStructsFree(pVM);
2091 return rc;
2092}
2093
2094
2095/**
2096 * Pre-initializes non-zero fields in VMX structures that will be allocated.
2097 *
2098 * @param pVM The cross context VM structure.
2099 */
2100static void hmR0VmxStructsInit(PVMCC pVM)
2101{
2102 /* Paranoia. */
2103 Assert(pVM->hmr0.s.vmx.pbApicAccess == NULL);
2104#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2105 Assert(pVM->hmr0.s.vmx.pbScratch == NULL);
2106#endif
2107
2108 /*
2109 * Initialize members up-front so we can clean up en masse on allocation failures.
2110 */
2111#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2112 pVM->hmr0.s.vmx.HCPhysScratch = NIL_RTHCPHYS;
2113#endif
2114 pVM->hmr0.s.vmx.HCPhysApicAccess = NIL_RTHCPHYS;
2115 pVM->hmr0.s.vmx.HCPhysVmreadBitmap = NIL_RTHCPHYS;
2116 pVM->hmr0.s.vmx.HCPhysVmwriteBitmap = NIL_RTHCPHYS;
2117 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2118 {
2119 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
2120 hmR0VmxVmcsInfoInit(&pVCpu->hmr0.s.vmx.VmcsInfo, &pVCpu->hm.s.vmx.VmcsInfo);
2121 hmR0VmxVmcsInfoInit(&pVCpu->hmr0.s.vmx.VmcsInfoNstGst, &pVCpu->hm.s.vmx.VmcsInfoNstGst);
2122 }
2123}
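
/*
 * Illustrative sketch, kept out of the build with #if 0: the intended lifecycle of the
 * per-VM/per-VCPU structure helpers above, presumably driven by the VM init/term
 * paths. The function name is hypothetical.
 */
#if 0
static int hmR0VmxSketchStructsLifecycle(PVMCC pVM)
{
    /* Pre-initialize handles and physical addresses to NIL so a partial failure can be
       cleaned up en masse. */
    hmR0VmxStructsInit(pVM);

    /* Allocate everything; on failure this already calls hmR0VmxStructsFree(). */
    int rc = hmR0VmxStructsAlloc(pVM);
    if (RT_FAILURE(rc))
        return rc;

    /* ... the VM runs using hardware-assisted VMX ... */

    /* Tear it all down again. */
    hmR0VmxStructsFree(pVM);
    return VINF_SUCCESS;
}
#endif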
2124
2125#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2126/**
2127 * Returns whether an MSR at the given MSR-bitmap offset is intercepted or not.
2128 *
2129 * @returns @c true if the MSR is intercepted, @c false otherwise.
2130 * @param pbMsrBitmap The MSR bitmap.
2131 * @param offMsr The MSR byte offset.
2132 * @param iBit The bit offset from the byte offset.
2133 */
2134DECLINLINE(bool) hmR0VmxIsMsrBitSet(uint8_t const *pbMsrBitmap, uint16_t offMsr, int32_t iBit)
2135{
2136 Assert(offMsr + (iBit >> 3) <= X86_PAGE_4K_SIZE);
2137 return ASMBitTest(pbMsrBitmap + offMsr, iBit);
2138}
2139#endif
2140
2141/**
2142 * Sets the permission bits for the specified MSR in the given MSR bitmap.
2143 *
2144 * If the passed VMCS is a nested-guest VMCS, this function ensures that the
2145 * read/write intercept is cleared from the MSR bitmap used for hardware-assisted
2146 * VMX execution of the nested-guest, but only if the nested-guest is not itself
2147 * intercepting read/write access to this MSR.
2148 *
2149 * @param pVCpu The cross context virtual CPU structure.
2150 * @param pVmcsInfo The VMCS info. object.
2151 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
2152 * @param idMsr The MSR value.
2153 * @param fMsrpm The MSR permissions (see VMXMSRPM_XXX). This must
2154 * include both a read -and- a write permission!
2155 *
2156 * @sa CPUMGetVmxMsrPermission.
2157 * @remarks Can be called with interrupts disabled.
2158 */
2159static void hmR0VmxSetMsrPermission(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs, uint32_t idMsr, uint32_t fMsrpm)
2160{
2161 uint8_t *pbMsrBitmap = (uint8_t *)pVmcsInfo->pvMsrBitmap;
2162 Assert(pbMsrBitmap);
2163 Assert(VMXMSRPM_IS_FLAG_VALID(fMsrpm));
2164
2165 /*
2166 * MSR-bitmap Layout:
2167 * Byte index MSR range Interpreted as
2168 * 0x000 - 0x3ff 0x00000000 - 0x00001fff Low MSR read bits.
2169 * 0x400 - 0x7ff 0xc0000000 - 0xc0001fff High MSR read bits.
2170 * 0x800 - 0xbff 0x00000000 - 0x00001fff Low MSR write bits.
2171 * 0xc00 - 0xfff 0xc0000000 - 0xc0001fff High MSR write bits.
2172 *
2173 * A bit corresponding to an MSR within the above range causes a VM-exit
2174 * if the bit is 1 on executions of RDMSR/WRMSR. If an MSR falls outside
2175 * these ranges, it always causes a VM-exit.
2176 *
2177 * See Intel spec. 24.6.9 "MSR-Bitmap Address".
2178 */
2179 uint16_t const offBitmapRead = 0;
2180 uint16_t const offBitmapWrite = 0x800;
2181 uint16_t offMsr;
2182 int32_t iBit;
2183 if (idMsr <= UINT32_C(0x00001fff))
2184 {
2185 offMsr = 0;
2186 iBit = idMsr;
2187 }
2188 else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x00001fff))
2189 {
2190 offMsr = 0x400;
2191 iBit = idMsr - UINT32_C(0xc0000000);
2192 }
2193 else
2194 AssertMsgFailedReturnVoid(("Invalid MSR %#RX32\n", idMsr));
2195
2196 /*
2197 * Set the MSR read permission.
2198 */
2199 uint16_t const offMsrRead = offBitmapRead + offMsr;
2200 Assert(offMsrRead + (iBit >> 3) < offBitmapWrite);
2201 if (fMsrpm & VMXMSRPM_ALLOW_RD)
2202 {
2203#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2204 bool const fClear = !fIsNstGstVmcs ? true
2205 : !hmR0VmxIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, offMsrRead, iBit);
2206#else
2207 RT_NOREF2(pVCpu, fIsNstGstVmcs);
2208 bool const fClear = true;
2209#endif
2210 if (fClear)
2211 ASMBitClear(pbMsrBitmap + offMsrRead, iBit);
2212 }
2213 else
2214 ASMBitSet(pbMsrBitmap + offMsrRead, iBit);
2215
2216 /*
2217 * Set the MSR write permission.
2218 */
2219 uint16_t const offMsrWrite = offBitmapWrite + offMsr;
2220 Assert(offMsrWrite + (iBit >> 3) < X86_PAGE_4K_SIZE);
2221 if (fMsrpm & VMXMSRPM_ALLOW_WR)
2222 {
2223#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2224 bool const fClear = !fIsNstGstVmcs ? true
2225 : !hmR0VmxIsMsrBitSet(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, offMsrWrite, iBit);
2226#else
2227 RT_NOREF2(pVCpu, fIsNstGstVmcs);
2228 bool const fClear = true;
2229#endif
2230 if (fClear)
2231 ASMBitClear(pbMsrBitmap + offMsrWrite, iBit);
2232 }
2233 else
2234 ASMBitSet(pbMsrBitmap + offMsrWrite, iBit);
2235}
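
/*
 * Illustrative sketch, kept out of the build with #if 0: a worked example of the
 * MSR-bitmap layout described above, using MSR_K8_LSTAR (0xc0000082). Being in the
 * "high" MSR range, its read bit lives at byte 0x400 + 130/8 = 0x410, bit 2, and its
 * write bit 0x800 bytes further in. The function name is hypothetical.
 */
#if 0
static void hmR0VmxSketchMsrBitmapLookup(uint8_t const *pbMsrBitmap)
{
    uint32_t const idMsr  = MSR_K8_LSTAR;
    uint16_t const offMsr = 0x400;                          /* High-MSR read bits start here. */
    int32_t  const iBit   = idMsr - UINT32_C(0xc0000000);   /* 0x82 = bit 130. */

    bool const fInterceptRd = ASMBitTest(pbMsrBitmap + offMsr, iBit);
    bool const fInterceptWr = ASMBitTest(pbMsrBitmap + 0x800 + offMsr, iBit);
    Log4Func(("LSTAR: rd-intercept=%RTbool wr-intercept=%RTbool\n", fInterceptRd, fInterceptWr));
}
#endif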
2236
2237
2238/**
2239 * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
2240 * area.
2241 *
2242 * @returns VBox status code.
2243 * @param pVCpu The cross context virtual CPU structure.
2244 * @param pVmcsInfo The VMCS info. object.
2245 * @param cMsrs The number of MSRs.
2246 */
2247static int hmR0VmxSetAutoLoadStoreMsrCount(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint32_t cMsrs)
2248{
2249 /* Shouldn't ever happen, but the hardware -does- impose a limit; we're well within the recommended 512. */
2250 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc);
2251 if (RT_LIKELY(cMsrs < cMaxSupportedMsrs))
2252 {
2253 /* Commit the MSR counts to the VMCS and update the cache. */
2254 if (pVmcsInfo->cEntryMsrLoad != cMsrs)
2255 {
2256 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs); AssertRC(rc);
2257 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs); AssertRC(rc);
2258 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs); AssertRC(rc);
2259 pVmcsInfo->cEntryMsrLoad = cMsrs;
2260 pVmcsInfo->cExitMsrStore = cMsrs;
2261 pVmcsInfo->cExitMsrLoad = cMsrs;
2262 }
2263 return VINF_SUCCESS;
2264 }
2265
2266 LogRel(("Auto-load/store MSR count exceeded! cMsrs=%u MaxSupported=%u\n", cMsrs, cMaxSupportedMsrs));
2267 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
2268 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2269}
2270
2271
2272/**
2273 * Adds a new (or updates the value of an existing) guest/host MSR
2274 * pair to be swapped during the world-switch as part of the
2275 * auto-load/store MSR area in the VMCS.
2276 *
2277 * @returns VBox status code.
2278 * @param pVCpu The cross context virtual CPU structure.
2279 * @param pVmxTransient The VMX-transient structure.
2280 * @param idMsr The MSR.
2281 * @param uGuestMsrValue Value of the guest MSR.
2282 * @param fSetReadWrite Whether to set the guest read/write access of this
2283 * MSR (thus not causing a VM-exit).
2284 * @param fUpdateHostMsr Whether to update the value of the host MSR if
2285 * necessary.
2286 */
2287static int hmR0VmxAddAutoLoadStoreMsr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t idMsr, uint64_t uGuestMsrValue,
2288 bool fSetReadWrite, bool fUpdateHostMsr)
2289{
2290 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2291 bool const fIsNstGstVmcs = pVmxTransient->fIsNestedGuest;
2292 PVMXAUTOMSR pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
2293 uint32_t cMsrs = pVmcsInfo->cEntryMsrLoad;
2294 uint32_t i;
2295
2296 /* Paranoia. */
2297 Assert(pGuestMsrLoad);
2298
2299#ifndef DEBUG_bird
2300 LogFlowFunc(("pVCpu=%p idMsr=%#RX32 uGuestMsrValue=%#RX64\n", pVCpu, idMsr, uGuestMsrValue));
2301#endif
2302
2303 /* Check if the MSR already exists in the VM-entry MSR-load area. */
2304 for (i = 0; i < cMsrs; i++)
2305 {
2306 if (pGuestMsrLoad[i].u32Msr == idMsr)
2307 break;
2308 }
2309
2310 bool fAdded = false;
2311 if (i == cMsrs)
2312 {
2313 /* The MSR does not exist, bump the MSR count to make room for the new MSR. */
2314 ++cMsrs;
2315 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
2316 AssertMsgRCReturn(rc, ("Insufficient space to add MSR to VM-entry MSR-load/store area %u\n", idMsr), rc);
2317
2318 /* Set the guest to read/write this MSR without causing VM-exits. */
2319 if ( fSetReadWrite
2320 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
2321 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_ALLOW_RD_WR);
2322
2323 Log4Func(("Added MSR %#RX32, cMsrs=%u\n", idMsr, cMsrs));
2324 fAdded = true;
2325 }
2326
2327 /* Update the MSR value for the newly added or already existing MSR. */
2328 pGuestMsrLoad[i].u32Msr = idMsr;
2329 pGuestMsrLoad[i].u64Value = uGuestMsrValue;
2330
2331 /* Create the corresponding slot in the VM-exit MSR-store area if we use a different page. */
2332 if (hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
2333 {
2334 PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
2335 pGuestMsrStore[i].u32Msr = idMsr;
2336 pGuestMsrStore[i].u64Value = uGuestMsrValue;
2337 }
2338
2339 /* Update the corresponding slot in the host MSR area. */
2340 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
2341 Assert(pHostMsr != pVmcsInfo->pvGuestMsrLoad);
2342 Assert(pHostMsr != pVmcsInfo->pvGuestMsrStore);
2343 pHostMsr[i].u32Msr = idMsr;
2344
2345 /*
2346 * Only if the caller requests to update the host MSR value AND we've newly added the
2347 * MSR to the host MSR area do we actually update the value. Otherwise, it will be
2348 * updated by hmR0VmxUpdateAutoLoadHostMsrs().
2349 *
2350 * We do this for performance reasons since reading MSRs may be quite expensive.
2351 */
2352 if (fAdded)
2353 {
2354 if (fUpdateHostMsr)
2355 {
2356 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2357 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2358 pHostMsr[i].u64Value = ASMRdMsr(idMsr);
2359 }
2360 else
2361 {
2362 /* Someone else can do the work. */
2363 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
2364 }
2365 }
2366 return VINF_SUCCESS;
2367}
2368
2369
2370/**
2371 * Removes a guest/host MSR pair to be swapped during the world-switch from the
2372 * auto-load/store MSR area in the VMCS.
2373 *
2374 * @returns VBox status code.
2375 * @param pVCpu The cross context virtual CPU structure.
2376 * @param pVmxTransient The VMX-transient structure.
2377 * @param idMsr The MSR.
2378 */
2379static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t idMsr)
2380{
2381 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
2382 bool const fIsNstGstVmcs = pVmxTransient->fIsNestedGuest;
2383 PVMXAUTOMSR pGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
2384 uint32_t cMsrs = pVmcsInfo->cEntryMsrLoad;
2385
2386#ifndef DEBUG_bird
2387 LogFlowFunc(("pVCpu=%p idMsr=%#RX32\n", pVCpu, idMsr));
2388#endif
2389
2390 for (uint32_t i = 0; i < cMsrs; i++)
2391 {
2392 /* Find the MSR. */
2393 if (pGuestMsrLoad[i].u32Msr == idMsr)
2394 {
2395 /*
2396 * If it's the last MSR, we only need to reduce the MSR count.
2397 * If it's -not- the last MSR, copy the last MSR in place of it and reduce the MSR count.
2398 */
2399 if (i < cMsrs - 1)
2400 {
2401 /* Remove it from the VM-entry MSR-load area. */
2402 pGuestMsrLoad[i].u32Msr = pGuestMsrLoad[cMsrs - 1].u32Msr;
2403 pGuestMsrLoad[i].u64Value = pGuestMsrLoad[cMsrs - 1].u64Value;
2404
2405 /* Remove it from the VM-exit MSR-store area if it's in a different page. */
2406 if (hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
2407 {
2408 PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
2409 Assert(pGuestMsrStore[i].u32Msr == idMsr);
2410 pGuestMsrStore[i].u32Msr = pGuestMsrStore[cMsrs - 1].u32Msr;
2411 pGuestMsrStore[i].u64Value = pGuestMsrStore[cMsrs - 1].u64Value;
2412 }
2413
2414 /* Remove it from the VM-exit MSR-load area. */
2415 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
2416 Assert(pHostMsr[i].u32Msr == idMsr);
2417 pHostMsr[i].u32Msr = pHostMsr[cMsrs - 1].u32Msr;
2418 pHostMsr[i].u64Value = pHostMsr[cMsrs - 1].u64Value;
2419 }
2420
2421 /* Reduce the count to reflect the removed MSR and bail. */
2422 --cMsrs;
2423 break;
2424 }
2425 }
2426
2427 /* Update the VMCS if the count changed (meaning the MSR was found and removed). */
2428 if (cMsrs != pVmcsInfo->cEntryMsrLoad)
2429 {
2430 int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
2431 AssertRCReturn(rc, rc);
2432
2433 /* We're no longer swapping MSRs during the world-switch; intercept guest reads/writes to them. */
2434 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
2435 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, fIsNstGstVmcs, idMsr, VMXMSRPM_EXIT_RD | VMXMSRPM_EXIT_WR);
2436
2437 Log4Func(("Removed MSR %#RX32, cMsrs=%u\n", idMsr, cMsrs));
2438 return VINF_SUCCESS;
2439 }
2440
2441 return VERR_NOT_FOUND;
2442}
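
/*
 * Illustrative sketch, kept out of the build with #if 0: pairing the add/remove
 * helpers above to temporarily swap a guest MSR (MSR_K8_TSC_AUX here) during the
 * world-switch. The function name and the parameter choices are hypothetical and
 * purely for illustration.
 */
#if 0
static int hmR0VmxSketchSwapTscAux(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint64_t uGuestTscAux)
{
    /* Add (or update) the guest TSC_AUX value in the auto-load/store area, let the guest
       access it without VM-exits and leave the host value to be refreshed lazily by
       hmR0VmxUpdateAutoLoadHostMsrs(). */
    int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_TSC_AUX, uGuestTscAux,
                                        true /* fSetReadWrite */, false /* fUpdateHostMsr */);
    AssertRCReturn(rc, rc);

    /* ... later, when the MSR no longer needs to be swapped ... */
    return hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_TSC_AUX);
}
#endif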
2443
2444
2445/**
2446 * Checks if the specified guest MSR is part of the VM-entry MSR-load area.
2447 *
2448 * @returns @c true if found, @c false otherwise.
2449 * @param pVmcsInfo The VMCS info. object.
2450 * @param idMsr The MSR to find.
2451 */
2452static bool hmR0VmxIsAutoLoadGuestMsr(PCVMXVMCSINFO pVmcsInfo, uint32_t idMsr)
2453{
2454 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
2455 uint32_t const cMsrs = pVmcsInfo->cEntryMsrLoad;
2456 Assert(pMsrs);
2457 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
2458 for (uint32_t i = 0; i < cMsrs; i++)
2459 {
2460 if (pMsrs[i].u32Msr == idMsr)
2461 return true;
2462 }
2463 return false;
2464}
2465
2466
2467/**
2468 * Updates the value of all host MSRs in the VM-exit MSR-load area.
2469 *
2470 * @param pVCpu The cross context virtual CPU structure.
2471 * @param pVmcsInfo The VMCS info. object.
2472 *
2473 * @remarks No-long-jump zone!!!
2474 */
2475static void hmR0VmxUpdateAutoLoadHostMsrs(PCVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
2476{
2477 RT_NOREF(pVCpu);
2478 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2479
2480 PVMXAUTOMSR pHostMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
2481 uint32_t const cMsrs = pVmcsInfo->cExitMsrLoad;
2482 Assert(pHostMsrLoad);
2483 Assert(sizeof(*pHostMsrLoad) * cMsrs <= X86_PAGE_4K_SIZE);
2484 LogFlowFunc(("pVCpu=%p cMsrs=%u\n", pVCpu, cMsrs));
2485 for (uint32_t i = 0; i < cMsrs; i++)
2486 {
2487 /*
2488 * Performance hack for the host EFER MSR. We use the cached value rather than re-reading it.
2489 * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
2490 */
2491 if (pHostMsrLoad[i].u32Msr == MSR_K6_EFER)
2492 pHostMsrLoad[i].u64Value = g_uHmVmxHostMsrEfer;
2493 else
2494 pHostMsrLoad[i].u64Value = ASMRdMsr(pHostMsrLoad[i].u32Msr);
2495 }
2496}
2497
2498
2499/**
2500 * Saves a set of host MSRs to allow read/write passthru access to the guest and
2501 * perform lazy restoration of the host MSRs while leaving VT-x.
2502 *
2503 * @param pVCpu The cross context virtual CPU structure.
2504 *
2505 * @remarks No-long-jump zone!!!
2506 */
2507static void hmR0VmxLazySaveHostMsrs(PVMCPUCC pVCpu)
2508{
2509 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2510
2511 /*
2512 * Note: If you're adding MSRs here, make sure to update the MSR-bitmap accesses in hmR0VmxSetupVmcsProcCtls().
2513 */
2514 if (!(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST))
2515 {
2516 Assert(!(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)); /* Guest MSRs better not be loaded now. */
2517 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fAllow64BitGuests)
2518 {
2519 pVCpu->hmr0.s.vmx.u64HostMsrLStar = ASMRdMsr(MSR_K8_LSTAR);
2520 pVCpu->hmr0.s.vmx.u64HostMsrStar = ASMRdMsr(MSR_K6_STAR);
2521 pVCpu->hmr0.s.vmx.u64HostMsrSfMask = ASMRdMsr(MSR_K8_SF_MASK);
2522 pVCpu->hmr0.s.vmx.u64HostMsrKernelGsBase = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
2523 }
2524 pVCpu->hmr0.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
2525 }
2526}
2527
2528
2529/**
2530 * Checks whether the MSR belongs to the set of guest MSRs that we restore
2531 * lazily while leaving VT-x.
2532 *
2533 * @returns true if it does, false otherwise.
2534 * @param pVCpu The cross context virtual CPU structure.
2535 * @param idMsr The MSR to check.
2536 */
2537static bool hmR0VmxIsLazyGuestMsr(PCVMCPUCC pVCpu, uint32_t idMsr)
2538{
2539 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fAllow64BitGuests)
2540 {
2541 switch (idMsr)
2542 {
2543 case MSR_K8_LSTAR:
2544 case MSR_K6_STAR:
2545 case MSR_K8_SF_MASK:
2546 case MSR_K8_KERNEL_GS_BASE:
2547 return true;
2548 }
2549 }
2550 return false;
2551}
2552
2553
2554/**
2555 * Loads a set of guest MSRs to allow read/write passthru to the guest.
2556 *
2557 * The name of this function is slightly confusing. This function does NOT
2558 * postpone loading, but loads the MSRs right now. "hmR0VmxLazy" is simply a
2559 * common prefix for functions dealing with "lazy restoration" of the shared
2560 * MSRs.
2561 *
2562 * @param pVCpu The cross context virtual CPU structure.
2563 *
2564 * @remarks No-long-jump zone!!!
2565 */
2566static void hmR0VmxLazyLoadGuestMsrs(PVMCPUCC pVCpu)
2567{
2568 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2569 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2570
2571 Assert(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
2572 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fAllow64BitGuests)
2573 {
2574 /*
2575 * If the guest MSRs are not loaded -and- if all the guest MSRs are identical
2576 * to the MSRs on the CPU (which are the saved host MSRs, see assertion above) then
2577 * we can skip a few MSR writes.
2578 *
2579 * Otherwise, it implies either 1. they're not loaded, or 2. they're loaded but the
2580 * guest MSR values in the guest-CPU context might be different to what's currently
2581 * loaded in the CPU. In either case, we need to write the new guest MSR values to the
2582 * CPU, see @bugref{8728}.
2583 */
2584 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
2585 if ( !(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
2586 && pCtx->msrKERNELGSBASE == pVCpu->hmr0.s.vmx.u64HostMsrKernelGsBase
2587 && pCtx->msrLSTAR == pVCpu->hmr0.s.vmx.u64HostMsrLStar
2588 && pCtx->msrSTAR == pVCpu->hmr0.s.vmx.u64HostMsrStar
2589 && pCtx->msrSFMASK == pVCpu->hmr0.s.vmx.u64HostMsrSfMask)
2590 {
2591#ifdef VBOX_STRICT
2592 Assert(ASMRdMsr(MSR_K8_KERNEL_GS_BASE) == pCtx->msrKERNELGSBASE);
2593 Assert(ASMRdMsr(MSR_K8_LSTAR) == pCtx->msrLSTAR);
2594 Assert(ASMRdMsr(MSR_K6_STAR) == pCtx->msrSTAR);
2595 Assert(ASMRdMsr(MSR_K8_SF_MASK) == pCtx->msrSFMASK);
2596#endif
2597 }
2598 else
2599 {
2600 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE);
2601 ASMWrMsr(MSR_K8_LSTAR, pCtx->msrLSTAR);
2602 ASMWrMsr(MSR_K6_STAR, pCtx->msrSTAR);
2603 /* The system call flag mask register isn't as benign and accepting of all
2604 values as the above, so mask it to avoid #GP'ing on corrupted input. */
2605 Assert(!(pCtx->msrSFMASK & ~(uint64_t)UINT32_MAX));
2606 ASMWrMsr(MSR_K8_SF_MASK, pCtx->msrSFMASK & UINT32_MAX);
2607 }
2608 }
2609 pVCpu->hmr0.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
2610}
2611
2612
2613/**
2614 * Performs lazy restoration of the set of host MSRs if they were previously
2615 * loaded with guest MSR values.
2616 *
2617 * @param pVCpu The cross context virtual CPU structure.
2618 *
2619 * @remarks No-long-jump zone!!!
2620 * @remarks The guest MSRs should have been saved back into the guest-CPU
2621 * context by hmR0VmxImportGuestState()!!!
2622 */
2623static void hmR0VmxLazyRestoreHostMsrs(PVMCPUCC pVCpu)
2624{
2625 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2626 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
2627
2628 if (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
2629 {
2630 Assert(pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
2631 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fAllow64BitGuests)
2632 {
2633 ASMWrMsr(MSR_K8_LSTAR, pVCpu->hmr0.s.vmx.u64HostMsrLStar);
2634 ASMWrMsr(MSR_K6_STAR, pVCpu->hmr0.s.vmx.u64HostMsrStar);
2635 ASMWrMsr(MSR_K8_SF_MASK, pVCpu->hmr0.s.vmx.u64HostMsrSfMask);
2636 ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hmr0.s.vmx.u64HostMsrKernelGsBase);
2637 }
2638 }
2639 pVCpu->hmr0.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);
2640}
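
/*
 * Illustrative sketch, kept out of the build with #if 0: the order in which the lazy
 * MSR helpers above are meant to be used around guest execution; the VMX_LAZY_MSRS_XXX
 * flags track which step has been performed. The function name is hypothetical and the
 * surrounding steps are elided.
 */
#if 0
static void hmR0VmxSketchLazyMsrLifecycle(PVMCPUCC pVCpu)
{
    /* 1. While preparing for guest execution: snapshot the host values
          (sets VMX_LAZY_MSRS_SAVED_HOST). */
    hmR0VmxLazySaveHostMsrs(pVCpu);

    /* 2. Before VM-entry: write the guest values to the CPU if they differ from what is
          already loaded (sets VMX_LAZY_MSRS_LOADED_GUEST). */
    hmR0VmxLazyLoadGuestMsrs(pVCpu);

    /* ... VM-entry, guest execution, VM-exit ... */

    /* 3. When leaving VT-x, after the guest values have been saved back into the
          guest-CPU context: restore the host values and clear both flags. */
    hmR0VmxLazyRestoreHostMsrs(pVCpu);
}
#endif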
2641
2642
2643/**
2644 * Verifies that our cached values of the VMCS fields are all consistent with
2645 * what's actually present in the VMCS.
2646 *
2647 * @returns VBox status code.
2648 * @retval VINF_SUCCESS if all our caches match their respective VMCS fields.
2649 * @retval VERR_VMX_VMCS_FIELD_CACHE_INVALID if a cache field doesn't match the
2650 * VMCS content. HMCPU error-field is
2651 * updated, see VMX_VCI_XXX.
2652 * @param pVCpu The cross context virtual CPU structure.
2653 * @param pVmcsInfo The VMCS info. object.
2654 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
2655 */
2656static int hmR0VmxCheckCachedVmcsCtls(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
2657{
2658 const char * const pcszVmcs = fIsNstGstVmcs ? "Nested-guest VMCS" : "VMCS";
2659
2660 uint32_t u32Val;
2661 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
2662 AssertRC(rc);
2663 AssertMsgReturnStmt(pVmcsInfo->u32EntryCtls == u32Val,
2664 ("%s entry controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32EntryCtls, u32Val),
2665 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_ENTRY,
2666 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2667
2668 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
2669 AssertRC(rc);
2670 AssertMsgReturnStmt(pVmcsInfo->u32ExitCtls == u32Val,
2671 ("%s exit controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ExitCtls, u32Val),
2672 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_EXIT,
2673 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2674
2675 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
2676 AssertRC(rc);
2677 AssertMsgReturnStmt(pVmcsInfo->u32PinCtls == u32Val,
2678 ("%s pin controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32PinCtls, u32Val),
2679 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PIN_EXEC,
2680 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2681
2682 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
2683 AssertRC(rc);
2684 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls == u32Val,
2685 ("%s proc controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls, u32Val),
2686 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC,
2687 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2688
2689 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
2690 {
2691 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
2692 AssertRC(rc);
2693 AssertMsgReturnStmt(pVmcsInfo->u32ProcCtls2 == u32Val,
2694 ("%s proc2 controls mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32ProcCtls2, u32Val),
2695 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC2,
2696 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2697 }
2698
2699 uint64_t u64Val;
2700 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
2701 {
2702 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_PROC_EXEC3_FULL, &u64Val);
2703 AssertRC(rc);
2704 AssertMsgReturnStmt(pVmcsInfo->u64ProcCtls3 == u64Val,
2705 ("%s proc3 controls mismatch: Cache=%#RX32 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64ProcCtls3, u64Val),
2706 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_PROC_EXEC3,
2707 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2708 }
2709
2710 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val);
2711 AssertRC(rc);
2712 AssertMsgReturnStmt(pVmcsInfo->u32XcptBitmap == u32Val,
2713 ("%s exception bitmap mismatch: Cache=%#RX32 VMCS=%#RX32\n", pcszVmcs, pVmcsInfo->u32XcptBitmap, u32Val),
2714 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_XCPT_BITMAP,
2715 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2716
2717 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, &u64Val);
2718 AssertRC(rc);
2719 AssertMsgReturnStmt(pVmcsInfo->u64TscOffset == u64Val,
2720 ("%s TSC offset mismatch: Cache=%#RX64 VMCS=%#RX64\n", pcszVmcs, pVmcsInfo->u64TscOffset, u64Val),
2721 pVCpu->hm.s.u32HMError = VMX_VCI_CTRL_TSC_OFFSET,
2722 VERR_VMX_VMCS_FIELD_CACHE_INVALID);
2723
2724 NOREF(pcszVmcs);
2725 return VINF_SUCCESS;
2726}
2727
2728#ifdef VBOX_STRICT
2729
2730/**
2731 * Verifies that our cached host EFER MSR value has not changed since we cached it.
2732 *
2733 * @param pVmcsInfo The VMCS info. object.
2734 */
2735static void hmR0VmxCheckHostEferMsr(PCVMXVMCSINFO pVmcsInfo)
2736{
2737 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2738
2739 if (pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
2740 {
2741 uint64_t const uHostEferMsr = ASMRdMsr(MSR_K6_EFER);
2742 uint64_t const uHostEferMsrCache = g_uHmVmxHostMsrEfer;
2743 uint64_t uVmcsEferMsrVmcs;
2744 int rc = VMXReadVmcs64(VMX_VMCS64_HOST_EFER_FULL, &uVmcsEferMsrVmcs);
2745 AssertRC(rc);
2746
2747 AssertMsgReturnVoid(uHostEferMsr == uVmcsEferMsrVmcs,
2748 ("EFER Host/VMCS mismatch! host=%#RX64 vmcs=%#RX64\n", uHostEferMsr, uVmcsEferMsrVmcs));
2749 AssertMsgReturnVoid(uHostEferMsr == uHostEferMsrCache,
2750 ("EFER Host/Cache mismatch! host=%#RX64 cache=%#RX64\n", uHostEferMsr, uHostEferMsrCache));
2751 }
2752}
2753
2754
2755/**
2756 * Verifies whether the guest/host MSR pairs in the auto-load/store area in the
2757 * VMCS are correct.
2758 *
2759 * @param pVCpu The cross context virtual CPU structure.
2760 * @param pVmcsInfo The VMCS info. object.
2761 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
2762 */
2763static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
2764{
2765 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2766
2767 /* Read the various MSR-area counts from the VMCS. */
2768 uint32_t cEntryLoadMsrs;
2769 uint32_t cExitStoreMsrs;
2770 uint32_t cExitLoadMsrs;
2771 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cEntryLoadMsrs); AssertRC(rc);
2772 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cExitStoreMsrs); AssertRC(rc);
2773 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cExitLoadMsrs); AssertRC(rc);
2774
2775 /* Verify all the MSR counts are the same. */
2776 Assert(cEntryLoadMsrs == cExitStoreMsrs);
2777 Assert(cExitStoreMsrs == cExitLoadMsrs);
2778 uint32_t const cMsrs = cExitLoadMsrs;
2779
2780 /* Verify the MSR counts do not exceed the maximum count supported by the hardware. */
2781 Assert(cMsrs < VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
2782
2783 /* Verify the MSR counts are within the allocated page size. */
2784 Assert(sizeof(VMXAUTOMSR) * cMsrs <= X86_PAGE_4K_SIZE);
2785
2786 /* Verify the relevant contents of the MSR areas match. */
2787 PCVMXAUTOMSR pGuestMsrLoad = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
2788 PCVMXAUTOMSR pGuestMsrStore = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
2789 PCVMXAUTOMSR pHostMsrLoad = (PCVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
2790 bool const fSeparateExitMsrStorePage = hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo);
2791 for (uint32_t i = 0; i < cMsrs; i++)
2792 {
2793 /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */
2794 if (fSeparateExitMsrStorePage)
2795 {
2796 AssertMsgReturnVoid(pGuestMsrLoad->u32Msr == pGuestMsrStore->u32Msr,
2797 ("GuestMsrLoad=%#RX32 GuestMsrStore=%#RX32 cMsrs=%u\n",
2798 pGuestMsrLoad->u32Msr, pGuestMsrStore->u32Msr, cMsrs));
2799 }
2800
2801 AssertMsgReturnVoid(pHostMsrLoad->u32Msr == pGuestMsrLoad->u32Msr,
2802 ("HostMsrLoad=%#RX32 GuestMsrLoad=%#RX32 cMsrs=%u\n",
2803 pHostMsrLoad->u32Msr, pGuestMsrLoad->u32Msr, cMsrs));
2804
2805 uint64_t const u64HostMsr = ASMRdMsr(pHostMsrLoad->u32Msr);
2806 AssertMsgReturnVoid(pHostMsrLoad->u64Value == u64HostMsr,
2807 ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
2808 pHostMsrLoad->u32Msr, pHostMsrLoad->u64Value, u64HostMsr, cMsrs));
2809
2810 /* Verify that cached host EFER MSR matches what's loaded on the CPU. */
2811 bool const fIsEferMsr = RT_BOOL(pHostMsrLoad->u32Msr == MSR_K6_EFER);
2812 AssertMsgReturnVoid(!fIsEferMsr || u64HostMsr == g_uHmVmxHostMsrEfer,
2813 ("Cached=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n", g_uHmVmxHostMsrEfer, u64HostMsr, cMsrs));
2814
2815 /* Verify that the accesses are as expected in the MSR bitmap for auto-load/store MSRs. */
2816 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
2817 {
2818 uint32_t const fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, pGuestMsrLoad->u32Msr);
2819 if (fIsEferMsr)
2820 {
2821 AssertMsgReturnVoid((fMsrpm & VMXMSRPM_EXIT_RD), ("Passthru read for EFER MSR!?\n"));
2822 AssertMsgReturnVoid((fMsrpm & VMXMSRPM_EXIT_WR), ("Passthru write for EFER MSR!?\n"));
2823 }
2824 else
2825 {
2826 /* Verify LBR MSRs (used only for debugging) are intercepted. We don't passthru these MSRs to the guest yet. */
2827 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
2828 if ( pVM->hmr0.s.vmx.fLbr
2829 && ( hmR0VmxIsLbrBranchFromMsr(pVM, pGuestMsrLoad->u32Msr, NULL /* pidxMsr */)
2830 || hmR0VmxIsLbrBranchToMsr(pVM, pGuestMsrLoad->u32Msr, NULL /* pidxMsr */)
2831 || pGuestMsrLoad->u32Msr == pVM->hmr0.s.vmx.idLbrTosMsr))
2832 {
2833 AssertMsgReturnVoid((fMsrpm & VMXMSRPM_MASK) == VMXMSRPM_EXIT_RD_WR,
2834 ("u32Msr=%#RX32 cMsrs=%u Passthru read/write for LBR MSRs!\n",
2835 pGuestMsrLoad->u32Msr, cMsrs));
2836 }
2837 else if (!fIsNstGstVmcs)
2838 {
2839 AssertMsgReturnVoid((fMsrpm & VMXMSRPM_MASK) == VMXMSRPM_ALLOW_RD_WR,
2840 ("u32Msr=%#RX32 cMsrs=%u No passthru read/write!\n", pGuestMsrLoad->u32Msr, cMsrs));
2841 }
2842 else
2843 {
2844 /*
2845 * A nested-guest VMCS must -also- allow read/write passthrough for the MSR for us to
2846 * execute a nested-guest with MSR passthrough.
2847 *
2848 * Check if the nested-guest MSR bitmap allows passthrough, and if so, assert that we
2849 * allow passthrough too.
2850 */
2851 void const *pvMsrBitmapNstGst = pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap;
2852 Assert(pvMsrBitmapNstGst);
2853 uint32_t const fMsrpmNstGst = CPUMGetVmxMsrPermission(pvMsrBitmapNstGst, pGuestMsrLoad->u32Msr);
2854 AssertMsgReturnVoid(fMsrpm == fMsrpmNstGst,
2855 ("u32Msr=%#RX32 cMsrs=%u Permission mismatch fMsrpm=%#x fMsrpmNstGst=%#x!\n",
2856 pGuestMsrLoad->u32Msr, cMsrs, fMsrpm, fMsrpmNstGst));
2857 }
2858 }
2859 }
2860
2861 /* Move to the next MSR. */
2862 pHostMsrLoad++;
2863 pGuestMsrLoad++;
2864 pGuestMsrStore++;
2865 }
2866}
2867
2868#endif /* VBOX_STRICT */
2869
2870/**
2871 * Flushes the TLB using EPT.
2872 *
2873 * @returns VBox status code.
2874 * @param pVCpu The cross context virtual CPU structure of the calling
2875 * EMT. Can be NULL depending on @a enmTlbFlush.
2876 * @param pVmcsInfo The VMCS info. object. Can be NULL depending on @a
2877 * enmTlbFlush.
2878 * @param enmTlbFlush Type of flush.
2879 *
2880 * @remarks Caller is responsible for making sure this function is called only
2881 * when NestedPaging is supported and providing @a enmTlbFlush that is
2882 * supported by the CPU.
2883 * @remarks Can be called with interrupts disabled.
2884 */
2885static void hmR0VmxFlushEpt(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, VMXTLBFLUSHEPT enmTlbFlush)
2886{
2887 uint64_t au64Descriptor[2];
2888 if (enmTlbFlush == VMXTLBFLUSHEPT_ALL_CONTEXTS)
2889 au64Descriptor[0] = 0;
2890 else
2891 {
2892 Assert(pVCpu);
2893 Assert(pVmcsInfo);
2894 au64Descriptor[0] = pVmcsInfo->HCPhysEPTP;
2895 }
2896 au64Descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
2897
2898 int rc = VMXR0InvEPT(enmTlbFlush, &au64Descriptor[0]);
2899 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %#RHp failed. rc=%Rrc\n", enmTlbFlush, au64Descriptor[0], rc));
2900
2901 if ( RT_SUCCESS(rc)
2902 && pVCpu)
2903 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
2904}
2905
2906
2907/**
2908 * Flushes the TLB using VPID.
2909 *
2910 * @returns VBox status code.
2911 * @param pVCpu The cross context virtual CPU structure of the calling
2912 * EMT. Can be NULL depending on @a enmTlbFlush.
2913 * @param enmTlbFlush Type of flush.
2914 * @param GCPtr Virtual address of the page to flush (can be 0 depending
2915 * on @a enmTlbFlush).
2916 *
2917 * @remarks Can be called with interrupts disabled.
2918 */
2919static void hmR0VmxFlushVpid(PVMCPUCC pVCpu, VMXTLBFLUSHVPID enmTlbFlush, RTGCPTR GCPtr)
2920{
2921 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fVpid);
2922
2923 uint64_t au64Descriptor[2];
2924 if (enmTlbFlush == VMXTLBFLUSHVPID_ALL_CONTEXTS)
2925 {
2926 au64Descriptor[0] = 0;
2927 au64Descriptor[1] = 0;
2928 }
2929 else
2930 {
2931 AssertPtr(pVCpu);
2932 AssertMsg(pVCpu->hmr0.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hmr0.s.uCurrentAsid));
2933 AssertMsg(pVCpu->hmr0.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hmr0.s.uCurrentAsid));
2934 au64Descriptor[0] = pVCpu->hmr0.s.uCurrentAsid;
2935 au64Descriptor[1] = GCPtr;
2936 }
2937
2938 int rc = VMXR0InvVPID(enmTlbFlush, &au64Descriptor[0]);
2939 AssertMsg(rc == VINF_SUCCESS,
2940 ("VMXR0InvVPID %#x %u %RGv failed with %Rrc\n", enmTlbFlush, pVCpu ? pVCpu->hmr0.s.uCurrentAsid : 0, GCPtr, rc));
2941
2942 if ( RT_SUCCESS(rc)
2943 && pVCpu)
2944 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
2945 NOREF(rc);
2946}
2947
2948
2949/**
2950 * Invalidates a guest page by guest virtual address. Only relevant for EPT/VPID,
2951 * otherwise there is nothing really to invalidate.
2952 *
2953 * @returns VBox status code.
2954 * @param pVCpu The cross context virtual CPU structure.
2955 * @param GCVirt Guest virtual address of the page to invalidate.
2956 */
2957VMMR0DECL(int) VMXR0InvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCVirt)
2958{
2959 AssertPtr(pVCpu);
2960 LogFlowFunc(("pVCpu=%p GCVirt=%RGv\n", pVCpu, GCVirt));
2961
2962 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
2963 {
2964 /*
2965 * We must invalidate the guest TLB entry in either case; we cannot ignore it even for
2966 * the EPT case. See @bugref{6043} and @bugref{6177}.
2967 *
2968 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*()
2969 * as this function maybe called in a loop with individual addresses.
2970 * as this function may be called in a loop with individual addresses.
2971 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2972 if (pVM->hmr0.s.vmx.fVpid)
2973 {
2974 if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2975 {
2976 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_INDIV_ADDR, GCVirt);
2977 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
2978 }
2979 else
2980 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2981 }
2982 else if (pVM->hmr0.s.fNestedPaging)
2983 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
2984 }
2985
2986 return VINF_SUCCESS;
2987}
2988
2989
2990/**
2991 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
2992 * case where neither EPT nor VPID is supported by the CPU.
2993 *
2994 * @param pHostCpu The HM physical-CPU structure.
2995 * @param pVCpu The cross context virtual CPU structure.
2996 *
2997 * @remarks Called with interrupts disabled.
2998 */
2999static void hmR0VmxFlushTaggedTlbNone(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu)
3000{
3001 AssertPtr(pVCpu);
3002 AssertPtr(pHostCpu);
3003
3004 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
3005
3006 Assert(pHostCpu->idCpu != NIL_RTCPUID);
3007 pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
3008 pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes;
3009 pVCpu->hmr0.s.fForceTLBFlush = false;
3010 return;
3011}
3012
3013
3014/**
3015 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
3016 *
3017 * @param pHostCpu The HM physical-CPU structure.
3018 * @param pVCpu The cross context virtual CPU structure.
3019 * @param pVmcsInfo The VMCS info. object.
3020 *
3021 * @remarks All references to "ASID" in this function pertain to "VPID" in Intel's
3022 * nomenclature; this is to avoid confusion in comparisons since the
3023 * host-CPU copies are named "ASID".
3024 *
3025 * @remarks Called with interrupts disabled.
3026 */
3027static void hmR0VmxFlushTaggedTlbBoth(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3028{
3029#ifdef VBOX_WITH_STATISTICS
3030 bool fTlbFlushed = false;
3031# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { fTlbFlushed = true; } while (0)
3032# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { \
3033 if (!fTlbFlushed) \
3034 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
3035 } while (0)
3036#else
3037# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { } while (0)
3038# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { } while (0)
3039#endif
3040
3041 AssertPtr(pVCpu);
3042 AssertPtr(pHostCpu);
3043 Assert(pHostCpu->idCpu != NIL_RTCPUID);
3044
3045 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3046 AssertMsg(pVM->hmr0.s.fNestedPaging && pVM->hmr0.s.vmx.fVpid,
3047 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
3048 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hmr0.s.fNestedPaging, pVM->hmr0.s.vmx.fVpid));
3049
3050 /*
3051 * Force a TLB flush for the first world-switch if the current CPU differs from the one we
3052 * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID
3053 * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we
3054 * cannot reuse the current ASID anymore.
3055 */
3056 if ( pVCpu->hmr0.s.idLastCpu != pHostCpu->idCpu
3057 || pVCpu->hmr0.s.cTlbFlushes != pHostCpu->cTlbFlushes)
3058 {
3059 ++pHostCpu->uCurrentAsid;
3060 if (pHostCpu->uCurrentAsid >= g_uHmMaxAsid)
3061 {
3062 pHostCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */
3063 pHostCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
3064 pHostCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
3065 }
3066
3067 pVCpu->hmr0.s.uCurrentAsid = pHostCpu->uCurrentAsid;
3068 pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
3069 pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes;
3070
3071 /*
3072 * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
3073 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
3074 */
3075 hmR0VmxFlushEpt(pVCpu, pVmcsInfo, pVM->hmr0.s.vmx.enmTlbFlushEpt);
3076 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
3077 HMVMX_SET_TAGGED_TLB_FLUSHED();
3078 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
3079 }
3080 else if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH)) /* Check for explicit TLB flushes. */
3081 {
3082 /*
3083         * Changes to the EPT paging structure by the VMM require flushing-by-EPT as the CPU
3084         * creates guest-physical (i.e. only EPT-tagged) mappings while traversing the EPT
3085 * tables when EPT is in use. Flushing-by-VPID will only flush linear (only
3086 * VPID-tagged) and combined (EPT+VPID tagged) mappings but not guest-physical
3087 * mappings, see @bugref{6568}.
3088 *
3089 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information".
3090 */
3091 hmR0VmxFlushEpt(pVCpu, pVmcsInfo, pVM->hmr0.s.vmx.enmTlbFlushEpt);
3092 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
3093 HMVMX_SET_TAGGED_TLB_FLUSHED();
3094 }
3095 else if (pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb)
3096 {
3097 /*
3098 * The nested-guest specifies its own guest-physical address to use as the APIC-access
3099 * address which requires flushing the TLB of EPT cached structures.
3100 *
3101 * See Intel spec. 28.3.3.4 "Guidelines for Use of the INVEPT Instruction".
3102 */
3103 hmR0VmxFlushEpt(pVCpu, pVmcsInfo, pVM->hmr0.s.vmx.enmTlbFlushEpt);
3104 pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb = false;
3105 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbNstGst);
3106 HMVMX_SET_TAGGED_TLB_FLUSHED();
3107 }
3108
3109
3110 pVCpu->hmr0.s.fForceTLBFlush = false;
3111 HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
3112
3113 Assert(pVCpu->hmr0.s.idLastCpu == pHostCpu->idCpu);
3114 Assert(pVCpu->hmr0.s.cTlbFlushes == pHostCpu->cTlbFlushes);
3115 AssertMsg(pVCpu->hmr0.s.cTlbFlushes == pHostCpu->cTlbFlushes,
3116 ("Flush count mismatch for cpu %d (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hmr0.s.cTlbFlushes, pHostCpu->cTlbFlushes));
3117 AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < g_uHmMaxAsid,
3118 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu,
3119 pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hmr0.s.idLastCpu, pVCpu->hmr0.s.cTlbFlushes));
3120 AssertMsg(pVCpu->hmr0.s.uCurrentAsid >= 1 && pVCpu->hmr0.s.uCurrentAsid < g_uHmMaxAsid,
3121 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hmr0.s.uCurrentAsid));
3122
3123 /* Update VMCS with the VPID. */
3124 int rc = VMXWriteVmcs16(VMX_VMCS16_VPID, pVCpu->hmr0.s.uCurrentAsid);
3125 AssertRC(rc);
3126
3127#undef HMVMX_SET_TAGGED_TLB_FLUSHED
3128}
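
/*
 * Worked example of the VPID (ASID) rollover handled above, assuming a hypothetical
 * g_uHmMaxAsid of 16:
 *
 *      uCurrentAsid goes 1, 2, ..., 15; the next increment reaches 16 (== g_uHmMaxAsid),
 *      so it wraps back to 1 (0 is reserved for the host), cTlbFlushes is bumped and
 *      fFlushAsidBeforeUse is set, forcing every vCPU subsequently scheduled on this
 *      host CPU to pick up a fresh VPID and flush it before first use.
 */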
3129
3130
3131/**
3132 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
3133 *
3134 * @param pHostCpu The HM physical-CPU structure.
3135 * @param pVCpu The cross context virtual CPU structure.
3136 * @param pVmcsInfo The VMCS info. object.
3137 *
3138 * @remarks Called with interrupts disabled.
3139 */
3140static void hmR0VmxFlushTaggedTlbEpt(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
3141{
3142 AssertPtr(pVCpu);
3143 AssertPtr(pHostCpu);
3144 Assert(pHostCpu->idCpu != NIL_RTCPUID);
3145 AssertMsg(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked without NestedPaging."));
3146 AssertMsg(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID."));
3147
3148 /*
3149 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
3150 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
3151 */
3152 if ( pVCpu->hmr0.s.idLastCpu != pHostCpu->idCpu
3153 || pVCpu->hmr0.s.cTlbFlushes != pHostCpu->cTlbFlushes)
3154 {
3155 pVCpu->hmr0.s.fForceTLBFlush = true;
3156 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
3157 }
3158
3159 /* Check for explicit TLB flushes. */
3160 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
3161 {
3162 pVCpu->hmr0.s.fForceTLBFlush = true;
3163 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
3164 }
3165
3166 /* Check for TLB flushes while switching to/from a nested-guest. */
3167 if (pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb)
3168 {
3169 pVCpu->hmr0.s.fForceTLBFlush = true;
3170 pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb = false;
3171 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbNstGst);
3172 }
3173
3174 pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
3175 pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes;
3176
3177 if (pVCpu->hmr0.s.fForceTLBFlush)
3178 {
3179 hmR0VmxFlushEpt(pVCpu, pVmcsInfo, pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.enmTlbFlushEpt);
3180 pVCpu->hmr0.s.fForceTLBFlush = false;
3181 }
3182}
3183
3184
3185/**
3186 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
3187 *
3188 * @param pHostCpu The HM physical-CPU structure.
3189 * @param pVCpu The cross context virtual CPU structure.
3190 *
3191 * @remarks Called with interrupts disabled.
3192 */
3193static void hmR0VmxFlushTaggedTlbVpid(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu)
3194{
3195 AssertPtr(pVCpu);
3196 AssertPtr(pHostCpu);
3197 Assert(pHostCpu->idCpu != NIL_RTCPUID);
3198 AssertMsg(pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked without VPID."));
3199 AssertMsg(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging"));
3200
3201 /*
3202 * Force a TLB flush for the first world switch if the current CPU differs from the one we
3203 * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID
3204 * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we
3205 * cannot reuse the current ASID anymore.
3206 */
3207 if ( pVCpu->hmr0.s.idLastCpu != pHostCpu->idCpu
3208 || pVCpu->hmr0.s.cTlbFlushes != pHostCpu->cTlbFlushes)
3209 {
3210 pVCpu->hmr0.s.fForceTLBFlush = true;
3211 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
3212 }
3213
3214 /* Check for explicit TLB flushes. */
3215 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
3216 {
3217 /*
3218 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see
3219 * hmR0VmxSetupTaggedTlb()) we would need to explicitly flush in this case (add an
3220 * fExplicitFlush = true here and change the pHostCpu->fFlushAsidBeforeUse check below to
3221 * include fExplicitFlush's too) - an obscure corner case.
3222 */
3223 pVCpu->hmr0.s.fForceTLBFlush = true;
3224 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
3225 }
3226
3227 /* Check for TLB flushes while switching to/from a nested-guest. */
3228 if (pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb)
3229 {
3230 pVCpu->hmr0.s.fForceTLBFlush = true;
3231 pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb = false;
3232 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbNstGst);
3233 }
3234
3235 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3236 pVCpu->hmr0.s.idLastCpu = pHostCpu->idCpu;
3237 if (pVCpu->hmr0.s.fForceTLBFlush)
3238 {
3239 ++pHostCpu->uCurrentAsid;
3240 if (pHostCpu->uCurrentAsid >= g_uHmMaxAsid)
3241 {
3242 pHostCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */
3243 pHostCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
3244 pHostCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
3245 }
3246
3247 pVCpu->hmr0.s.fForceTLBFlush = false;
3248 pVCpu->hmr0.s.cTlbFlushes = pHostCpu->cTlbFlushes;
3249 pVCpu->hmr0.s.uCurrentAsid = pHostCpu->uCurrentAsid;
3250 if (pHostCpu->fFlushAsidBeforeUse)
3251 {
3252 if (pVM->hmr0.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
3253 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_SINGLE_CONTEXT, 0 /* GCPtr */);
3254 else if (pVM->hmr0.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_ALL_CONTEXTS)
3255 {
3256 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */);
3257 pHostCpu->fFlushAsidBeforeUse = false;
3258 }
3259 else
3260 {
3261 /* hmR0VmxSetupTaggedTlb() ensures we never get here. Paranoia. */
3262 AssertMsgFailed(("Unsupported VPID-flush context type.\n"));
3263 }
3264 }
3265 }
3266
3267 AssertMsg(pVCpu->hmr0.s.cTlbFlushes == pHostCpu->cTlbFlushes,
3268 ("Flush count mismatch for cpu %d (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hmr0.s.cTlbFlushes, pHostCpu->cTlbFlushes));
3269 AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < g_uHmMaxAsid,
3270 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu,
3271 pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hmr0.s.idLastCpu, pVCpu->hmr0.s.cTlbFlushes));
3272 AssertMsg(pVCpu->hmr0.s.uCurrentAsid >= 1 && pVCpu->hmr0.s.uCurrentAsid < g_uHmMaxAsid,
3273 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hmr0.s.uCurrentAsid));
3274
3275 int rc = VMXWriteVmcs16(VMX_VMCS16_VPID, pVCpu->hmr0.s.uCurrentAsid);
3276 AssertRC(rc);
3277}
3278
3279
3280/**
3281 * Flushes the guest TLB entry based on CPU capabilities.
3282 *
3283 * @param pHostCpu The HM physical-CPU structure.
3284 * @param pVCpu The cross context virtual CPU structure.
3285 * @param pVmcsInfo The VMCS info. object.
3286 *
3287 * @remarks Called with interrupts disabled.
3288 */
3289static void hmR0VmxFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
3290{
3291#ifdef HMVMX_ALWAYS_FLUSH_TLB
3292 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
3293#endif
3294 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3295 switch (pVM->hmr0.s.vmx.enmTlbFlushType)
3296 {
3297 case VMXTLBFLUSHTYPE_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pHostCpu, pVCpu, pVmcsInfo); break;
3298 case VMXTLBFLUSHTYPE_EPT: hmR0VmxFlushTaggedTlbEpt(pHostCpu, pVCpu, pVmcsInfo); break;
3299 case VMXTLBFLUSHTYPE_VPID: hmR0VmxFlushTaggedTlbVpid(pHostCpu, pVCpu); break;
3300 case VMXTLBFLUSHTYPE_NONE: hmR0VmxFlushTaggedTlbNone(pHostCpu, pVCpu); break;
3301 default:
3302 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
3303 break;
3304 }
3305 /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. */
3306}
3307
3308
3309/**
3310 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
3311 * TLB entries from the host TLB before VM-entry.
3312 *
3313 * @returns VBox status code.
3314 * @param pVM The cross context VM structure.
3315 */
3316static int hmR0VmxSetupTaggedTlb(PVMCC pVM)
3317{
3318 /*
3319 * Determine optimal flush type for nested paging.
3320     * We cannot ignore EPT if no suitable flush type is supported by the CPU, as we've already set up
3321 * unrestricted guest execution (see hmR3InitFinalizeR0()).
3322 */
3323 if (pVM->hmr0.s.fNestedPaging)
3324 {
3325 if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
3326 {
3327 if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
3328 pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_SINGLE_CONTEXT;
3329 else if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
3330 pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_ALL_CONTEXTS;
3331 else
3332 {
3333 /* Shouldn't happen. EPT is supported but no suitable flush-types supported. */
3334 pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
3335 VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_EPT_FLUSH_TYPE_UNSUPPORTED;
3336 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3337 }
3338
3339 /* Make sure the write-back cacheable memory type for EPT is supported. */
3340 if (RT_UNLIKELY(!(g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_MEMTYPE_WB)))
3341 {
3342 pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
3343 VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_EPT_MEM_TYPE_NOT_WB;
3344 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3345 }
3346
3347 /* EPT requires a page-walk length of 4. */
3348 if (RT_UNLIKELY(!(g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4)))
3349 {
3350 pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
3351 VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_EPT_PAGE_WALK_LENGTH_UNSUPPORTED;
3352 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3353 }
3354 }
3355 else
3356 {
3357 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
3358 pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NOT_SUPPORTED;
3359 VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_EPT_INVEPT_UNAVAILABLE;
3360 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3361 }
3362 }
3363
3364 /*
3365 * Determine optimal flush type for VPID.
3366 */
3367 if (pVM->hmr0.s.vmx.fVpid)
3368 {
3369 if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
3370 {
3371 if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
3372 pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_SINGLE_CONTEXT;
3373 else if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
3374 pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_ALL_CONTEXTS;
3375 else
3376 {
3377 /* Neither SINGLE nor ALL-context flush types for VPID is supported by the CPU. Ignore VPID capability. */
3378 if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
3379 LogRelFunc(("Only INDIV_ADDR supported. Ignoring VPID.\n"));
3380 if (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
3381 LogRelFunc(("Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
3382 pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED;
3383 pVM->hmr0.s.vmx.fVpid = false;
3384 }
3385 }
3386 else
3387 {
3388 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
3389            Log4Func(("VPID supported without INVVPID support. Ignoring VPID.\n"));
3390 pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED;
3391 pVM->hmr0.s.vmx.fVpid = false;
3392 }
3393 }
3394
3395 /*
3396 * Setup the handler for flushing tagged-TLBs.
3397 */
3398 if (pVM->hmr0.s.fNestedPaging && pVM->hmr0.s.vmx.fVpid)
3399 pVM->hmr0.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_EPT_VPID;
3400 else if (pVM->hmr0.s.fNestedPaging)
3401 pVM->hmr0.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_EPT;
3402 else if (pVM->hmr0.s.vmx.fVpid)
3403 pVM->hmr0.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_VPID;
3404 else
3405 pVM->hmr0.s.vmx.enmTlbFlushType = VMXTLBFLUSHTYPE_NONE;
3406
3407
3408 /*
3409 * Copy out the result to ring-3.
3410 */
3411 pVM->hm.s.ForR3.vmx.fVpid = pVM->hmr0.s.vmx.fVpid;
3412 pVM->hm.s.ForR3.vmx.enmTlbFlushType = pVM->hmr0.s.vmx.enmTlbFlushType;
3413 pVM->hm.s.ForR3.vmx.enmTlbFlushEpt = pVM->hmr0.s.vmx.enmTlbFlushEpt;
3414 pVM->hm.s.ForR3.vmx.enmTlbFlushVpid = pVM->hmr0.s.vmx.enmTlbFlushVpid;
3415 return VINF_SUCCESS;
3416}
3417
3418
3419/**
3420 * Sets up the LBR MSR ranges based on the host CPU.
3421 *
3422 * @returns VBox status code.
3423 * @param pVM The cross context VM structure.
3424 */
3425static int hmR0VmxSetupLbrMsrRange(PVMCC pVM)
3426{
3427 Assert(pVM->hmr0.s.vmx.fLbr);
3428 uint32_t idLbrFromIpMsrFirst;
3429 uint32_t idLbrFromIpMsrLast;
3430 uint32_t idLbrToIpMsrFirst;
3431 uint32_t idLbrToIpMsrLast;
3432 uint32_t idLbrTosMsr;
3433
3434 /*
3435 * Determine the LBR MSRs supported for this host CPU family and model.
3436 *
3437 * See Intel spec. 17.4.8 "LBR Stack".
3438 * See Intel "Model-Specific Registers" spec.
3439 */
3440 uint32_t const uFamilyModel = (pVM->cpum.ro.HostFeatures.uFamily << 8)
3441 | pVM->cpum.ro.HostFeatures.uModel;
3442 switch (uFamilyModel)
3443 {
3444 case 0x0f01: case 0x0f02:
3445 idLbrFromIpMsrFirst = MSR_P4_LASTBRANCH_0;
3446 idLbrFromIpMsrLast = MSR_P4_LASTBRANCH_3;
3447 idLbrToIpMsrFirst = 0x0;
3448 idLbrToIpMsrLast = 0x0;
3449 idLbrTosMsr = MSR_P4_LASTBRANCH_TOS;
3450 break;
3451
3452 case 0x065c: case 0x065f: case 0x064e: case 0x065e: case 0x068e:
3453 case 0x069e: case 0x0655: case 0x0666: case 0x067a: case 0x0667:
3454 case 0x066a: case 0x066c: case 0x067d: case 0x067e:
3455 idLbrFromIpMsrFirst = MSR_LASTBRANCH_0_FROM_IP;
3456 idLbrFromIpMsrLast = MSR_LASTBRANCH_31_FROM_IP;
3457 idLbrToIpMsrFirst = MSR_LASTBRANCH_0_TO_IP;
3458 idLbrToIpMsrLast = MSR_LASTBRANCH_31_TO_IP;
3459 idLbrTosMsr = MSR_LASTBRANCH_TOS;
3460 break;
3461
3462 case 0x063d: case 0x0647: case 0x064f: case 0x0656: case 0x063c:
3463 case 0x0645: case 0x0646: case 0x063f: case 0x062a: case 0x062d:
3464 case 0x063a: case 0x063e: case 0x061a: case 0x061e: case 0x061f:
3465 case 0x062e: case 0x0625: case 0x062c: case 0x062f:
3466 idLbrFromIpMsrFirst = MSR_LASTBRANCH_0_FROM_IP;
3467 idLbrFromIpMsrLast = MSR_LASTBRANCH_15_FROM_IP;
3468 idLbrToIpMsrFirst = MSR_LASTBRANCH_0_TO_IP;
3469 idLbrToIpMsrLast = MSR_LASTBRANCH_15_TO_IP;
3470 idLbrTosMsr = MSR_LASTBRANCH_TOS;
3471 break;
3472
3473 case 0x0617: case 0x061d: case 0x060f:
3474 idLbrFromIpMsrFirst = MSR_CORE2_LASTBRANCH_0_FROM_IP;
3475 idLbrFromIpMsrLast = MSR_CORE2_LASTBRANCH_3_FROM_IP;
3476 idLbrToIpMsrFirst = MSR_CORE2_LASTBRANCH_0_TO_IP;
3477 idLbrToIpMsrLast = MSR_CORE2_LASTBRANCH_3_TO_IP;
3478 idLbrTosMsr = MSR_CORE2_LASTBRANCH_TOS;
3479 break;
3480
3481 /* Atom and related microarchitectures we don't care about:
3482 case 0x0637: case 0x064a: case 0x064c: case 0x064d: case 0x065a:
3483 case 0x065d: case 0x061c: case 0x0626: case 0x0627: case 0x0635:
3484 case 0x0636: */
3485 /* All other CPUs: */
3486 default:
3487 {
3488 LogRelFunc(("Could not determine LBR stack size for the CPU model %#x\n", uFamilyModel));
3489 VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_LBR_STACK_SIZE_UNKNOWN;
3490 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3491 }
3492 }
3493
3494 /*
3495 * Validate.
3496 */
3497 uint32_t const cLbrStack = idLbrFromIpMsrLast - idLbrFromIpMsrFirst + 1;
3498 PCVMCPU pVCpu0 = VMCC_GET_CPU_0(pVM);
3499 AssertCompile( RT_ELEMENTS(pVCpu0->hm.s.vmx.VmcsInfo.au64LbrFromIpMsr)
3500 == RT_ELEMENTS(pVCpu0->hm.s.vmx.VmcsInfo.au64LbrToIpMsr));
3501 if (cLbrStack > RT_ELEMENTS(pVCpu0->hm.s.vmx.VmcsInfo.au64LbrFromIpMsr))
3502 {
3503 LogRelFunc(("LBR stack size of the CPU (%u) exceeds our buffer size\n", cLbrStack));
3504 VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_LBR_STACK_SIZE_OVERFLOW;
3505 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3506 }
3507 NOREF(pVCpu0);
3508
3509 /*
3510 * Update the LBR info. to the VM struct. for use later.
3511 */
3512 pVM->hmr0.s.vmx.idLbrTosMsr = idLbrTosMsr;
3513
3514 pVM->hm.s.ForR3.vmx.idLbrFromIpMsrFirst = pVM->hmr0.s.vmx.idLbrFromIpMsrFirst = idLbrFromIpMsrFirst;
3515 pVM->hm.s.ForR3.vmx.idLbrFromIpMsrLast = pVM->hmr0.s.vmx.idLbrFromIpMsrLast = idLbrFromIpMsrLast;
3516
3517 pVM->hm.s.ForR3.vmx.idLbrToIpMsrFirst = pVM->hmr0.s.vmx.idLbrToIpMsrFirst = idLbrToIpMsrFirst;
3518 pVM->hm.s.ForR3.vmx.idLbrToIpMsrLast = pVM->hmr0.s.vmx.idLbrToIpMsrLast = idLbrToIpMsrLast;
3519 return VINF_SUCCESS;
3520}
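
/*
 * Quick worked example of the packed family/model key used by the switch above
 * (values are illustrative):
 *
 *      uFamily = 0x06, uModel = 0x9e   ->   uFamilyModel = (0x06 << 8) | 0x9e = 0x069e
 *
 * which matches the second group of case labels and thus selects the 32-entry
 * MSR_LASTBRANCH_0..31_FROM_IP / _TO_IP stack with MSR_LASTBRANCH_TOS as the
 * top-of-stack MSR.
 */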
3521
3522
3523#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3524/**
3525 * Sets up the shadow VMCS fields arrays.
3526 *
3527 * This function builds arrays of VMCS fields to sync the shadow VMCS later while
3528 * executing the guest.
3529 *
3530 * @returns VBox status code.
3531 * @param pVM The cross context VM structure.
3532 */
3533static int hmR0VmxSetupShadowVmcsFieldsArrays(PVMCC pVM)
3534{
3535 /*
3536 * Paranoia. Ensure we haven't exposed the VMWRITE-All VMX feature to the guest
3537 * when the host does not support it.
3538 */
3539 bool const fGstVmwriteAll = pVM->cpum.ro.GuestFeatures.fVmxVmwriteAll;
3540 if ( !fGstVmwriteAll
3541 || (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL))
3542 { /* likely. */ }
3543 else
3544 {
3545 LogRelFunc(("VMX VMWRITE-All feature exposed to the guest but host CPU does not support it!\n"));
3546 VMCC_GET_CPU_0(pVM)->hm.s.u32HMError = VMX_UFC_GST_HOST_VMWRITE_ALL;
3547 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3548 }
3549
3550 uint32_t const cVmcsFields = RT_ELEMENTS(g_aVmcsFields);
3551 uint32_t cRwFields = 0;
3552 uint32_t cRoFields = 0;
3553 for (uint32_t i = 0; i < cVmcsFields; i++)
3554 {
3555 VMXVMCSFIELD VmcsField;
3556 VmcsField.u = g_aVmcsFields[i];
3557
3558 /*
3559 * We will be writing "FULL" (64-bit) fields while syncing the shadow VMCS.
3560 * Therefore, "HIGH" (32-bit portion of 64-bit) fields must not be included
3561 * in the shadow VMCS fields array as they would be redundant.
3562 *
3563 * If the VMCS field depends on a CPU feature that is not exposed to the guest,
3564 * we must not include it in the shadow VMCS fields array. Guests attempting to
3565 * VMREAD/VMWRITE such VMCS fields would cause a VM-exit and we shall emulate
3566 * the required behavior.
3567 */
3568 if ( VmcsField.n.fAccessType == VMX_VMCSFIELD_ACCESS_FULL
3569 && CPUMIsGuestVmxVmcsFieldValid(pVM, VmcsField.u))
3570 {
3571 /*
3572 * Read-only fields are placed in a separate array so that while syncing shadow
3573 * VMCS fields later (which is more performance critical) we can avoid branches.
3574 *
3575 * However, if the guest can write to all fields (including read-only fields),
3576             * we treat it as a read/write field. Otherwise, writing to these fields would
3577 * cause a VMWRITE instruction error while syncing the shadow VMCS.
3578 */
3579 if ( fGstVmwriteAll
3580 || !VMXIsVmcsFieldReadOnly(VmcsField.u))
3581 pVM->hmr0.s.vmx.paShadowVmcsFields[cRwFields++] = VmcsField.u;
3582 else
3583 pVM->hmr0.s.vmx.paShadowVmcsRoFields[cRoFields++] = VmcsField.u;
3584 }
3585 }
3586
3587 /* Update the counts. */
3588 pVM->hmr0.s.vmx.cShadowVmcsFields = cRwFields;
3589 pVM->hmr0.s.vmx.cShadowVmcsRoFields = cRoFields;
3590 return VINF_SUCCESS;
3591}
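
/*
 * A short sketch of why only "FULL" access-type encodings are collected above: 64-bit
 * VMCS fields have two encodings, the full field and a "HIGH" alias for its upper
 * 32 bits, distinguished by bit 0 of the encoding. For example (encoding values per
 * the Intel spec. appendix on VMCS field encodings):
 *
 *      0x00002004   MSR-bitmap address, FULL   (access type 0)
 *      0x00002005   MSR-bitmap address, HIGH   (access type 1, upper 32 bits only)
 *
 * Since the shadow VMCS is always synced with 64-bit VMWRITEs of the FULL form,
 * adding the HIGH aliases to paShadowVmcsFields/paShadowVmcsRoFields would merely
 * duplicate work.
 */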
3592
3593
3594/**
3595 * Sets up the VMREAD and VMWRITE bitmaps.
3596 *
3597 * @param pVM The cross context VM structure.
3598 */
3599static void hmR0VmxSetupVmreadVmwriteBitmaps(PVMCC pVM)
3600{
3601 /*
3602     * By default, ensure that any guest attempt to access a VMCS field causes a VM-exit.
3603 */
3604 uint32_t const cbBitmap = X86_PAGE_4K_SIZE;
3605 uint8_t *pbVmreadBitmap = (uint8_t *)pVM->hmr0.s.vmx.pvVmreadBitmap;
3606 uint8_t *pbVmwriteBitmap = (uint8_t *)pVM->hmr0.s.vmx.pvVmwriteBitmap;
3607 ASMMemFill32(pbVmreadBitmap, cbBitmap, UINT32_C(0xffffffff));
3608 ASMMemFill32(pbVmwriteBitmap, cbBitmap, UINT32_C(0xffffffff));
3609
3610 /*
3611 * Skip intercepting VMREAD/VMWRITE to guest read/write fields in the
3612 * VMREAD and VMWRITE bitmaps.
3613 */
3614 {
3615 uint32_t const *paShadowVmcsFields = pVM->hmr0.s.vmx.paShadowVmcsFields;
3616 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
3617 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
3618 {
3619 uint32_t const uVmcsField = paShadowVmcsFields[i];
3620 Assert(!(uVmcsField & VMX_VMCSFIELD_RSVD_MASK));
3621 Assert(uVmcsField >> 3 < cbBitmap);
3622 ASMBitClear(pbVmreadBitmap + (uVmcsField >> 3), uVmcsField & 7);
3623 ASMBitClear(pbVmwriteBitmap + (uVmcsField >> 3), uVmcsField & 7);
3624 }
3625 }
3626
3627 /*
3628 * Skip intercepting VMREAD for guest read-only fields in the VMREAD bitmap
3629 * if the host supports VMWRITE to all supported VMCS fields.
3630 */
3631 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
3632 {
3633 uint32_t const *paShadowVmcsRoFields = pVM->hmr0.s.vmx.paShadowVmcsRoFields;
3634 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
3635 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
3636 {
3637 uint32_t const uVmcsField = paShadowVmcsRoFields[i];
3638 Assert(!(uVmcsField & VMX_VMCSFIELD_RSVD_MASK));
3639 Assert(uVmcsField >> 3 < cbBitmap);
3640 ASMBitClear(pbVmreadBitmap + (uVmcsField >> 3), uVmcsField & 7);
3641 }
3642 }
3643}
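
/*
 * Worked example of the bitmap indexing performed above: each VMCS field encoding
 * selects byte (uVmcsField >> 3), bit (uVmcsField & 7) of the 4K bitmap. Taking the
 * (illustrative) encoding 0x2004:
 *
 *      byte index = 0x2004 >> 3 = 0x400
 *      bit  index = 0x2004 & 7  = 4
 *
 * Clearing that bit in the VMREAD/VMWRITE bitmap lets the nested-guest read/write the
 * field straight from/to the shadow VMCS instead of taking a VM-exit.
 */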
3644#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
3645
3646
3647/**
3648 * Sets up the virtual-APIC page address for the VMCS.
3649 *
3650 * @param pVmcsInfo The VMCS info. object.
3651 */
3652DECLINLINE(void) hmR0VmxSetupVmcsVirtApicAddr(PCVMXVMCSINFO pVmcsInfo)
3653{
3654 RTHCPHYS const HCPhysVirtApic = pVmcsInfo->HCPhysVirtApic;
3655 Assert(HCPhysVirtApic != NIL_RTHCPHYS);
3656 Assert(!(HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
3657 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, HCPhysVirtApic);
3658 AssertRC(rc);
3659}
3660
3661
3662/**
3663 * Sets up the MSR-bitmap address for the VMCS.
3664 *
3665 * @param pVmcsInfo The VMCS info. object.
3666 */
3667DECLINLINE(void) hmR0VmxSetupVmcsMsrBitmapAddr(PCVMXVMCSINFO pVmcsInfo)
3668{
3669 RTHCPHYS const HCPhysMsrBitmap = pVmcsInfo->HCPhysMsrBitmap;
3670 Assert(HCPhysMsrBitmap != NIL_RTHCPHYS);
3671 Assert(!(HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
3672 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, HCPhysMsrBitmap);
3673 AssertRC(rc);
3674}
3675
3676
3677/**
3678 * Sets up the APIC-access page address for the VMCS.
3679 *
3680 * @param pVCpu The cross context virtual CPU structure.
3681 */
3682DECLINLINE(void) hmR0VmxSetupVmcsApicAccessAddr(PVMCPUCC pVCpu)
3683{
3684 RTHCPHYS const HCPhysApicAccess = pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.HCPhysApicAccess;
3685 Assert(HCPhysApicAccess != NIL_RTHCPHYS);
3686 Assert(!(HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
3687 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, HCPhysApicAccess);
3688 AssertRC(rc);
3689}
3690
3691#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3692
3693/**
3694 * Sets up the VMREAD bitmap address for the VMCS.
3695 *
3696 * @param pVCpu The cross context virtual CPU structure.
3697 */
3698DECLINLINE(void) hmR0VmxSetupVmcsVmreadBitmapAddr(PVMCPUCC pVCpu)
3699{
3700 RTHCPHYS const HCPhysVmreadBitmap = pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.HCPhysVmreadBitmap;
3701 Assert(HCPhysVmreadBitmap != NIL_RTHCPHYS);
3702 Assert(!(HCPhysVmreadBitmap & 0xfff)); /* Bits 11:0 MBZ. */
3703 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL, HCPhysVmreadBitmap);
3704 AssertRC(rc);
3705}
3706
3707
3708/**
3709 * Sets up the VMWRITE bitmap address for the VMCS.
3710 *
3711 * @param pVCpu The cross context virtual CPU structure.
3712 */
3713DECLINLINE(void) hmR0VmxSetupVmcsVmwriteBitmapAddr(PVMCPUCC pVCpu)
3714{
3715 RTHCPHYS const HCPhysVmwriteBitmap = pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.HCPhysVmwriteBitmap;
3716 Assert(HCPhysVmwriteBitmap != NIL_RTHCPHYS);
3717 Assert(!(HCPhysVmwriteBitmap & 0xfff)); /* Bits 11:0 MBZ. */
3718 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL, HCPhysVmwriteBitmap);
3719 AssertRC(rc);
3720}
3721
3722#endif
3723
3724/**
3725 * Sets up the VM-entry MSR load, VM-exit MSR-store and VM-exit MSR-load addresses
3726 * in the VMCS.
3727 *
3728 * @returns VBox status code.
3729 * @param pVmcsInfo The VMCS info. object.
3730 */
3731DECLINLINE(int) hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(PVMXVMCSINFO pVmcsInfo)
3732{
3733 RTHCPHYS const HCPhysGuestMsrLoad = pVmcsInfo->HCPhysGuestMsrLoad;
3734 Assert(HCPhysGuestMsrLoad != NIL_RTHCPHYS);
3735 Assert(!(HCPhysGuestMsrLoad & 0xf)); /* Bits 3:0 MBZ. */
3736
3737 RTHCPHYS const HCPhysGuestMsrStore = pVmcsInfo->HCPhysGuestMsrStore;
3738 Assert(HCPhysGuestMsrStore != NIL_RTHCPHYS);
3739 Assert(!(HCPhysGuestMsrStore & 0xf)); /* Bits 3:0 MBZ. */
3740
3741 RTHCPHYS const HCPhysHostMsrLoad = pVmcsInfo->HCPhysHostMsrLoad;
3742 Assert(HCPhysHostMsrLoad != NIL_RTHCPHYS);
3743 Assert(!(HCPhysHostMsrLoad & 0xf)); /* Bits 3:0 MBZ. */
3744
3745 int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, HCPhysGuestMsrLoad); AssertRC(rc);
3746 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, HCPhysGuestMsrStore); AssertRC(rc);
3747 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, HCPhysHostMsrLoad); AssertRC(rc);
3748 return VINF_SUCCESS;
3749}
3750
3751
3752/**
3753 * Sets up MSR permissions in the MSR bitmap of a VMCS info. object.
3754 *
3755 * @param pVCpu The cross context virtual CPU structure.
3756 * @param pVmcsInfo The VMCS info. object.
3757 */
3758static void hmR0VmxSetupVmcsMsrPermissions(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
3759{
3760 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS);
3761
3762 /*
3763     * By default, ensure that any guest attempt to access an MSR causes a VM-exit.
3764 * This shall later be relaxed for specific MSRs as necessary.
3765 *
3766 * Note: For nested-guests, the entire bitmap will be merged prior to
3767 * executing the nested-guest using hardware-assisted VMX and hence there
3768 * is no need to perform this operation. See hmR0VmxMergeMsrBitmapNested.
3769 */
3770 Assert(pVmcsInfo->pvMsrBitmap);
3771 ASMMemFill32(pVmcsInfo->pvMsrBitmap, X86_PAGE_4K_SIZE, UINT32_C(0xffffffff));
3772
3773 /*
3774 * The guest can access the following MSRs (read, write) without causing
3775 * VM-exits; they are loaded/stored automatically using fields in the VMCS.
3776 */
3777 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3778 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_SYSENTER_CS, VMXMSRPM_ALLOW_RD_WR);
3779 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_SYSENTER_ESP, VMXMSRPM_ALLOW_RD_WR);
3780 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_SYSENTER_EIP, VMXMSRPM_ALLOW_RD_WR);
3781 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_GS_BASE, VMXMSRPM_ALLOW_RD_WR);
3782 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_FS_BASE, VMXMSRPM_ALLOW_RD_WR);
3783
3784 /*
3785     * The IA32_PRED_CMD and IA32_FLUSH_CMD MSRs are write-only and have no state
3786     * associated with them. We never need to intercept access (writes need to be
3787 * executed without causing a VM-exit, reads will #GP fault anyway).
3788 *
3789 * The IA32_SPEC_CTRL MSR is read/write and has state. We allow the guest to
3790 * read/write them. We swap the guest/host MSR value using the
3791 * auto-load/store MSR area.
3792 */
3793 if (pVM->cpum.ro.GuestFeatures.fIbpb)
3794 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_PRED_CMD, VMXMSRPM_ALLOW_RD_WR);
3795 if (pVM->cpum.ro.GuestFeatures.fFlushCmd)
3796 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_FLUSH_CMD, VMXMSRPM_ALLOW_RD_WR);
3797 if (pVM->cpum.ro.GuestFeatures.fIbrs)
3798 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_IA32_SPEC_CTRL, VMXMSRPM_ALLOW_RD_WR);
3799
3800 /*
3801 * Allow full read/write access for the following MSRs (mandatory for VT-x)
3802 * required for 64-bit guests.
3803 */
3804 if (pVM->hmr0.s.fAllow64BitGuests)
3805 {
3806 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_LSTAR, VMXMSRPM_ALLOW_RD_WR);
3807 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K6_STAR, VMXMSRPM_ALLOW_RD_WR);
3808 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_SF_MASK, VMXMSRPM_ALLOW_RD_WR);
3809 hmR0VmxSetMsrPermission(pVCpu, pVmcsInfo, false, MSR_K8_KERNEL_GS_BASE, VMXMSRPM_ALLOW_RD_WR);
3810 }
3811
3812 /*
3813 * IA32_EFER MSR is always intercepted, see @bugref{9180#c37}.
3814 */
3815#ifdef VBOX_STRICT
3816 Assert(pVmcsInfo->pvMsrBitmap);
3817 uint32_t const fMsrpmEfer = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, MSR_K6_EFER);
3818 Assert(fMsrpmEfer == VMXMSRPM_EXIT_RD_WR);
3819#endif
3820}
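
/*
 * For orientation, a rough sketch of the 4K MSR bitmap that hmR0VmxSetMsrPermission
 * updates (layout per the Intel spec. section on MSR bitmaps; byte offsets):
 *
 *      0x000-0x3ff   read  bitmap for low  MSRs 0x00000000..0x00001fff
 *      0x400-0x7ff   read  bitmap for high MSRs 0xc0000000..0xc0001fff
 *      0x800-0xbff   write bitmap for low  MSRs
 *      0xc00-0xfff   write bitmap for high MSRs
 *
 * One bit per MSR; a set bit means the access causes a VM-exit. Hence the ASMMemFill32
 * above intercepts everything and VMXMSRPM_ALLOW_RD_WR clears both the read and write
 * bits for the handful of whitelisted MSRs.
 */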
3821
3822
3823/**
3824 * Sets up pin-based VM-execution controls in the VMCS.
3825 *
3826 * @returns VBox status code.
3827 * @param pVCpu The cross context virtual CPU structure.
3828 * @param pVmcsInfo The VMCS info. object.
3829 */
3830static int hmR0VmxSetupVmcsPinCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
3831{
3832 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3833 uint32_t fVal = g_HmMsrs.u.vmx.PinCtls.n.allowed0; /* Bits set here must always be set. */
3834 uint32_t const fZap = g_HmMsrs.u.vmx.PinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
3835
3836 fVal |= VMX_PIN_CTLS_EXT_INT_EXIT /* External interrupts cause a VM-exit. */
3837 | VMX_PIN_CTLS_NMI_EXIT; /* Non-maskable interrupts (NMIs) cause a VM-exit. */
3838
3839 if (g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_VIRT_NMI)
3840 fVal |= VMX_PIN_CTLS_VIRT_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */
3841
3842 /* Enable the VMX-preemption timer. */
3843 if (pVM->hmr0.s.vmx.fUsePreemptTimer)
3844 {
3845 Assert(g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_PREEMPT_TIMER);
3846 fVal |= VMX_PIN_CTLS_PREEMPT_TIMER;
3847 }
3848
3849#if 0
3850 /* Enable posted-interrupt processing. */
3851 if (pVM->hm.s.fPostedIntrs)
3852 {
3853 Assert(g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_POSTED_INT);
3854 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_ACK_EXT_INT);
3855 fVal |= VMX_PIN_CTLS_POSTED_INT;
3856 }
3857#endif
3858
3859 if ((fVal & fZap) != fVal)
3860 {
3861 LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
3862 g_HmMsrs.u.vmx.PinCtls.n.allowed0, fVal, fZap));
3863 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
3864 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3865 }
3866
3867 /* Commit it to the VMCS and update our cache. */
3868 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, fVal);
3869 AssertRC(rc);
3870 pVmcsInfo->u32PinCtls = fVal;
3871
3872 return VINF_SUCCESS;
3873}
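
/*
 * Tiny worked example of the allowed0/allowed1 check used here and in the other
 * hmR0VmxSetupVmcsXxxCtls functions (bit patterns are made up for illustration):
 *
 *      allowed0 (seed for fVal) = 0x00000016    bits that must be 1
 *      allowed1 (fZap)          = 0x0000fff6    bits that may  be 1
 *      requested fVal           = 0x00010016    asks for bit 16, which allowed1 has clear
 *
 *      fVal & fZap = 0x00000016 != fVal   ->   unsupported control combination, so we
 *                                              fail with VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO.
 */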
3874
3875
3876/**
3877 * Sets up secondary processor-based VM-execution controls in the VMCS.
3878 *
3879 * @returns VBox status code.
3880 * @param pVCpu The cross context virtual CPU structure.
3881 * @param pVmcsInfo The VMCS info. object.
3882 */
3883static int hmR0VmxSetupVmcsProcCtls2(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
3884{
3885 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3886 uint32_t fVal = g_HmMsrs.u.vmx.ProcCtls2.n.allowed0; /* Bits set here must be set in the VMCS. */
3887 uint32_t const fZap = g_HmMsrs.u.vmx.ProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3888
3889 /* WBINVD causes a VM-exit. */
3890 if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_WBINVD_EXIT)
3891 fVal |= VMX_PROC_CTLS2_WBINVD_EXIT;
3892
3893 /* Enable EPT (aka nested-paging). */
3894 if (pVM->hmr0.s.fNestedPaging)
3895 fVal |= VMX_PROC_CTLS2_EPT;
3896
3897    /* Enable the INVPCID instruction if we expose it to the guest and it is supported
3898       by the hardware. Without this, a guest executing INVPCID would cause a #UD. */
3899 if ( pVM->cpum.ro.GuestFeatures.fInvpcid
3900 && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_INVPCID))
3901 fVal |= VMX_PROC_CTLS2_INVPCID;
3902
3903 /* Enable VPID. */
3904 if (pVM->hmr0.s.vmx.fVpid)
3905 fVal |= VMX_PROC_CTLS2_VPID;
3906
3907 /* Enable unrestricted guest execution. */
3908 if (pVM->hmr0.s.vmx.fUnrestrictedGuest)
3909 fVal |= VMX_PROC_CTLS2_UNRESTRICTED_GUEST;
3910
3911#if 0
3912 if (pVM->hm.s.fVirtApicRegs)
3913 {
3914 /* Enable APIC-register virtualization. */
3915 Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_APIC_REG_VIRT);
3916 fVal |= VMX_PROC_CTLS2_APIC_REG_VIRT;
3917
3918 /* Enable virtual-interrupt delivery. */
3919 Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_INTR_DELIVERY);
3920 fVal |= VMX_PROC_CTLS2_VIRT_INTR_DELIVERY;
3921 }
3922#endif
3923
3924    /* Virtualize APIC accesses if supported by the CPU. The virtual-APIC page is
3925 where the TPR shadow resides. */
3926 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
3927 * done dynamically. */
3928 if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
3929 {
3930 fVal |= VMX_PROC_CTLS2_VIRT_APIC_ACCESS;
3931 hmR0VmxSetupVmcsApicAccessAddr(pVCpu);
3932 }
3933
3934    /* Enable the RDTSCP instruction if we expose it to the guest and it is supported
3935       by the hardware. Without this, a guest executing RDTSCP would cause a #UD. */
3936 if ( pVM->cpum.ro.GuestFeatures.fRdTscP
3937 && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_RDTSCP))
3938 fVal |= VMX_PROC_CTLS2_RDTSCP;
3939
3940 /* Enable Pause-Loop exiting. */
3941 if ( (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
3942 && pVM->hm.s.vmx.cPleGapTicks
3943 && pVM->hm.s.vmx.cPleWindowTicks)
3944 {
3945 fVal |= VMX_PROC_CTLS2_PAUSE_LOOP_EXIT;
3946
3947 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks); AssertRC(rc);
3948 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks); AssertRC(rc);
3949 }
3950
3951 if ((fVal & fZap) != fVal)
3952 {
3953 LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
3954 g_HmMsrs.u.vmx.ProcCtls2.n.allowed0, fVal, fZap));
3955 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
3956 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3957 }
3958
3959 /* Commit it to the VMCS and update our cache. */
3960 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
3961 AssertRC(rc);
3962 pVmcsInfo->u32ProcCtls2 = fVal;
3963
3964 return VINF_SUCCESS;
3965}
3966
3967
3968/**
3969 * Sets up processor-based VM-execution controls in the VMCS.
3970 *
3971 * @returns VBox status code.
3972 * @param pVCpu The cross context virtual CPU structure.
3973 * @param pVmcsInfo The VMCS info. object.
3974 */
3975static int hmR0VmxSetupVmcsProcCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
3976{
3977 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3978 uint32_t fVal = g_HmMsrs.u.vmx.ProcCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
3979 uint32_t const fZap = g_HmMsrs.u.vmx.ProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
3980
3981 fVal |= VMX_PROC_CTLS_HLT_EXIT /* HLT causes a VM-exit. */
3982 | VMX_PROC_CTLS_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
3983 | VMX_PROC_CTLS_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
3984 | VMX_PROC_CTLS_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
3985 | VMX_PROC_CTLS_RDPMC_EXIT /* RDPMC causes a VM-exit. */
3986 | VMX_PROC_CTLS_MONITOR_EXIT /* MONITOR causes a VM-exit. */
3987 | VMX_PROC_CTLS_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
3988
3989    /* We toggle VMX_PROC_CTLS_MOV_DR_EXIT later; check that it isn't -always- required to be set or cleared. */
3990 if ( !(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MOV_DR_EXIT)
3991 || (g_HmMsrs.u.vmx.ProcCtls.n.allowed0 & VMX_PROC_CTLS_MOV_DR_EXIT))
3992 {
3993 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
3994 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3995 }
3996
3997 /* Without nested paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
3998 if (!pVM->hmr0.s.fNestedPaging)
3999 {
4000 Assert(!pVM->hmr0.s.vmx.fUnrestrictedGuest);
4001 fVal |= VMX_PROC_CTLS_INVLPG_EXIT
4002 | VMX_PROC_CTLS_CR3_LOAD_EXIT
4003 | VMX_PROC_CTLS_CR3_STORE_EXIT;
4004 }
4005
4006 /* Use TPR shadowing if supported by the CPU. */
4007 if ( PDMHasApic(pVM)
4008 && (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW))
4009 {
4010 fVal |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
4011 /* CR8 writes cause a VM-exit based on TPR threshold. */
4012 Assert(!(fVal & VMX_PROC_CTLS_CR8_STORE_EXIT));
4013 Assert(!(fVal & VMX_PROC_CTLS_CR8_LOAD_EXIT));
4014 hmR0VmxSetupVmcsVirtApicAddr(pVmcsInfo);
4015 }
4016 else
4017 {
4018 /* Some 32-bit CPUs do not support CR8 load/store exiting as MOV CR8 is
4019 invalid on 32-bit Intel CPUs. Set this control only for 64-bit guests. */
4020 if (pVM->hmr0.s.fAllow64BitGuests)
4021 fVal |= VMX_PROC_CTLS_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
4022 | VMX_PROC_CTLS_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
4023 }
4024
4025 /* Use MSR-bitmaps if supported by the CPU. */
4026 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
4027 {
4028 fVal |= VMX_PROC_CTLS_USE_MSR_BITMAPS;
4029 hmR0VmxSetupVmcsMsrBitmapAddr(pVmcsInfo);
4030 }
4031
4032 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
4033 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
4034 fVal |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
4035
4036 if ((fVal & fZap) != fVal)
4037 {
4038 LogRelFunc(("Invalid processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
4039 g_HmMsrs.u.vmx.ProcCtls.n.allowed0, fVal, fZap));
4040 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
4041 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
4042 }
4043
4044 /* Commit it to the VMCS and update our cache. */
4045 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, fVal);
4046 AssertRC(rc);
4047 pVmcsInfo->u32ProcCtls = fVal;
4048
4049 /* Set up MSR permissions that don't change through the lifetime of the VM. */
4050 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
4051 hmR0VmxSetupVmcsMsrPermissions(pVCpu, pVmcsInfo);
4052
4053 /* Set up secondary processor-based VM-execution controls if the CPU supports it. */
4054 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
4055 return hmR0VmxSetupVmcsProcCtls2(pVCpu, pVmcsInfo);
4056
4057 /* Sanity check, should not really happen. */
4058 if (RT_LIKELY(!pVM->hmr0.s.vmx.fUnrestrictedGuest))
4059 { /* likely */ }
4060 else
4061 {
4062 pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;
4063 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
4064 }
4065
4066 /* Old CPUs without secondary processor-based VM-execution controls would end up here. */
4067 return VINF_SUCCESS;
4068}
4069
4070
4071/**
4072 * Sets up miscellaneous (everything other than Pin, Processor and secondary
4073 * Processor-based VM-execution) control fields in the VMCS.
4074 *
4075 * @returns VBox status code.
4076 * @param pVCpu The cross context virtual CPU structure.
4077 * @param pVmcsInfo The VMCS info. object.
4078 */
4079static int hmR0VmxSetupVmcsMiscCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4080{
4081#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4082 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUseVmcsShadowing)
4083 {
4084 hmR0VmxSetupVmcsVmreadBitmapAddr(pVCpu);
4085 hmR0VmxSetupVmcsVmwriteBitmapAddr(pVCpu);
4086 }
4087#endif
4088
4089 Assert(pVmcsInfo->u64VmcsLinkPtr == NIL_RTHCPHYS);
4090 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS);
4091 AssertRC(rc);
4092
4093 rc = hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(pVmcsInfo);
4094 if (RT_SUCCESS(rc))
4095 {
4096 uint64_t const u64Cr0Mask = hmR0VmxGetFixedCr0Mask(pVCpu);
4097 uint64_t const u64Cr4Mask = hmR0VmxGetFixedCr4Mask(pVCpu);
4098
4099 rc = VMXWriteVmcsNw(VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask); AssertRC(rc);
4100 rc = VMXWriteVmcsNw(VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask); AssertRC(rc);
4101
4102 pVmcsInfo->u64Cr0Mask = u64Cr0Mask;
4103 pVmcsInfo->u64Cr4Mask = u64Cr4Mask;
4104
4105 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fLbr)
4106 {
4107 rc = VMXWriteVmcsNw(VMX_VMCS64_GUEST_DEBUGCTL_FULL, MSR_IA32_DEBUGCTL_LBR);
4108 AssertRC(rc);
4109 }
4110 return VINF_SUCCESS;
4111 }
4112 else
4113 LogRelFunc(("Failed to initialize VMCS auto-load/store MSR addresses. rc=%Rrc\n", rc));
4114 return rc;
4115}
4116
4117
4118/**
4119 * Sets up the initial exception bitmap in the VMCS based on static conditions.
4120 *
4121 * We shall set up those exception intercepts that don't change during the
4122 * lifetime of the VM here. The rest are done dynamically while loading the
4123 * guest state.
4124 *
4125 * @param pVCpu The cross context virtual CPU structure.
4126 * @param pVmcsInfo The VMCS info. object.
4127 */
4128static void hmR0VmxSetupVmcsXcptBitmap(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
4129{
4130 /*
4131 * The following exceptions are always intercepted:
4132 *
4133 * #AC - To prevent the guest from hanging the CPU and for dealing with
4134 * split-lock detecting host configs.
4135 * #DB - To maintain the DR6 state even when intercepting DRx reads/writes and
4136 * recursive #DBs can cause a CPU hang.
4137 * #PF - To sync our shadow page tables when nested-paging is not used.
4138 */
4139 bool const fNestedPaging = pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging;
4140 uint32_t const uXcptBitmap = RT_BIT(X86_XCPT_AC)
4141 | RT_BIT(X86_XCPT_DB)
4142 | (fNestedPaging ? 0 : RT_BIT(X86_XCPT_PF));
4143
4144 /* Commit it to the VMCS. */
4145 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
4146 AssertRC(rc);
4147
4148 /* Update our cache of the exception bitmap. */
4149 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
4150}
4151
4152
4153#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4154/**
4155 * Sets up the VMCS for executing a nested-guest using hardware-assisted VMX.
4156 *
4157 * @returns VBox status code.
4158 * @param pVmcsInfo The VMCS info. object.
4159 */
4160static int hmR0VmxSetupVmcsCtlsNested(PVMXVMCSINFO pVmcsInfo)
4161{
4162 Assert(pVmcsInfo->u64VmcsLinkPtr == NIL_RTHCPHYS);
4163 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS);
4164 AssertRC(rc);
4165
4166 rc = hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(pVmcsInfo);
4167 if (RT_SUCCESS(rc))
4168 {
4169 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)
4170 hmR0VmxSetupVmcsMsrBitmapAddr(pVmcsInfo);
4171
4172 /* Paranoia - We've not yet initialized these, they shall be done while merging the VMCS. */
4173 Assert(!pVmcsInfo->u64Cr0Mask);
4174 Assert(!pVmcsInfo->u64Cr4Mask);
4175 return VINF_SUCCESS;
4176 }
4177 LogRelFunc(("Failed to set up the VMCS link pointer in the nested-guest VMCS. rc=%Rrc\n", rc));
4178 return rc;
4179}
4180#endif
4181
4182
4183/**
4184 * Sets pfnStartVm to the best suited variant.
4185 *
4186 * This must be called whenever anything changes relative to the hmR0VmxStartVm
4187 * variant selection:
4188 * - pVCpu->hm.s.fLoadSaveGuestXcr0
4189 * - HM_WSF_IBPB_ENTRY in pVCpu->hmr0.s.fWorldSwitcher
4190 * - HM_WSF_IBPB_EXIT in pVCpu->hmr0.s.fWorldSwitcher
4191 * - Perhaps: CPUMIsGuestFPUStateActive() (windows only)
4192 * - Perhaps: CPUMCTX.fXStateMask (windows only)
4193 *
4194 * We currently ASSUME that neither HM_WSF_IBPB_ENTRY nor HM_WSF_IBPB_EXIT
4195 * can be changed at runtime.
4196 */
4197static void hmR0VmxUpdateStartVmFunction(PVMCPUCC pVCpu)
4198{
4199 static const struct CLANGWORKAROUND { PFNHMVMXSTARTVM pfn; } s_aHmR0VmxStartVmFunctions[] =
4200 {
4201 { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },
4202 { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },
4203 { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },
4204 { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_SansIbpbExit },
4205 { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },
4206 { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },
4207 { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },
4208 { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_SansIbpbExit },
4209 { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },
4210 { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },
4211 { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },
4212 { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit },
4213 { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },
4214 { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },
4215 { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },
4216 { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_SansIbpbExit },
4217 { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },
4218 { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },
4219 { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },
4220 { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_SansMdsEntry_WithIbpbExit },
4221 { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },
4222 { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },
4223 { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },
4224 { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_SansMdsEntry_WithIbpbExit },
4225 { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },
4226 { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },
4227 { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },
4228 { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_WithIbpbExit },
4229 { hmR0VmxStartVm_SansXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },
4230 { hmR0VmxStartVm_WithXcr0_SansIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },
4231 { hmR0VmxStartVm_SansXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },
4232 { hmR0VmxStartVm_WithXcr0_WithIbpbEntry_WithL1dEntry_WithMdsEntry_WithIbpbExit },
4233 };
4234 uintptr_t const idx = (pVCpu->hmr0.s.fLoadSaveGuestXcr0 ? 1 : 0)
4235 | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_ENTRY ? 2 : 0)
4236 | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_L1D_ENTRY ? 4 : 0)
4237 | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_MDS_ENTRY ? 8 : 0)
4238 | (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_IBPB_EXIT ? 16 : 0);
4239 PFNHMVMXSTARTVM const pfnStartVm = s_aHmR0VmxStartVmFunctions[idx].pfn;
4240 if (pVCpu->hmr0.s.vmx.pfnStartVm != pfnStartVm)
4241 pVCpu->hmr0.s.vmx.pfnStartVm = pfnStartVm;
4242}
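
/*
 * Worked example of the table index composed above (flag values are illustrative):
 *
 *      fLoadSaveGuestXcr0 = true                                   ->  bit 0 (1)
 *      fWorldSwitcher     = HM_WSF_IBPB_ENTRY | HM_WSF_MDS_ENTRY   ->  bits 1 and 3 (2 + 8)
 *
 *      idx = 1 | 2 | 8 = 11  ->  hmR0VmxStartVm_WithXcr0_WithIbpbEntry_SansL1dEntry_WithMdsEntry_SansIbpbExit
 */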
4243
4244
4245/**
4246 * Selector FNHMVMXSTARTVM implementation.
4247 */
4248static DECLCALLBACK(int) hmR0VmxStartVmSelector(PVMXVMCSINFO pVmcsInfo, PVMCPUCC pVCpu, bool fResume)
4249{
4250 hmR0VmxUpdateStartVmFunction(pVCpu);
4251 return pVCpu->hmr0.s.vmx.pfnStartVm(pVmcsInfo, pVCpu, fResume);
4252}
4253
4254
4255/**
4256 * Sets up the VMCS for executing a guest (or nested-guest) using hardware-assisted
4257 * VMX.
4258 *
4259 * @returns VBox status code.
4260 * @param pVCpu The cross context virtual CPU structure.
4261 * @param pVmcsInfo The VMCS info. object.
4262 * @param fIsNstGstVmcs Whether this is a nested-guest VMCS.
4263 */
4264static int hmR0VmxSetupVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs)
4265{
4266 Assert(pVmcsInfo->pvVmcs);
4267 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4268
4269 /* Set the CPU specified revision identifier at the beginning of the VMCS structure. */
4270 *(uint32_t *)pVmcsInfo->pvVmcs = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID);
4271 const char * const pszVmcs = fIsNstGstVmcs ? "nested-guest VMCS" : "guest VMCS";
4272
4273 LogFlowFunc(("\n"));
4274
4275 /*
4276 * Initialize the VMCS using VMCLEAR before loading the VMCS.
4277 * See Intel spec. 31.6 "Preparation And Launching A Virtual Machine".
4278 */
4279 int rc = hmR0VmxClearVmcs(pVmcsInfo);
4280 if (RT_SUCCESS(rc))
4281 {
4282 rc = hmR0VmxLoadVmcs(pVmcsInfo);
4283 if (RT_SUCCESS(rc))
4284 {
4285 /*
4286 * Initialize the hardware-assisted VMX execution handler for guest and nested-guest VMCS.
4287 * The host is always 64-bit since we no longer support 32-bit hosts.
4288 * Currently we have just a single handler for all guest modes as well, see @bugref{6208#c73}.
4289 */
4290 if (!fIsNstGstVmcs)
4291 {
4292 rc = hmR0VmxSetupVmcsPinCtls(pVCpu, pVmcsInfo);
4293 if (RT_SUCCESS(rc))
4294 {
4295 rc = hmR0VmxSetupVmcsProcCtls(pVCpu, pVmcsInfo);
4296 if (RT_SUCCESS(rc))
4297 {
4298 rc = hmR0VmxSetupVmcsMiscCtls(pVCpu, pVmcsInfo);
4299 if (RT_SUCCESS(rc))
4300 {
4301 hmR0VmxSetupVmcsXcptBitmap(pVCpu, pVmcsInfo);
4302#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4303 /*
4304 * If a shadow VMCS is allocated for the VMCS info. object, initialize the
4305 * VMCS revision ID and shadow VMCS indicator bit. Also, clear the VMCS
4306 * making it fit for use when VMCS shadowing is later enabled.
4307 */
4308 if (pVmcsInfo->pvShadowVmcs)
4309 {
4310 VMXVMCSREVID VmcsRevId;
4311 VmcsRevId.u = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID);
4312 VmcsRevId.n.fIsShadowVmcs = 1;
4313 *(uint32_t *)pVmcsInfo->pvShadowVmcs = VmcsRevId.u;
4314 rc = hmR0VmxClearShadowVmcs(pVmcsInfo);
4315 if (RT_SUCCESS(rc))
4316 { /* likely */ }
4317 else
4318 LogRelFunc(("Failed to initialize shadow VMCS. rc=%Rrc\n", rc));
4319 }
4320#endif
4321 }
4322 else
4323 LogRelFunc(("Failed to setup miscellaneous controls. rc=%Rrc\n", rc));
4324 }
4325 else
4326 LogRelFunc(("Failed to setup processor-based VM-execution controls. rc=%Rrc\n", rc));
4327 }
4328 else
4329 LogRelFunc(("Failed to setup pin-based controls. rc=%Rrc\n", rc));
4330 }
4331 else
4332 {
4333#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4334 rc = hmR0VmxSetupVmcsCtlsNested(pVmcsInfo);
4335 if (RT_SUCCESS(rc))
4336 { /* likely */ }
4337 else
4338 LogRelFunc(("Failed to initialize nested-guest VMCS. rc=%Rrc\n", rc));
4339#else
4340 AssertFailed();
4341#endif
4342 }
4343 }
4344 else
4345            LogRelFunc(("Failed to load the %s. rc=%Rrc\n", pszVmcs, rc));
4346 }
4347 else
4348        LogRelFunc(("Failed to clear the %s. rc=%Rrc\n", pszVmcs, rc));
4349
4350 /* Sync any CPU internal VMCS data back into our VMCS in memory. */
4351 if (RT_SUCCESS(rc))
4352 {
4353 rc = hmR0VmxClearVmcs(pVmcsInfo);
4354 if (RT_SUCCESS(rc))
4355 { /* likely */ }
4356 else
4357            LogRelFunc(("Failed to clear the %s post setup. rc=%Rrc\n", pszVmcs, rc));
4358 }
4359
4360 /*
4361 * Update the last-error record both for failures and success, so we
4362 * can propagate the status code back to ring-3 for diagnostics.
4363 */
4364 hmR0VmxUpdateErrorRecord(pVCpu, rc);
4365 NOREF(pszVmcs);
4366 return rc;
4367}
4368
4369
4370/**
4371 * Does global VT-x initialization (called during module initialization).
4372 *
4373 * @returns VBox status code.
4374 */
4375VMMR0DECL(int) VMXR0GlobalInit(void)
4376{
4377#ifdef HMVMX_USE_FUNCTION_TABLE
4378 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_aVMExitHandlers));
4379# ifdef VBOX_STRICT
4380 for (unsigned i = 0; i < RT_ELEMENTS(g_aVMExitHandlers); i++)
4381 Assert(g_aVMExitHandlers[i].pfn);
4382# endif
4383#endif
4384 return VINF_SUCCESS;
4385}
4386
4387
4388/**
4389 * Does global VT-x termination (called during module termination).
4390 */
4391VMMR0DECL(void) VMXR0GlobalTerm()
4392{
4393 /* Nothing to do currently. */
4394}
4395
4396
4397/**
4398 * Sets up and activates VT-x on the current CPU.
4399 *
4400 * @returns VBox status code.
4401 * @param pHostCpu The HM physical-CPU structure.
4402 * @param pVM The cross context VM structure. Can be
4403 * NULL after a host resume operation.
4404 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
4405 * fEnabledByHost is @c true).
4406 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
4407 * @a fEnabledByHost is @c true).
4408 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
4409 * enable VT-x on the host.
4410 * @param pHwvirtMsrs Pointer to the hardware-virtualization MSRs.
4411 */
4412VMMR0DECL(int) VMXR0EnableCpu(PHMPHYSCPU pHostCpu, PVMCC pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
4413 PCSUPHWVIRTMSRS pHwvirtMsrs)
4414{
4415 AssertPtr(pHostCpu);
4416 AssertPtr(pHwvirtMsrs);
4417 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4418
4419 /* Enable VT-x if it's not already enabled by the host. */
4420 if (!fEnabledByHost)
4421 {
4422 int rc = hmR0VmxEnterRootMode(pHostCpu, pVM, HCPhysCpuPage, pvCpuPage);
4423 if (RT_FAILURE(rc))
4424 return rc;
4425 }
4426
4427 /*
4428     * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor has been
4429 * using EPTPs) so we don't retain any stale guest-physical mappings which won't get
4430 * invalidated when flushing by VPID.
4431 */
4432 if (pHwvirtMsrs->u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
4433 {
4434 hmR0VmxFlushEpt(NULL /* pVCpu */, NULL /* pVmcsInfo */, VMXTLBFLUSHEPT_ALL_CONTEXTS);
4435 pHostCpu->fFlushAsidBeforeUse = false;
4436 }
4437 else
4438 pHostCpu->fFlushAsidBeforeUse = true;
4439
4440 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
4441 ++pHostCpu->cTlbFlushes;
4442
4443 return VINF_SUCCESS;
4444}
4445
4446
4447/**
4448 * Deactivates VT-x on the current CPU.
4449 *
4450 * @returns VBox status code.
4451 * @param pHostCpu The HM physical-CPU structure.
4452 * @param pvCpuPage Pointer to the VMXON region.
4453 * @param HCPhysCpuPage Physical address of the VMXON region.
4454 *
4455 * @remarks This function should never be called when SUPR0EnableVTx() or
4456 * similar was used to enable VT-x on the host.
4457 */
4458VMMR0DECL(int) VMXR0DisableCpu(PHMPHYSCPU pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
4459{
4460 RT_NOREF2(pvCpuPage, HCPhysCpuPage);
4461
4462 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4463 return hmR0VmxLeaveRootMode(pHostCpu);
4464}
4465
4466
4467/**
4468 * Does per-VM VT-x initialization.
4469 *
4470 * @returns VBox status code.
4471 * @param pVM The cross context VM structure.
4472 */
4473VMMR0DECL(int) VMXR0InitVM(PVMCC pVM)
4474{
4475 AssertPtr(pVM);
4476 LogFlowFunc(("pVM=%p\n", pVM));
4477
4478 hmR0VmxStructsInit(pVM);
4479 int rc = hmR0VmxStructsAlloc(pVM);
4480 if (RT_FAILURE(rc))
4481 {
4482        LogRelFunc(("Failed to allocate VMX structures. rc=%Rrc\n", rc));
4483 return rc;
4484 }
4485
4486 /* Setup the crash dump page. */
4487#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4488 strcpy((char *)pVM->hmr0.s.vmx.pbScratch, "SCRATCH Magic");
4489 *(uint64_t *)(pVM->hmr0.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
4490#endif
4491 return VINF_SUCCESS;
4492}
4493
4494
4495/**
4496 * Does per-VM VT-x termination.
4497 *
4498 * @returns VBox status code.
4499 * @param pVM The cross context VM structure.
4500 */
4501VMMR0DECL(int) VMXR0TermVM(PVMCC pVM)
4502{
4503 AssertPtr(pVM);
4504 LogFlowFunc(("pVM=%p\n", pVM));
4505
4506#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4507 if (pVM->hmr0.s.vmx.pbScratch)
4508 RT_BZERO(pVM->hmr0.s.vmx.pbScratch, X86_PAGE_4K_SIZE);
4509#endif
4510 hmR0VmxStructsFree(pVM);
4511 return VINF_SUCCESS;
4512}
4513
4514
4515/**
4516 * Sets up the VM for execution using hardware-assisted VMX.
4517 * This function is only called once per-VM during initialization.
4518 *
4519 * @returns VBox status code.
4520 * @param pVM The cross context VM structure.
4521 */
4522VMMR0DECL(int) VMXR0SetupVM(PVMCC pVM)
4523{
4524 AssertPtr(pVM);
4525 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4526
4527 LogFlowFunc(("pVM=%p\n", pVM));
4528
4529 /*
4530 * At least verify if VMX is enabled, since we can't check if we're in VMX root mode or not
4531 * without causing a #GP.
4532 */
4533 RTCCUINTREG const uHostCr4 = ASMGetCR4();
4534 if (RT_LIKELY(uHostCr4 & X86_CR4_VMXE))
4535 { /* likely */ }
4536 else
4537 return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
4538
4539 /*
4540 * Check that nested paging is supported if enabled and copy over the flag to the
4541 * ring-0 only structure.
4542 */
4543 bool const fNestedPaging = pVM->hm.s.fNestedPagingCfg;
4544 AssertReturn( !fNestedPaging
4545 || (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_EPT), /** @todo use a ring-0 copy of ProcCtls2.n.allowed1 */
4546 VERR_INCOMPATIBLE_CONFIG);
4547 pVM->hmr0.s.fNestedPaging = fNestedPaging;
4548 pVM->hmr0.s.fAllow64BitGuests = pVM->hm.s.fAllow64BitGuestsCfg;
4549
4550 /*
4551 * Without unrestricted guest execution, pRealModeTSS and pNonPagingModeEPTPageTable *must*
4552 * always be allocated. We no longer support the highly unlikely case of unrestricted guest
4553 * without pRealModeTSS, see hmR3InitFinalizeR0Intel().
4554 */
4555 bool const fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuestCfg;
4556 AssertReturn( !fUnrestrictedGuest
4557 || ( (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
4558 && fNestedPaging),
4559 VERR_INCOMPATIBLE_CONFIG);
4560 if ( !fUnrestrictedGuest
4561 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
4562 || !pVM->hm.s.vmx.pRealModeTSS))
4563 {
4564 LogRelFunc(("Invalid real-on-v86 state.\n"));
4565 return VERR_INTERNAL_ERROR;
4566 }
4567 pVM->hmr0.s.vmx.fUnrestrictedGuest = fUnrestrictedGuest;
4568
4569    /* Always initialize these; see hmR3InitFinalizeR0(). */
4570 pVM->hm.s.ForR3.vmx.enmTlbFlushEpt = pVM->hmr0.s.vmx.enmTlbFlushEpt = VMXTLBFLUSHEPT_NONE;
4571 pVM->hm.s.ForR3.vmx.enmTlbFlushVpid = pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NONE;
4572
4573 /* Setup the tagged-TLB flush handlers. */
4574 int rc = hmR0VmxSetupTaggedTlb(pVM);
4575 if (RT_FAILURE(rc))
4576 {
4577 LogRelFunc(("Failed to setup tagged TLB. rc=%Rrc\n", rc));
4578 return rc;
4579 }
4580
4581 /* Determine LBR capabilities. */
4582 pVM->hmr0.s.vmx.fLbr = pVM->hm.s.vmx.fLbrCfg;
4583 if (pVM->hmr0.s.vmx.fLbr)
4584 {
4585 rc = hmR0VmxSetupLbrMsrRange(pVM);
4586 if (RT_FAILURE(rc))
4587 {
4588 LogRelFunc(("Failed to setup LBR MSR range. rc=%Rrc\n", rc));
4589 return rc;
4590 }
4591 }
4592
4593#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4594 /* Setup the shadow VMCS fields array and VMREAD/VMWRITE bitmaps. */
4595 if (pVM->hmr0.s.vmx.fUseVmcsShadowing)
4596 {
4597 rc = hmR0VmxSetupShadowVmcsFieldsArrays(pVM);
4598 if (RT_SUCCESS(rc))
4599 hmR0VmxSetupVmreadVmwriteBitmaps(pVM);
4600 else
4601 {
4602 LogRelFunc(("Failed to setup shadow VMCS fields arrays. rc=%Rrc\n", rc));
4603 return rc;
4604 }
4605 }
4606#endif
4607
4608 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
4609 {
4610 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
4611 Log4Func(("pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
4612
4613 pVCpu->hmr0.s.vmx.pfnStartVm = hmR0VmxStartVmSelector;
4614
4615 rc = hmR0VmxSetupVmcs(pVCpu, &pVCpu->hmr0.s.vmx.VmcsInfo, false /* fIsNstGstVmcs */);
4616 if (RT_SUCCESS(rc))
4617 {
4618#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4619 if (pVM->cpum.ro.GuestFeatures.fVmx)
4620 {
4621 rc = hmR0VmxSetupVmcs(pVCpu, &pVCpu->hmr0.s.vmx.VmcsInfoNstGst, true /* fIsNstGstVmcs */);
4622 if (RT_SUCCESS(rc))
4623 { /* likely */ }
4624 else
4625 {
4626 LogRelFunc(("Nested-guest VMCS setup failed. rc=%Rrc\n", rc));
4627 return rc;
4628 }
4629 }
4630#endif
4631 }
4632 else
4633 {
4634 LogRelFunc(("VMCS setup failed. rc=%Rrc\n", rc));
4635 return rc;
4636 }
4637 }
4638
4639 return VINF_SUCCESS;
4640}
4641
4642
4643/**
4644 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
4645 * the VMCS.
4646 * @returns CR4 for passing along to hmR0VmxExportHostSegmentRegs.
4647 */
4648static uint64_t hmR0VmxExportHostControlRegs(void)
4649{
4650 int rc = VMXWriteVmcsNw(VMX_VMCS_HOST_CR0, ASMGetCR0()); AssertRC(rc);
4651 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_CR3, ASMGetCR3()); AssertRC(rc);
4652 uint64_t uHostCr4 = ASMGetCR4();
4653 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_CR4, uHostCr4); AssertRC(rc);
4654 return uHostCr4;
4655}
4656
4657
4658/**
4659 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
4660 * the host-state area in the VMCS.
4661 *
4662 * @returns VBox status code.
4663 * @param pVCpu The cross context virtual CPU structure.
4664 * @param uHostCr4 The host CR4 value.
4665 */
4666static int hmR0VmxExportHostSegmentRegs(PVMCPUCC pVCpu, uint64_t uHostCr4)
4667{
4668 /*
4669 * If we've executed guest code using hardware-assisted VMX, the host-state bits
4670 * will be messed up. We should -not- save the messed up state without restoring
4671 * the original host-state, see @bugref{7240}.
4672 *
4673     * This apparently can happen (most likely the FPU changes); deal with it rather than
4674     * asserting. This was observed while booting a Solaris 10u10 32-bit guest.
4675 */
4676 if (pVCpu->hmr0.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
4677 {
4678 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hmr0.s.vmx.fRestoreHostFlags,
4679 pVCpu->idCpu));
4680 VMXRestoreHostState(pVCpu->hmr0.s.vmx.fRestoreHostFlags, &pVCpu->hmr0.s.vmx.RestoreHost);
4681 pVCpu->hmr0.s.vmx.fRestoreHostFlags = 0;
4682 }
4683
4684 /*
4685 * Get all the host info.
4686 * ASSUME it is safe to use rdfsbase and friends if the CR4.FSGSBASE bit is set
4687 * without also checking the cpuid bit.
4688 */
4689 uint32_t fRestoreHostFlags;
4690#if RT_INLINE_ASM_EXTERNAL
4691 if (uHostCr4 & X86_CR4_FSGSBASE)
4692 {
4693 hmR0VmxExportHostSegmentRegsAsmHlp(&pVCpu->hmr0.s.vmx.RestoreHost, true /*fHaveFsGsBase*/);
4694 fRestoreHostFlags = VMX_RESTORE_HOST_CAN_USE_WRFSBASE_AND_WRGSBASE;
4695 }
4696 else
4697 {
4698 hmR0VmxExportHostSegmentRegsAsmHlp(&pVCpu->hmr0.s.vmx.RestoreHost, false /*fHaveFsGsBase*/);
4699 fRestoreHostFlags = 0;
4700 }
4701 RTSEL uSelES = pVCpu->hmr0.s.vmx.RestoreHost.uHostSelES;
4702 RTSEL uSelDS = pVCpu->hmr0.s.vmx.RestoreHost.uHostSelDS;
4703 RTSEL uSelFS = pVCpu->hmr0.s.vmx.RestoreHost.uHostSelFS;
4704 RTSEL uSelGS = pVCpu->hmr0.s.vmx.RestoreHost.uHostSelGS;
4705#else
4706 pVCpu->hmr0.s.vmx.RestoreHost.uHostSelTR = ASMGetTR();
4707 pVCpu->hmr0.s.vmx.RestoreHost.uHostSelSS = ASMGetSS();
4708 pVCpu->hmr0.s.vmx.RestoreHost.uHostSelCS = ASMGetCS();
4709 ASMGetGDTR((PRTGDTR)&pVCpu->hmr0.s.vmx.RestoreHost.HostGdtr);
4710 ASMGetIDTR((PRTIDTR)&pVCpu->hmr0.s.vmx.RestoreHost.HostIdtr);
4711 if (uHostCr4 & X86_CR4_FSGSBASE)
4712 {
4713 pVCpu->hmr0.s.vmx.RestoreHost.uHostFSBase = ASMGetFSBase();
4714 pVCpu->hmr0.s.vmx.RestoreHost.uHostGSBase = ASMGetGSBase();
4715 fRestoreHostFlags = VMX_RESTORE_HOST_CAN_USE_WRFSBASE_AND_WRGSBASE;
4716 }
4717 else
4718 {
4719 pVCpu->hmr0.s.vmx.RestoreHost.uHostFSBase = ASMRdMsr(MSR_K8_FS_BASE);
4720 pVCpu->hmr0.s.vmx.RestoreHost.uHostGSBase = ASMRdMsr(MSR_K8_GS_BASE);
4721 fRestoreHostFlags = 0;
4722 }
4723 RTSEL uSelES, uSelDS, uSelFS, uSelGS;
4724 pVCpu->hmr0.s.vmx.RestoreHost.uHostSelDS = uSelDS = ASMGetDS();
4725 pVCpu->hmr0.s.vmx.RestoreHost.uHostSelES = uSelES = ASMGetES();
4726 pVCpu->hmr0.s.vmx.RestoreHost.uHostSelFS = uSelFS = ASMGetFS();
4727 pVCpu->hmr0.s.vmx.RestoreHost.uHostSelGS = uSelGS = ASMGetGS();
4728#endif
4729
4730 /*
4731     * Determine if the host segment registers are suitable for VT-x. Otherwise load null
4732     * selectors (zero) so that VM-entry succeeds, and restore the real values before we get preempted.
4733 *
4734 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
4735 */
4736 RTSEL const uSelAll = uSelFS | uSelGS | uSelES | uSelDS;
4737 if (uSelAll & (X86_SEL_RPL | X86_SEL_LDT))
4738 {
4739 if (!(uSelAll & X86_SEL_LDT))
4740 {
4741#define VMXLOCAL_ADJUST_HOST_SEG(a_Seg, a_uVmcsVar) \
4742 do { \
4743 (a_uVmcsVar) = pVCpu->hmr0.s.vmx.RestoreHost.uHostSel##a_Seg; \
4744 if ((a_uVmcsVar) & X86_SEL_RPL) \
4745 { \
4746 fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##a_Seg; \
4747 (a_uVmcsVar) = 0; \
4748 } \
4749 } while (0)
4750 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS);
4751 VMXLOCAL_ADJUST_HOST_SEG(ES, uSelES);
4752 VMXLOCAL_ADJUST_HOST_SEG(FS, uSelFS);
4753 VMXLOCAL_ADJUST_HOST_SEG(GS, uSelGS);
4754#undef VMXLOCAL_ADJUST_HOST_SEG
4755 }
4756 else
4757 {
4758#define VMXLOCAL_ADJUST_HOST_SEG(a_Seg, a_uVmcsVar) \
4759 do { \
4760 (a_uVmcsVar) = pVCpu->hmr0.s.vmx.RestoreHost.uHostSel##a_Seg; \
4761 if ((a_uVmcsVar) & (X86_SEL_RPL | X86_SEL_LDT)) \
4762 { \
4763 if (!((a_uVmcsVar) & X86_SEL_LDT)) \
4764 fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##a_Seg; \
4765 else \
4766 { \
4767 uint32_t const fAttr = ASMGetSegAttr(a_uVmcsVar); \
4768 if ((fAttr & X86_DESC_P) && fAttr != UINT32_MAX) \
4769 fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##a_Seg; \
4770 } \
4771 (a_uVmcsVar) = 0; \
4772 } \
4773 } while (0)
4774 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS);
4775 VMXLOCAL_ADJUST_HOST_SEG(ES, uSelES);
4776 VMXLOCAL_ADJUST_HOST_SEG(FS, uSelFS);
4777 VMXLOCAL_ADJUST_HOST_SEG(GS, uSelGS);
4778#undef VMXLOCAL_ADJUST_HOST_SEG
4779 }
4780 }
4781
4782 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
4783 Assert(!(pVCpu->hmr0.s.vmx.RestoreHost.uHostSelTR & X86_SEL_RPL)); Assert(!(pVCpu->hmr0.s.vmx.RestoreHost.uHostSelTR & X86_SEL_LDT)); Assert(pVCpu->hmr0.s.vmx.RestoreHost.uHostSelTR);
4784 Assert(!(pVCpu->hmr0.s.vmx.RestoreHost.uHostSelCS & X86_SEL_RPL)); Assert(!(pVCpu->hmr0.s.vmx.RestoreHost.uHostSelCS & X86_SEL_LDT)); Assert(pVCpu->hmr0.s.vmx.RestoreHost.uHostSelCS);
4785 Assert(!(pVCpu->hmr0.s.vmx.RestoreHost.uHostSelSS & X86_SEL_RPL)); Assert(!(pVCpu->hmr0.s.vmx.RestoreHost.uHostSelSS & X86_SEL_LDT));
4786 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
4787 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
4788 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
4789 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
4790
4791 /*
4792     * Determine if we need to manually restore the GDTR and IDTR limits as VT-x zaps
4793 * them to the maximum limit (0xffff) on every VM-exit.
4794 */
4795 if (pVCpu->hmr0.s.vmx.RestoreHost.HostGdtr.cb != 0xffff)
4796 fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
4797
4798 /*
4799 * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT" and
4800 * Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore if the host has the limit
4801 * as 0xfff, VT-x bloating the limit to 0xffff shouldn't cause any different CPU behavior.
4802     * However, several hosts either insist on 0xfff being the limit (Windows Patch Guard) or
4803     * use the limit for other purposes (darwin puts the CPU ID in there but botches sidt
4804 * alignment in at least one consumer). So, we're only allowing the IDTR.LIMIT to be left
4805 * at 0xffff on hosts where we are sure it won't cause trouble.
4806 */
4807#if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
4808 if (pVCpu->hmr0.s.vmx.RestoreHost.HostIdtr.cb < 0x0fff)
4809#else
4810 if (pVCpu->hmr0.s.vmx.RestoreHost.HostIdtr.cb != 0xffff)
4811#endif
4812 fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
4813
4814 /*
4815 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI
4816 * and RPL bits is effectively what the CPU does for "scaling by 8". TI is always 0 and
4817 * RPL should be too in most cases.
4818 */
4819 RTSEL const uSelTR = pVCpu->hmr0.s.vmx.RestoreHost.uHostSelTR;
4820 AssertMsgReturn((uSelTR | X86_SEL_RPL_LDT) <= pVCpu->hmr0.s.vmx.RestoreHost.HostGdtr.cb,
4821 ("TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, pVCpu->hmr0.s.vmx.RestoreHost.HostGdtr.cb),
4822 VERR_VMX_INVALID_HOST_STATE);
4823
4824 PCX86DESCHC pDesc = (PCX86DESCHC)(pVCpu->hmr0.s.vmx.RestoreHost.HostGdtr.uAddr + (uSelTR & X86_SEL_MASK));
4825 uintptr_t const uTRBase = X86DESC64_BASE(pDesc);
4826
4827 /*
4828 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on
4829 * all VM-exits. The type is the same for 64-bit busy TSS[1]. The limit needs manual
4830 * restoration if the host has something else. Task switching is not supported in 64-bit
4831 * mode[2], but the limit still matters as IOPM is supported in 64-bit mode. Restoring the
4832 * limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
4833 *
4834 * [1] See Intel spec. 3.5 "System Descriptor Types".
4835 * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
4836 */
4837 Assert(pDesc->System.u4Type == 11);
4838 if ( pDesc->System.u16LimitLow != 0x67
4839 || pDesc->System.u4LimitHigh)
4840 {
4841 fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
4842
4843 /* If the host has made GDT read-only, we would need to temporarily toggle CR0.WP before writing the GDT. */
4844 if (g_fHmHostKernelFeatures & SUPKERNELFEATURES_GDT_READ_ONLY)
4845 fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_READ_ONLY;
4846 if (g_fHmHostKernelFeatures & SUPKERNELFEATURES_GDT_NEED_WRITABLE)
4847 {
4848 /* The GDT is read-only but the writable GDT is available. */
4849 fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_NEED_WRITABLE;
4850 pVCpu->hmr0.s.vmx.RestoreHost.HostGdtrRw.cb = pVCpu->hmr0.s.vmx.RestoreHost.HostGdtr.cb;
4851 int rc = SUPR0GetCurrentGdtRw(&pVCpu->hmr0.s.vmx.RestoreHost.HostGdtrRw.uAddr);
4852 AssertRCReturn(rc, rc);
4853 }
4854 }
4855
4856 pVCpu->hmr0.s.vmx.fRestoreHostFlags = fRestoreHostFlags;
4857
4858 /*
4859 * Do all the VMCS updates in one block to assist nested virtualization.
4860 */
4861 int rc;
4862 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_CS_SEL, pVCpu->hmr0.s.vmx.RestoreHost.uHostSelCS); AssertRC(rc);
4863 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_SS_SEL, pVCpu->hmr0.s.vmx.RestoreHost.uHostSelSS); AssertRC(rc);
4864 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_DS_SEL, uSelDS); AssertRC(rc);
4865 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_ES_SEL, uSelES); AssertRC(rc);
4866 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_FS_SEL, uSelFS); AssertRC(rc);
4867 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_GS_SEL, uSelGS); AssertRC(rc);
4868 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_TR_SEL, pVCpu->hmr0.s.vmx.RestoreHost.uHostSelTR); AssertRC(rc);
4869 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_GDTR_BASE, pVCpu->hmr0.s.vmx.RestoreHost.HostGdtr.uAddr); AssertRC(rc);
4870 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_IDTR_BASE, pVCpu->hmr0.s.vmx.RestoreHost.HostIdtr.uAddr); AssertRC(rc);
4871 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_TR_BASE, uTRBase); AssertRC(rc);
4872 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_FS_BASE, pVCpu->hmr0.s.vmx.RestoreHost.uHostFSBase); AssertRC(rc);
4873 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_GS_BASE, pVCpu->hmr0.s.vmx.RestoreHost.uHostGSBase); AssertRC(rc);
4874
4875 return VINF_SUCCESS;
4876}
4877
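/*
 * Illustrative sketch (not part of the build): how the TR selector is turned into a byte
 * offset into the GDT in hmR0VmxExportHostSegmentRegs() above. Clearing the TI and RPL
 * bits leaves the descriptor index scaled by 8, which is exactly what the CPU does when
 * it loads the descriptor. The helper name is hypothetical.
 */
#if 0 /* example only */
static uintptr_t vmxExampleTssDescAddr(uintptr_t uGdtBase, RTSEL uSelTR)
{
    uintptr_t const offDesc = uSelTR & X86_SEL_MASK; /* Mask off TI (bit 2) and RPL (bits 1:0). */
    return uGdtBase + offDesc;                       /* Address of the TSS descriptor (16 bytes in long mode). */
}
#endif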
4878
4879/**
4880 * Exports certain host MSRs in the VM-exit MSR-load area and some in the
4881 * host-state area of the VMCS.
4882 *
4883 * These MSRs will be automatically restored on the host after every successful
4884 * VM-exit.
4885 *
4886 * @param pVCpu The cross context virtual CPU structure.
4887 *
4888 * @remarks No-long-jump zone!!!
4889 */
4890static void hmR0VmxExportHostMsrs(PVMCPUCC pVCpu)
4891{
4892 AssertPtr(pVCpu);
4893
4894 /*
4895 * Save MSRs that we restore lazily (due to preemption or transition to ring-3)
4896 * rather than swapping them on every VM-entry.
4897 */
4898 hmR0VmxLazySaveHostMsrs(pVCpu);
4899
4900 /*
4901 * Host Sysenter MSRs.
4902 */
4903 int rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS)); AssertRC(rc);
4904 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP)); AssertRC(rc);
4905 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP)); AssertRC(rc);
4906
4907 /*
4908 * Host EFER MSR.
4909 *
4910 * If the CPU supports the newer VMCS controls for managing EFER, use it. Otherwise it's
4911 * done as part of auto-load/store MSR area in the VMCS, see hmR0VmxExportGuestMsrs().
4912 */
4913 if (g_fHmVmxSupportsVmcsEfer)
4914 {
4915 rc = VMXWriteVmcs64(VMX_VMCS64_HOST_EFER_FULL, g_uHmVmxHostMsrEfer);
4916 AssertRC(rc);
4917 }
4918
4919 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see
4920 * hmR0VmxExportGuestEntryExitCtls(). */
4921}
4922
4923
4924/**
4925 * Figures out if we need to swap the EFER MSR which is particularly expensive.
4926 *
4927 * We check all relevant bits. For now, that's everything besides LMA/LME, as
4928 * these two bits are handled by VM-entry, see hmR0VmxExportGuestEntryExitCtls().
4929 *
4930 * @returns true if we need to load guest EFER, false otherwise.
4931 * @param pVCpu The cross context virtual CPU structure.
4932 * @param pVmxTransient The VMX-transient structure.
4933 *
4934 * @remarks Requires EFER, CR4.
4935 * @remarks No-long-jump zone!!!
4936 */
4937static bool hmR0VmxShouldSwapEferMsr(PCVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
4938{
4939#ifdef HMVMX_ALWAYS_SWAP_EFER
4940 RT_NOREF2(pVCpu, pVmxTransient);
4941 return true;
4942#else
4943 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
4944 uint64_t const u64HostEfer = g_uHmVmxHostMsrEfer;
4945 uint64_t const u64GuestEfer = pCtx->msrEFER;
4946
4947# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4948 /*
4949 * For nested-guests, we shall honor swapping the EFER MSR when requested by
4950 * the nested-guest.
4951 */
4952 if ( pVmxTransient->fIsNestedGuest
4953 && ( CPUMIsGuestVmxEntryCtlsSet(pCtx, VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4954 || CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_SAVE_EFER_MSR)
4955 || CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_LOAD_EFER_MSR)))
4956 return true;
4957# else
4958 RT_NOREF(pVmxTransient);
4959# endif
4960
4961 /*
4962 * For 64-bit guests, if EFER.SCE bit differs, we need to swap the EFER MSR
4963 * to ensure that the guest's SYSCALL behaviour isn't broken, see @bugref{7386}.
4964 */
4965 if ( CPUMIsGuestInLongModeEx(pCtx)
4966 && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
4967 return true;
4968
4969 /*
4970 * If the guest uses PAE and EFER.NXE bit differs, we need to swap the EFER MSR
4971 * as it affects guest paging. 64-bit paging implies CR4.PAE as well.
4972 *
4973 * See Intel spec. 4.5 "IA-32e Paging".
4974 * See Intel spec. 4.1.1 "Three Paging Modes".
4975 *
4976 * Verify that we always intercept CR4.PAE and CR0.PG bits, so we don't need to
4977 * import CR4 and CR0 from the VMCS here as those bits are always up to date.
4978 */
4979 Assert(hmR0VmxGetFixedCr4Mask(pVCpu) & X86_CR4_PAE);
4980 Assert(hmR0VmxGetFixedCr0Mask(pVCpu) & X86_CR0_PG);
4981 if ( (pCtx->cr4 & X86_CR4_PAE)
4982 && (pCtx->cr0 & X86_CR0_PG))
4983 {
4984 /*
4985 * If nested paging is not used, verify that the guest paging mode matches the
4986 * shadow paging mode which is/will be placed in the VMCS (which is what will
4987 * actually be used while executing the guest and not the CR4 shadow value).
4988 */
4989 AssertMsg( pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging
4990 || pVCpu->hm.s.enmShadowMode == PGMMODE_PAE
4991 || pVCpu->hm.s.enmShadowMode == PGMMODE_PAE_NX
4992 || pVCpu->hm.s.enmShadowMode == PGMMODE_AMD64
4993 || pVCpu->hm.s.enmShadowMode == PGMMODE_AMD64_NX,
4994 ("enmShadowMode=%u\n", pVCpu->hm.s.enmShadowMode));
4995 if ((u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
4996 {
4997 /* Verify that the host is NX capable. */
4998 Assert(pVCpu->CTX_SUFF(pVM)->cpum.ro.HostFeatures.fNoExecute);
4999 return true;
5000 }
5001 }
5002
5003 return false;
5004#endif
5005}
5006
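/*
 * Illustrative sketch (not part of the build): the core of the EFER-swap decision made in
 * hmR0VmxShouldSwapEferMsr() above, reduced to the two bit comparisons that matter once the
 * nested-guest and always-swap cases are out of the way. The helper name and the boolean
 * parameters are hypothetical stand-ins for the guest-context queries used above.
 */
#if 0 /* example only */
static bool vmxExampleNeedEferSwap(uint64_t uGuestEfer, uint64_t uHostEfer, bool fLongMode, bool fPaePaging)
{
    /* EFER.SCE must match for 64-bit guests so SYSCALL keeps working, see @bugref{7386}. */
    if (fLongMode && (uGuestEfer & MSR_K6_EFER_SCE) != (uHostEfer & MSR_K6_EFER_SCE))
        return true;
    /* EFER.NXE affects PAE/long-mode paging, so it must match while the guest pages with PAE. */
    if (fPaePaging && (uGuestEfer & MSR_K6_EFER_NXE) != (uHostEfer & MSR_K6_EFER_NXE))
        return true;
    return false;
}
#endif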
5007
5008/**
5009 * Exports the guest state with appropriate VM-entry and VM-exit controls in the
5010 * VMCS.
5011 *
5012 * This is typically required when the guest changes paging mode.
5013 *
5014 * @returns VBox status code.
5015 * @param pVCpu The cross context virtual CPU structure.
5016 * @param pVmxTransient The VMX-transient structure.
5017 *
5018 * @remarks Requires EFER.
5019 * @remarks No-long-jump zone!!!
5020 */
5021static int hmR0VmxExportGuestEntryExitCtls(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
5022{
5023 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS)
5024 {
5025 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5026 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5027
5028 /*
5029 * VM-entry controls.
5030 */
5031 {
5032 uint32_t fVal = g_HmMsrs.u.vmx.EntryCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
5033 uint32_t const fZap = g_HmMsrs.u.vmx.EntryCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
5034
5035 /*
5036 * Load the guest debug controls (DR7 and IA32_DEBUGCTL MSR) on VM-entry.
5037 * The first VT-x capable CPUs only supported the 1-setting of this bit.
5038 *
5039 * For nested-guests, this is a mandatory VM-entry control. It's also
5040 * required because we do not want to leak host bits to the nested-guest.
5041 */
5042 fVal |= VMX_ENTRY_CTLS_LOAD_DEBUG;
5043
5044 /*
5045 * Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry.
5046 *
5047             * For nested-guests, we initialize the "IA-32e mode guest" control with what is
5048             * required to get the nested-guest working with hardware-assisted VMX execution.
5049 * It depends on the nested-guest's IA32_EFER.LMA bit. Remember, a nested hypervisor
5050 * can skip intercepting changes to the EFER MSR. This is why it needs to be done
5051 * here rather than while merging the guest VMCS controls.
5052 */
5053 if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
5054 {
5055 Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
5056 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
5057 }
5058 else
5059 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
5060
5061 /*
5062 * If the CPU supports the newer VMCS controls for managing guest/host EFER, use it.
5063 *
5064 * For nested-guests, we use the "load IA32_EFER" if the hardware supports it,
5065 * regardless of whether the nested-guest VMCS specifies it because we are free to
5066 * load whatever MSRs we require and we do not need to modify the guest visible copy
5067 * of the VM-entry MSR load area.
5068 */
5069 if ( g_fHmVmxSupportsVmcsEfer
5070 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
5071 fVal |= VMX_ENTRY_CTLS_LOAD_EFER_MSR;
5072 else
5073 Assert(!(fVal & VMX_ENTRY_CTLS_LOAD_EFER_MSR));
5074
5075 /*
5076 * The following should -not- be set (since we're not in SMM mode):
5077 * - VMX_ENTRY_CTLS_ENTRY_TO_SMM
5078 * - VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON
5079 */
5080
5081 /** @todo VMX_ENTRY_CTLS_LOAD_PERF_MSR,
5082 * VMX_ENTRY_CTLS_LOAD_PAT_MSR. */
5083
5084 if ((fVal & fZap) == fVal)
5085 { /* likely */ }
5086 else
5087 {
5088 Log4Func(("Invalid VM-entry controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
5089 g_HmMsrs.u.vmx.EntryCtls.n.allowed0, fVal, fZap));
5090 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
5091 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
5092 }
5093
5094 /* Commit it to the VMCS. */
5095 if (pVmcsInfo->u32EntryCtls != fVal)
5096 {
5097 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal);
5098 AssertRC(rc);
5099 pVmcsInfo->u32EntryCtls = fVal;
5100 }
5101 }
5102
5103 /*
5104 * VM-exit controls.
5105 */
5106 {
5107 uint32_t fVal = g_HmMsrs.u.vmx.ExitCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
5108 uint32_t const fZap = g_HmMsrs.u.vmx.ExitCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
5109
5110 /*
5111 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only
5112 * supported the 1-setting of this bit.
5113 *
5114             * For nested-guests, we set the "save debug controls" control since the converse
5115             * "load debug controls" control is mandatory for nested-guests anyway.
5116 */
5117 fVal |= VMX_EXIT_CTLS_SAVE_DEBUG;
5118
5119 /*
5120 * Set the host long mode active (EFER.LMA) bit (which Intel calls
5121 * "Host address-space size") if necessary. On VM-exit, VT-x sets both the
5122             * host EFER.LMA and EFER.LME bits to this value. See the assertion in
5123 * hmR0VmxExportHostMsrs().
5124 *
5125 * For nested-guests, we always set this bit as we do not support 32-bit
5126 * hosts.
5127 */
5128 fVal |= VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE;
5129
5130 /*
5131 * If the VMCS EFER MSR fields are supported by the hardware, we use it.
5132 *
5133 * For nested-guests, we should use the "save IA32_EFER" control if we also
5134 * used the "load IA32_EFER" control while exporting VM-entry controls.
5135 */
5136 if ( g_fHmVmxSupportsVmcsEfer
5137 && hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
5138 {
5139 fVal |= VMX_EXIT_CTLS_SAVE_EFER_MSR
5140 | VMX_EXIT_CTLS_LOAD_EFER_MSR;
5141 }
5142
5143 /*
5144 * Enable saving of the VMX-preemption timer value on VM-exit.
5145 * For nested-guests, currently not exposed/used.
5146 */
5147 /** @todo r=bird: Measure performance hit because of this vs. always rewriting
5148 * the timer value. */
5149 if (pVM->hmr0.s.vmx.fUsePreemptTimer)
5150 {
5151 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER);
5152 fVal |= VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER;
5153 }
5154
5155 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
5156 Assert(!(fVal & VMX_EXIT_CTLS_ACK_EXT_INT));
5157
5158 /** @todo VMX_EXIT_CTLS_LOAD_PERF_MSR,
5159 * VMX_EXIT_CTLS_SAVE_PAT_MSR,
5160 * VMX_EXIT_CTLS_LOAD_PAT_MSR. */
5161
5162 if ((fVal & fZap) == fVal)
5163 { /* likely */ }
5164 else
5165 {
5166                Log4Func(("Invalid VM-exit controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
5167 g_HmMsrs.u.vmx.ExitCtls.n.allowed0, fVal, fZap));
5168 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
5169 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
5170 }
5171
5172 /* Commit it to the VMCS. */
5173 if (pVmcsInfo->u32ExitCtls != fVal)
5174 {
5175 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal);
5176 AssertRC(rc);
5177 pVmcsInfo->u32ExitCtls = fVal;
5178 }
5179 }
5180
5181 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
5182 }
5183 return VINF_SUCCESS;
5184}
5185
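/*
 * Illustrative sketch (not part of the build): the capability check used for both the
 * VM-entry and VM-exit controls above. Bits set in allowed0 must be 1, bits clear in
 * allowed1 must be 0, so a desired control value is acceptable only if it survives
 * masking with allowed1 after allowed0 has been folded in. The helper name is
 * hypothetical.
 */
#if 0 /* example only */
static bool vmxExampleIsCtlsComboValid(uint32_t fAllowed0, uint32_t fAllowed1, uint32_t fDesired)
{
    uint32_t const fVal = fDesired | fAllowed0; /* Force on the bits that must be 1. */
    uint32_t const fZap = fAllowed1;            /* Only these bits may be 1 at all. */
    return (fVal & fZap) == fVal;               /* True if no bit falls outside the allowed-1 mask. */
}
#endif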
5186
5187/**
5188 * Sets the TPR threshold in the VMCS.
5189 *
5190 * @param pVmcsInfo The VMCS info. object.
5191 * @param u32TprThreshold The TPR threshold (task-priority class only).
5192 */
5193DECLINLINE(void) hmR0VmxApicSetTprThreshold(PVMXVMCSINFO pVmcsInfo, uint32_t u32TprThreshold)
5194{
5195 Assert(!(u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)); /* Bits 31:4 MBZ. */
5196 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
5197 RT_NOREF(pVmcsInfo);
5198 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
5199 AssertRC(rc);
5200}
5201
5202
5203/**
5204 * Exports the guest APIC TPR state into the VMCS.
5205 *
5206 * @param pVCpu The cross context virtual CPU structure.
5207 * @param pVmxTransient The VMX-transient structure.
5208 *
5209 * @remarks No-long-jump zone!!!
5210 */
5211static void hmR0VmxExportGuestApicTpr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
5212{
5213 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
5214 {
5215 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_APIC_TPR);
5216
5217 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5218 if (!pVmxTransient->fIsNestedGuest)
5219 {
5220 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM))
5221 && APICIsEnabled(pVCpu))
5222 {
5223 /*
5224 * Setup TPR shadowing.
5225 */
5226 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
5227 {
5228 bool fPendingIntr = false;
5229 uint8_t u8Tpr = 0;
5230 uint8_t u8PendingIntr = 0;
5231 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
5232 AssertRC(rc);
5233
5234 /*
5235 * If there are interrupts pending but masked by the TPR, instruct VT-x to
5236 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the
5237 * priority of the pending interrupt so we can deliver the interrupt. If there
5238 * are no interrupts pending, set threshold to 0 to not cause any
5239 * TPR-below-threshold VM-exits.
5240 */
5241 uint32_t u32TprThreshold = 0;
5242 if (fPendingIntr)
5243 {
5244 /* Bits 3:0 of the TPR threshold field correspond to bits 7:4 of the TPR
5245 (which is the Task-Priority Class). */
5246 const uint8_t u8PendingPriority = u8PendingIntr >> 4;
5247 const uint8_t u8TprPriority = u8Tpr >> 4;
5248 if (u8PendingPriority <= u8TprPriority)
5249 u32TprThreshold = u8PendingPriority;
5250 }
5251
5252 hmR0VmxApicSetTprThreshold(pVmcsInfo, u32TprThreshold);
5253 }
5254 }
5255 }
5256 /* else: the TPR threshold has already been updated while merging the nested-guest VMCS. */
5257 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
5258 }
5259}
5260
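/*
 * Illustrative sketch (not part of the build): how the TPR threshold above is derived from
 * the pending interrupt. Only the task-priority class (bits 7:4) matters; if the pending
 * vector's class is currently masked by the TPR, the threshold is set to that class so the
 * guest lowering its TPR triggers a TPR-below-threshold VM-exit, otherwise it stays 0.
 * The helper name is hypothetical.
 */
#if 0 /* example only */
static uint32_t vmxExampleTprThreshold(uint8_t u8Tpr, uint8_t u8PendingIntr, bool fPendingIntr)
{
    uint32_t u32TprThreshold = 0;
    if (fPendingIntr)
    {
        uint8_t const u8PendingPriority = u8PendingIntr >> 4; /* Task-priority class of the pending vector. */
        uint8_t const u8TprPriority     = u8Tpr >> 4;         /* Task-priority class the guest currently masks. */
        if (u8PendingPriority <= u8TprPriority)
            u32TprThreshold = u8PendingPriority;              /* VM-exit once the guest drops TPR below this. */
    }
    return u32TprThreshold;
}
#endif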
5261
5262/**
5263 * Gets the guest interruptibility-state and updates related force-flags.
5264 *
5265 * @returns Guest's interruptibility-state.
5266 * @param pVCpu The cross context virtual CPU structure.
5267 *
5268 * @remarks No-long-jump zone!!!
5269 */
5270static uint32_t hmR0VmxGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
5271{
5272 /*
5273 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
5274 */
5275 uint32_t fIntrState = 0;
5276 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
5277 {
5278 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
5279 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
5280
5281 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5282 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
5283 {
5284 if (pCtx->eflags.Bits.u1IF)
5285 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
5286 else
5287 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
5288 }
5289 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
5290 {
5291 /*
5292 * We can clear the inhibit force flag as even if we go back to the recompiler
5293 * without executing guest code in VT-x, the flag's condition to be cleared is
5294 * met and thus the cleared state is correct.
5295 */
5296 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5297 }
5298 }
5299
5300 /*
5301 * Check if we should inhibit NMI delivery.
5302 */
5303 if (CPUMIsGuestNmiBlocking(pVCpu))
5304 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
5305
5306 /*
5307 * Validate.
5308 */
5309#ifdef VBOX_STRICT
5310    /* We don't support block-by-SMI yet. */
5311 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));
5312
5313 /* Block-by-STI must not be set when interrupts are disabled. */
5314 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
5315 {
5316 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5317 Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_IF);
5318 }
5319#endif
5320
5321 return fIntrState;
5322}
5323
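/*
 * Illustrative sketch (not part of the build): the interruptibility bit chosen above when
 * an interrupt-inhibiting instruction has just been executed. If EFLAGS.IF is set the
 * inhibition must stem from STI, otherwise it is attributed to MOV SS/POP SS. The helper
 * name is hypothetical.
 */
#if 0 /* example only */
static uint32_t vmxExampleInhibitIntState(bool fEflagsIf)
{
    return fEflagsIf ? VMX_VMCS_GUEST_INT_STATE_BLOCK_STI    /* STI interrupt shadow. */
                     : VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS; /* MOV SS/POP SS interrupt shadow. */
}
#endif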
5324
5325/**
5326 * Exports the exception intercepts required for guest execution in the VMCS.
5327 *
5328 * @param pVCpu The cross context virtual CPU structure.
5329 * @param pVmxTransient The VMX-transient structure.
5330 *
5331 * @remarks No-long-jump zone!!!
5332 */
5333static void hmR0VmxExportGuestXcptIntercepts(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
5334{
5335 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_XCPT_INTERCEPTS)
5336 {
5337 /* When executing a nested-guest, we do not need to trap GIM hypercalls by intercepting #UD. */
5338 if ( !pVmxTransient->fIsNestedGuest
5339 && pVCpu->hm.s.fGIMTrapXcptUD)
5340 hmR0VmxAddXcptIntercept(pVmxTransient, X86_XCPT_UD);
5341 else
5342 hmR0VmxRemoveXcptIntercept(pVCpu, pVmxTransient, X86_XCPT_UD);
5343
5344 /* Other exception intercepts are handled elsewhere, e.g. while exporting guest CR0. */
5345 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_XCPT_INTERCEPTS);
5346 }
5347}
5348
5349
5350/**
5351 * Exports the guest's RIP into the guest-state area in the VMCS.
5352 *
5353 * @param pVCpu The cross context virtual CPU structure.
5354 *
5355 * @remarks No-long-jump zone!!!
5356 */
5357static void hmR0VmxExportGuestRip(PVMCPUCC pVCpu)
5358{
5359 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP)
5360 {
5361 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP);
5362
5363 int rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_RIP, pVCpu->cpum.GstCtx.rip);
5364 AssertRC(rc);
5365
5366 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RIP);
5367 Log4Func(("rip=%#RX64\n", pVCpu->cpum.GstCtx.rip));
5368 }
5369}
5370
5371
5372/**
5373 * Exports the guest's RSP into the guest-state area in the VMCS.
5374 *
5375 * @param pVCpu The cross context virtual CPU structure.
5376 *
5377 * @remarks No-long-jump zone!!!
5378 */
5379static void hmR0VmxExportGuestRsp(PVMCPUCC pVCpu)
5380{
5381 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RSP)
5382 {
5383 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RSP);
5384
5385 int rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_RSP, pVCpu->cpum.GstCtx.rsp);
5386 AssertRC(rc);
5387
5388 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RSP);
5389 Log4Func(("rsp=%#RX64\n", pVCpu->cpum.GstCtx.rsp));
5390 }
5391}
5392
5393
5394/**
5395 * Exports the guest's RFLAGS into the guest-state area in the VMCS.
5396 *
5397 * @param pVCpu The cross context virtual CPU structure.
5398 * @param pVmxTransient The VMX-transient structure.
5399 *
5400 * @remarks No-long-jump zone!!!
5401 */
5402static void hmR0VmxExportGuestRflags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
5403{
5404 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS)
5405 {
5406 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
5407
5408 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
5409 Let us assert it as such and use 32-bit VMWRITE. */
5410 Assert(!RT_HI_U32(pVCpu->cpum.GstCtx.rflags.u64));
5411 X86EFLAGS fEFlags = pVCpu->cpum.GstCtx.eflags;
5412 Assert(fEFlags.u32 & X86_EFL_RA1_MASK);
5413 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK)));
5414
5415 /*
5416 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so
5417 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x
5418 * can run the real-mode guest code under Virtual 8086 mode.
5419 */
5420 PVMXVMCSINFOSHARED pVmcsInfo = pVmxTransient->pVmcsInfo->pShared;
5421 if (pVmcsInfo->RealMode.fRealOnV86Active)
5422 {
5423 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
5424 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
5425 Assert(!pVmxTransient->fIsNestedGuest);
5426 pVmcsInfo->RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */
5427 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
5428 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
5429 }
5430
5431 int rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
5432 AssertRC(rc);
5433
5434 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
5435 Log4Func(("eflags=%#RX32\n", fEFlags.u32));
5436 }
5437}
5438
5439
5440#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5441/**
5442 * Copies the nested-guest VMCS to the shadow VMCS.
5443 *
5444 * @returns VBox status code.
5445 * @param pVCpu The cross context virtual CPU structure.
5446 * @param pVmcsInfo The VMCS info. object.
5447 *
5448 * @remarks No-long-jump zone!!!
5449 */
5450static int hmR0VmxCopyNstGstToShadowVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
5451{
5452 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
5453 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5454
5455 /*
5456 * Disable interrupts so we don't get preempted while the shadow VMCS is the
5457 * current VMCS, as we may try saving guest lazy MSRs.
5458 *
5459 * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk
5460     * Strictly speaking the lazy MSRs are not in the VMCS, but I'd rather not risk calling
5461     * the VMCS-import code (which currently performs the guest MSR reads on 64-bit hosts
5462     * and accesses the auto-load/store MSR area on 32-bit hosts) or the rest of the VMX
5463     * leave-session machinery.
5464 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
5465
5466 int rc = hmR0VmxLoadShadowVmcs(pVmcsInfo);
5467 if (RT_SUCCESS(rc))
5468 {
5469 /*
5470 * Copy all guest read/write VMCS fields.
5471 *
5472 * We don't check for VMWRITE failures here for performance reasons and
5473 * because they are not expected to fail, barring irrecoverable conditions
5474 * like hardware errors.
5475 */
5476 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
5477 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
5478 {
5479 uint64_t u64Val;
5480 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
5481 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
5482 VMXWriteVmcs64(uVmcsField, u64Val);
5483 }
5484
5485 /*
5486 * If the host CPU supports writing all VMCS fields, copy the guest read-only
5487 * VMCS fields, so the guest can VMREAD them without causing a VM-exit.
5488 */
5489 if (g_HmMsrs.u.vmx.u64Misc & VMX_MISC_VMWRITE_ALL)
5490 {
5491 uint32_t const cShadowVmcsRoFields = pVM->hmr0.s.vmx.cShadowVmcsRoFields;
5492 for (uint32_t i = 0; i < cShadowVmcsRoFields; i++)
5493 {
5494 uint64_t u64Val;
5495 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsRoFields[i];
5496 IEMReadVmxVmcsField(pVmcsNstGst, uVmcsField, &u64Val);
5497 VMXWriteVmcs64(uVmcsField, u64Val);
5498 }
5499 }
5500
5501 rc = hmR0VmxClearShadowVmcs(pVmcsInfo);
5502 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
5503 }
5504
5505 ASMSetFlags(fEFlags);
5506 return rc;
5507}
5508
5509
5510/**
5511 * Copies the shadow VMCS to the nested-guest VMCS.
5512 *
5513 * @returns VBox status code.
5514 * @param pVCpu The cross context virtual CPU structure.
5515 * @param pVmcsInfo The VMCS info. object.
5516 *
5517 * @remarks Called with interrupts disabled.
5518 */
5519static int hmR0VmxCopyShadowToNstGstVmcs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
5520{
5521 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
5522 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
5523 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5524
5525 int rc = hmR0VmxLoadShadowVmcs(pVmcsInfo);
5526 if (RT_SUCCESS(rc))
5527 {
5528 /*
5529 * Copy guest read/write fields from the shadow VMCS.
5530 * Guest read-only fields cannot be modified, so no need to copy them.
5531 *
5532 * We don't check for VMREAD failures here for performance reasons and
5533 * because they are not expected to fail, barring irrecoverable conditions
5534 * like hardware errors.
5535 */
5536 uint32_t const cShadowVmcsFields = pVM->hmr0.s.vmx.cShadowVmcsFields;
5537 for (uint32_t i = 0; i < cShadowVmcsFields; i++)
5538 {
5539 uint64_t u64Val;
5540 uint32_t const uVmcsField = pVM->hmr0.s.vmx.paShadowVmcsFields[i];
5541 VMXReadVmcs64(uVmcsField, &u64Val);
5542 IEMWriteVmxVmcsField(pVmcsNstGst, uVmcsField, u64Val);
5543 }
5544
5545 rc = hmR0VmxClearShadowVmcs(pVmcsInfo);
5546 rc |= hmR0VmxLoadVmcs(pVmcsInfo);
5547 }
5548 return rc;
5549}
5550
5551
5552/**
5553 * Enables VMCS shadowing for the given VMCS info. object.
5554 *
5555 * @param pVmcsInfo The VMCS info. object.
5556 *
5557 * @remarks No-long-jump zone!!!
5558 */
5559static void hmR0VmxEnableVmcsShadowing(PVMXVMCSINFO pVmcsInfo)
5560{
5561 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
5562 if (!(uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
5563 {
5564 Assert(pVmcsInfo->HCPhysShadowVmcs != 0 && pVmcsInfo->HCPhysShadowVmcs != NIL_RTHCPHYS);
5565 uProcCtls2 |= VMX_PROC_CTLS2_VMCS_SHADOWING;
5566 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
5567 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, pVmcsInfo->HCPhysShadowVmcs); AssertRC(rc);
5568 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
5569 pVmcsInfo->u64VmcsLinkPtr = pVmcsInfo->HCPhysShadowVmcs;
5570 Log4Func(("Enabled\n"));
5571 }
5572}
5573
5574
5575/**
5576 * Disables VMCS shadowing for the given VMCS info. object.
5577 *
5578 * @param pVmcsInfo The VMCS info. object.
5579 *
5580 * @remarks No-long-jump zone!!!
5581 */
5582static void hmR0VmxDisableVmcsShadowing(PVMXVMCSINFO pVmcsInfo)
5583{
5584 /*
5585 * We want all VMREAD and VMWRITE instructions to cause VM-exits, so we clear the
5586 * VMCS shadowing control. However, VM-entry requires the shadow VMCS indicator bit
5587 * to match the VMCS shadowing control if the VMCS link pointer is not NIL_RTHCPHYS.
5588 * Hence, we must also reset the VMCS link pointer to ensure VM-entry does not fail.
5589 *
5590 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
5591 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
5592 */
5593 uint32_t uProcCtls2 = pVmcsInfo->u32ProcCtls2;
5594 if (uProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
5595 {
5596 uProcCtls2 &= ~VMX_PROC_CTLS2_VMCS_SHADOWING;
5597 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, uProcCtls2); AssertRC(rc);
5598 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, NIL_RTHCPHYS); AssertRC(rc);
5599 pVmcsInfo->u32ProcCtls2 = uProcCtls2;
5600 pVmcsInfo->u64VmcsLinkPtr = NIL_RTHCPHYS;
5601 Log4Func(("Disabled\n"));
5602 }
5603}
5604#endif
5605
5606
5607/**
5608 * Exports the guest hardware-virtualization state.
5609 *
5610 * @returns VBox status code.
5611 * @param pVCpu The cross context virtual CPU structure.
5612 * @param pVmxTransient The VMX-transient structure.
5613 *
5614 * @remarks No-long-jump zone!!!
5615 */
5616static int hmR0VmxExportGuestHwvirtState(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
5617{
5618 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_HWVIRT)
5619 {
5620#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5621 /*
5622 * Check if the VMX feature is exposed to the guest and if the host CPU supports
5623 * VMCS shadowing.
5624 */
5625 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUseVmcsShadowing)
5626 {
5627 /*
5628 * If the nested hypervisor has loaded a current VMCS and is in VMX root mode,
5629 * copy the nested hypervisor's current VMCS into the shadow VMCS and enable
5630 * VMCS shadowing to skip intercepting some or all VMREAD/VMWRITE VM-exits.
5631 *
5632 * We check for VMX root mode here in case the guest executes VMXOFF without
5633 * clearing the current VMCS pointer and our VMXOFF instruction emulation does
5634 * not clear the current VMCS pointer.
5635 */
5636 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5637 if ( CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx)
5638 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
5639 && CPUMIsGuestVmxCurrentVmcsValid(&pVCpu->cpum.GstCtx))
5640 {
5641 /* Paranoia. */
5642 Assert(!pVmxTransient->fIsNestedGuest);
5643
5644 /*
5645 * For performance reasons, also check if the nested hypervisor's current VMCS
5646 * was newly loaded or modified before copying it to the shadow VMCS.
5647 */
5648 if (!pVCpu->hm.s.vmx.fCopiedNstGstToShadowVmcs)
5649 {
5650 int rc = hmR0VmxCopyNstGstToShadowVmcs(pVCpu, pVmcsInfo);
5651 AssertRCReturn(rc, rc);
5652 pVCpu->hm.s.vmx.fCopiedNstGstToShadowVmcs = true;
5653 }
5654 hmR0VmxEnableVmcsShadowing(pVmcsInfo);
5655 }
5656 else
5657 hmR0VmxDisableVmcsShadowing(pVmcsInfo);
5658 }
5659#else
5660 NOREF(pVmxTransient);
5661#endif
5662 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_HWVIRT);
5663 }
5664 return VINF_SUCCESS;
5665}
5666
5667
5668/**
5669 * Exports the guest CR0 control register into the guest-state area in the VMCS.
5670 *
5671 * The guest FPU state is always pre-loaded, hence we don't need to worry about
5672 * sharing FPU-related CR0 bits between the guest and host.
5673 *
5674 * @returns VBox status code.
5675 * @param pVCpu The cross context virtual CPU structure.
5676 * @param pVmxTransient The VMX-transient structure.
5677 *
5678 * @remarks No-long-jump zone!!!
5679 */
5680static int hmR0VmxExportGuestCR0(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
5681{
5682 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR0)
5683 {
5684 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5685 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5686
5687 uint64_t fSetCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed0;
5688 uint64_t const fZapCr0 = g_HmMsrs.u.vmx.u64Cr0Fixed1;
5689 if (pVM->hmr0.s.vmx.fUnrestrictedGuest)
5690 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
5691 else
5692 Assert((fSetCr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
5693
5694 if (!pVmxTransient->fIsNestedGuest)
5695 {
5696 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
5697 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
5698 uint64_t const u64ShadowCr0 = u64GuestCr0;
5699 Assert(!RT_HI_U32(u64GuestCr0));
5700
5701 /*
5702 * Setup VT-x's view of the guest CR0.
5703 */
5704 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
5705 if (pVM->hmr0.s.fNestedPaging)
5706 {
5707 if (CPUMIsGuestPagingEnabled(pVCpu))
5708 {
5709 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */
5710 uProcCtls &= ~( VMX_PROC_CTLS_CR3_LOAD_EXIT
5711 | VMX_PROC_CTLS_CR3_STORE_EXIT);
5712 }
5713 else
5714 {
5715 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */
5716 uProcCtls |= VMX_PROC_CTLS_CR3_LOAD_EXIT
5717 | VMX_PROC_CTLS_CR3_STORE_EXIT;
5718 }
5719
5720 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
5721 if (pVM->hmr0.s.vmx.fUnrestrictedGuest)
5722 uProcCtls &= ~VMX_PROC_CTLS_CR3_STORE_EXIT;
5723 }
5724 else
5725 {
5726 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
5727 u64GuestCr0 |= X86_CR0_WP;
5728 }
5729
5730 /*
5731 * Guest FPU bits.
5732 *
5733             * Since we always pre-load the guest FPU before VM-entry, there is no need to track lazy state
5734 * using CR0.TS.
5735 *
5736             * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be
5737             * set on the first CPUs to support VT-x; nothing is said with regards to UX in the VM-entry checks.
5738 */
5739 u64GuestCr0 |= X86_CR0_NE;
5740
5741 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */
5742 bool const fInterceptMF = !(u64ShadowCr0 & X86_CR0_NE);
5743
5744 /*
5745 * Update exception intercepts.
5746 */
5747 uint32_t uXcptBitmap = pVmcsInfo->u32XcptBitmap;
5748 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5749 {
5750 Assert(PDMVmmDevHeapIsEnabled(pVM));
5751 Assert(pVM->hm.s.vmx.pRealModeTSS);
5752 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
5753 }
5754 else
5755 {
5756 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. */
5757 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
5758 if (fInterceptMF)
5759 uXcptBitmap |= RT_BIT(X86_XCPT_MF);
5760 }
5761
5762 /* Additional intercepts for debugging, define these yourself explicitly. */
5763#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
5764 uXcptBitmap |= 0
5765 | RT_BIT(X86_XCPT_BP)
5766 | RT_BIT(X86_XCPT_DE)
5767 | RT_BIT(X86_XCPT_NM)
5768 | RT_BIT(X86_XCPT_TS)
5769 | RT_BIT(X86_XCPT_UD)
5770 | RT_BIT(X86_XCPT_NP)
5771 | RT_BIT(X86_XCPT_SS)
5772 | RT_BIT(X86_XCPT_GP)
5773 | RT_BIT(X86_XCPT_PF)
5774 | RT_BIT(X86_XCPT_MF)
5775 ;
5776#elif defined(HMVMX_ALWAYS_TRAP_PF)
5777 uXcptBitmap |= RT_BIT(X86_XCPT_PF);
5778#endif
5779 if (pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv)
5780 uXcptBitmap |= RT_BIT(X86_XCPT_GP);
5781 Assert(pVM->hmr0.s.fNestedPaging || (uXcptBitmap & RT_BIT(X86_XCPT_PF)));
5782
5783 /* Apply the hardware specified CR0 fixed bits and enable caching. */
5784 u64GuestCr0 |= fSetCr0;
5785 u64GuestCr0 &= fZapCr0;
5786 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
5787
5788 /* Commit the CR0 and related fields to the guest VMCS. */
5789 int rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
5790 rc = VMXWriteVmcsNw(VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
5791 if (uProcCtls != pVmcsInfo->u32ProcCtls)
5792 {
5793 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
5794 AssertRC(rc);
5795 }
5796 if (uXcptBitmap != pVmcsInfo->u32XcptBitmap)
5797 {
5798 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
5799 AssertRC(rc);
5800 }
5801
5802 /* Update our caches. */
5803 pVmcsInfo->u32ProcCtls = uProcCtls;
5804 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
5805
5806 Log4Func(("cr0=%#RX64 shadow=%#RX64 set=%#RX64 zap=%#RX64\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
5807 }
5808 else
5809 {
5810 /*
5811 * With nested-guests, we may have extended the guest/host mask here since we
5812 * merged in the outer guest's mask. Thus, the merged mask can include more bits
5813 * (to read from the nested-guest CR0 read-shadow) than the nested hypervisor
5814 * originally supplied. We must copy those bits from the nested-guest CR0 into
5815 * the nested-guest CR0 read-shadow.
5816 */
5817 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
5818 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0;
5819 uint64_t const u64ShadowCr0 = CPUMGetGuestVmxMaskedCr0(&pVCpu->cpum.GstCtx, pVmcsInfo->u64Cr0Mask);
5820 Assert(!RT_HI_U32(u64GuestCr0));
5821 Assert(u64GuestCr0 & X86_CR0_NE);
5822
5823 /* Apply the hardware specified CR0 fixed bits and enable caching. */
5824 u64GuestCr0 |= fSetCr0;
5825 u64GuestCr0 &= fZapCr0;
5826 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW);
5827
5828 /* Commit the CR0 and CR0 read-shadow to the nested-guest VMCS. */
5829 int rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_CR0, u64GuestCr0); AssertRC(rc);
5830 rc = VMXWriteVmcsNw(VMX_VMCS_CTRL_CR0_READ_SHADOW, u64ShadowCr0); AssertRC(rc);
5831
5832 Log4Func(("cr0=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr0, u64ShadowCr0, fSetCr0, fZapCr0));
5833 }
5834
5835 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR0);
5836 }
5837
5838 return VINF_SUCCESS;
5839}
5840
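/*
 * Illustrative sketch (not part of the build): how the CR0 fixed-bit MSRs are applied in
 * hmR0VmxExportGuestCR0() above. A bit set in IA32_VMX_CR0_FIXED0 must be 1 in CR0 and a
 * bit clear in IA32_VMX_CR0_FIXED1 must be 0, hence the OR followed by the AND. The
 * unrestricted-guest and CD/NW adjustments made above are left out here. The helper name
 * is hypothetical.
 */
#if 0 /* example only */
static uint64_t vmxExampleApplyCr0FixedBits(uint64_t uGuestCr0, uint64_t uCr0Fixed0, uint64_t uCr0Fixed1)
{
    uGuestCr0 |= uCr0Fixed0; /* Force the bits that must be 1. */
    uGuestCr0 &= uCr0Fixed1; /* Clear the bits that must be 0. */
    return uGuestCr0;
}
#endif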
5841
5842/**
5843 * Exports the guest control registers (CR3, CR4) into the guest-state area
5844 * in the VMCS.
5845 *
5846 * @returns VBox strict status code.
5847 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
5848 * without unrestricted guest access and the VMMDev is not presently
5849 * mapped (e.g. EFI32).
5850 *
5851 * @param pVCpu The cross context virtual CPU structure.
5852 * @param pVmxTransient The VMX-transient structure.
5853 *
5854 * @remarks No-long-jump zone!!!
5855 */
5856static VBOXSTRICTRC hmR0VmxExportGuestCR3AndCR4(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
5857{
5858 int rc = VINF_SUCCESS;
5859 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5860
5861 /*
5862 * Guest CR2.
5863 * It's always loaded in the assembler code. Nothing to do here.
5864 */
5865
5866 /*
5867 * Guest CR3.
5868 */
5869 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR3)
5870 {
5871 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
5872
5873 if (pVM->hmr0.s.fNestedPaging)
5874 {
5875 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5876 pVmcsInfo->HCPhysEPTP = PGMGetHyperCR3(pVCpu);
5877
5878 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
5879 Assert(pVmcsInfo->HCPhysEPTP != NIL_RTHCPHYS);
5880 Assert(!(pVmcsInfo->HCPhysEPTP & UINT64_C(0xfff0000000000000)));
5881 Assert(!(pVmcsInfo->HCPhysEPTP & 0xfff));
5882
5883 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
5884 pVmcsInfo->HCPhysEPTP |= RT_BF_MAKE(VMX_BF_EPTP_MEMTYPE, VMX_EPTP_MEMTYPE_WB)
5885 | RT_BF_MAKE(VMX_BF_EPTP_PAGE_WALK_LENGTH, VMX_EPTP_PAGE_WALK_LENGTH_4);
5886
5887 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
5888 AssertMsg( ((pVmcsInfo->HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 3:5 (EPT page walk length - 1) must be 3. */
5889 && ((pVmcsInfo->HCPhysEPTP >> 7) & 0x1f) == 0, /* Bits 7:11 MBZ. */
5890 ("EPTP %#RX64\n", pVmcsInfo->HCPhysEPTP));
5891 AssertMsg( !((pVmcsInfo->HCPhysEPTP >> 6) & 0x01) /* Bit 6 (EPT accessed & dirty bit). */
5892 || (g_HmMsrs.u.vmx.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY),
5893 ("EPTP accessed/dirty bit not supported by CPU but set %#RX64\n", pVmcsInfo->HCPhysEPTP));
5894
5895 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVmcsInfo->HCPhysEPTP);
5896 AssertRC(rc);
5897
5898 uint64_t u64GuestCr3;
5899 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5900 if ( pVM->hmr0.s.vmx.fUnrestrictedGuest
5901 || CPUMIsGuestPagingEnabledEx(pCtx))
5902 {
5903 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
5904 if (CPUMIsGuestInPAEModeEx(pCtx))
5905 {
5906 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pCtx->aPaePdpes[0].u); AssertRC(rc);
5907 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pCtx->aPaePdpes[1].u); AssertRC(rc);
5908 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pCtx->aPaePdpes[2].u); AssertRC(rc);
5909 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pCtx->aPaePdpes[3].u); AssertRC(rc);
5910 }
5911
5912 /*
5913 * The guest's view of its CR3 is unblemished with nested paging when the
5914 * guest is using paging or we have unrestricted guest execution to handle
5915 * the guest when it's not using paging.
5916 */
5917 u64GuestCr3 = pCtx->cr3;
5918 }
5919 else
5920 {
5921 /*
5922 * The guest is not using paging, but the CPU (VT-x) has to. While the guest
5923 * thinks it accesses physical memory directly, we use our identity-mapped
5924 * page table to map guest-linear to guest-physical addresses. EPT takes care
5925 * of translating it to host-physical addresses.
5926 */
5927 RTGCPHYS GCPhys;
5928 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
5929
5930 /* We obtain it here every time as the guest could have relocated this PCI region. */
5931 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
5932 if (RT_SUCCESS(rc))
5933 { /* likely */ }
5934 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS)
5935 {
5936 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n"));
5937 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */
5938 }
5939 else
5940 AssertMsgFailedReturn(("%Rrc\n", rc), rc);
5941
5942 u64GuestCr3 = GCPhys;
5943 }
5944
5945 Log4Func(("guest_cr3=%#RX64 (GstN)\n", u64GuestCr3));
5946 rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_CR3, u64GuestCr3);
5947 AssertRC(rc);
5948 }
5949 else
5950 {
5951 Assert(!pVmxTransient->fIsNestedGuest);
5952 /* Non-nested paging case, just use the hypervisor's CR3. */
5953 RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
5954
5955 Log4Func(("guest_cr3=%#RX64 (HstN)\n", HCPhysGuestCr3));
5956 rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
5957 AssertRC(rc);
5958 }
5959
5960 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR3);
5961 }
5962
5963 /*
5964 * Guest CR4.
5965      * ASSUMES this is done every time we get in from ring-3! (XCR0)
5966 */
5967 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR4)
5968 {
5969 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
5970 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
5971
5972 uint64_t const fSetCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed0;
5973 uint64_t const fZapCr4 = g_HmMsrs.u.vmx.u64Cr4Fixed1;
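              /* Note: per the VMX fixed-bit MSRs, bits set in CR4 Fixed0 must be 1 in CR4 and bits clear
                 in CR4 Fixed1 must be 0; fSetCr4 is OR'ed in and fZapCr4 AND'ed in further below. */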
5974
5975 /*
5976 * With nested-guests, we may have extended the guest/host mask here (since we
5977 * merged in the outer guest's mask, see hmR0VmxMergeVmcsNested). This means, the
5978 * mask can include more bits (to read from the nested-guest CR4 read-shadow) than
5979 * the nested hypervisor originally supplied. Thus, we should, in essence, copy
5980 * those bits from the nested-guest CR4 into the nested-guest CR4 read-shadow.
5981 */
5982 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
5983 uint64_t u64GuestCr4 = pCtx->cr4;
5984 uint64_t const u64ShadowCr4 = !pVmxTransient->fIsNestedGuest
5985 ? pCtx->cr4
5986 : CPUMGetGuestVmxMaskedCr4(pCtx, pVmcsInfo->u64Cr4Mask);
5987 Assert(!RT_HI_U32(u64GuestCr4));
5988
5989 /*
5990 * Setup VT-x's view of the guest CR4.
5991 *
5992 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software
5993 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt
5994 * redirection bitmap is already all 0, see hmR3InitFinalizeR0())
5995 *
5996 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
5997 */
5998 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
5999 {
6000 Assert(pVM->hm.s.vmx.pRealModeTSS);
6001 Assert(PDMVmmDevHeapIsEnabled(pVM));
6002 u64GuestCr4 &= ~(uint64_t)X86_CR4_VME;
6003 }
6004
6005 if (pVM->hmr0.s.fNestedPaging)
6006 {
6007 if ( !CPUMIsGuestPagingEnabledEx(pCtx)
6008 && !pVM->hmr0.s.vmx.fUnrestrictedGuest)
6009 {
6010 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
6011 u64GuestCr4 |= X86_CR4_PSE;
6012 /* Our identity mapping is a 32-bit page directory. */
6013 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
6014 }
6015 /* else use guest CR4.*/
6016 }
6017 else
6018 {
6019 Assert(!pVmxTransient->fIsNestedGuest);
6020
6021 /*
6022                  * The shadow paging mode and the guest paging mode can differ; the shadow is in accordance with the host
6023                  * paging mode, and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
6024 */
6025 switch (pVCpu->hm.s.enmShadowMode)
6026 {
6027 case PGMMODE_REAL: /* Real-mode. */
6028 case PGMMODE_PROTECTED: /* Protected mode without paging. */
6029 case PGMMODE_32_BIT: /* 32-bit paging. */
6030 {
6031 u64GuestCr4 &= ~(uint64_t)X86_CR4_PAE;
6032 break;
6033 }
6034
6035 case PGMMODE_PAE: /* PAE paging. */
6036 case PGMMODE_PAE_NX: /* PAE paging with NX. */
6037 {
6038 u64GuestCr4 |= X86_CR4_PAE;
6039 break;
6040 }
6041
6042 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
6043 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
6044 {
6045#ifdef VBOX_WITH_64_BITS_GUESTS
6046 /* For our assumption in hmR0VmxShouldSwapEferMsr. */
6047 Assert(u64GuestCr4 & X86_CR4_PAE);
6048 break;
6049#endif
6050 }
6051 default:
6052 AssertFailed();
6053 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
6054 }
6055 }
6056
6057 /* Apply the hardware specified CR4 fixed bits (mainly CR4.VMXE). */
6058 u64GuestCr4 |= fSetCr4;
6059 u64GuestCr4 &= fZapCr4;
6060
6061 /* Commit the CR4 and CR4 read-shadow to the guest VMCS. */
6062 rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_CR4, u64GuestCr4); AssertRC(rc);
6063 rc = VMXWriteVmcsNw(VMX_VMCS_CTRL_CR4_READ_SHADOW, u64ShadowCr4); AssertRC(rc);
6064
6065 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */
6066 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
6067 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
6068 {
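              /* Whether XCR0 must be swapped affects which VM-entry worker we use, so the worker is
                 re-selected below via hmR0VmxUpdateStartVmFunction(). */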
6069 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
6070 hmR0VmxUpdateStartVmFunction(pVCpu);
6071 }
6072
6073 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4);
6074
6075 Log4Func(("cr4=%#RX64 shadow=%#RX64 (set=%#RX64 zap=%#RX64)\n", u64GuestCr4, u64ShadowCr4, fSetCr4, fZapCr4));
6076 }
6077 return rc;
6078}
6079
6080
6081/**
6082 * Exports the guest debug registers into the guest-state area in the VMCS.
6083 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
6084 *
6085 * This also sets up whether \#DB and MOV DRx accesses cause VM-exits.
6086 *
6087 * @returns VBox status code.
6088 * @param pVCpu The cross context virtual CPU structure.
6089 * @param pVmxTransient The VMX-transient structure.
6090 *
6091 * @remarks No-long-jump zone!!!
6092 */
6093static int hmR0VmxExportSharedDebugState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
6094{
6095 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6096
6097 /** @todo NSTVMX: Figure out what we want to do with nested-guest instruction
6098 * stepping. */
6099 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6100 if (pVmxTransient->fIsNestedGuest)
6101 {
6102 int rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_DR7, CPUMGetGuestDR7(pVCpu));
6103 AssertRC(rc);
6104
6105 /*
6106 * We don't want to always intercept MOV DRx for nested-guests as it causes
6107 * problems when the nested hypervisor isn't intercepting them, see @bugref{10080}.
6108 * Instead, they are strictly only requested when the nested hypervisor intercepts
6109 * them -- handled while merging VMCS controls.
6110 *
6111                      * If neither the outer nor the nested hypervisor is intercepting MOV DRx,
6112                      * then the nested-guest debug state should be actively loaded on the host so that
6113                      * the nested-guest can read its own debug registers without causing VM-exits.
6114 */
6115 if ( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT)
6116 && !CPUMIsGuestDebugStateActive(pVCpu))
6117 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
6118 return VINF_SUCCESS;
6119 }
6120
6121#ifdef VBOX_STRICT
6122 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
6123 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
6124 {
6125 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
6126 Assert((pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0);
6127 Assert((pVCpu->cpum.GstCtx.dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK);
6128 }
6129#endif
6130
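          /* Determined below: whether we single-step using the trap flag / monitor-trap flag, and
             whether MOV DRx accesses need to cause VM-exits. */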
6131 bool fSteppingDB = false;
6132 bool fInterceptMovDRx = false;
6133 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
6134 if (pVCpu->hm.s.fSingleInstruction)
6135 {
6136 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
6137 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MONITOR_TRAP_FLAG)
6138 {
6139 uProcCtls |= VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
6140 Assert(fSteppingDB == false);
6141 }
6142 else
6143 {
6144 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_TF;
6145 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_RFLAGS;
6146 pVCpu->hmr0.s.fClearTrapFlag = true;
6147 fSteppingDB = true;
6148 }
6149 }
6150
6151 uint64_t u64GuestDr7;
6152 if ( fSteppingDB
6153 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
6154 {
6155 /*
6156 * Use the combined guest and host DRx values found in the hypervisor register set
6157 * because the hypervisor debugger has breakpoints active or someone is single stepping
6158 * on the host side without a monitor trap flag.
6159 *
6160 * Note! DBGF expects a clean DR6 state before executing guest code.
6161 */
6162 if (!CPUMIsHyperDebugStateActive(pVCpu))
6163 {
6164 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
6165 Assert(CPUMIsHyperDebugStateActive(pVCpu));
6166 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
6167 }
6168
6169 /* Update DR7 with the hypervisor value (other DRx registers are handled by CPUM one way or another). */
6170 u64GuestDr7 = CPUMGetHyperDR7(pVCpu);
6171 pVCpu->hmr0.s.fUsingHyperDR7 = true;
6172 fInterceptMovDRx = true;
6173 }
6174 else
6175 {
6176 /*
6177 * If the guest has enabled debug registers, we need to load them prior to
6178 * executing guest code so they'll trigger at the right time.
6179 */
6180 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
6181 if (pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
6182 {
6183 if (!CPUMIsGuestDebugStateActive(pVCpu))
6184 {
6185 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
6186 Assert(CPUMIsGuestDebugStateActive(pVCpu));
6187 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
6188 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
6189 }
6190 Assert(!fInterceptMovDRx);
6191 }
6192 else if (!CPUMIsGuestDebugStateActive(pVCpu))
6193 {
6194 /*
6195                  * If no debugging is enabled, we'll lazily load DR0-3. Unlike on AMD-V, we
6196 * must intercept #DB in order to maintain a correct DR6 guest value, and
6197 * because we need to intercept it to prevent nested #DBs from hanging the
6198 * CPU, we end up always having to intercept it. See hmR0VmxSetupVmcsXcptBitmap().
6199 */
6200 fInterceptMovDRx = true;
6201 }
6202
6203 /* Update DR7 with the actual guest value. */
6204 u64GuestDr7 = pVCpu->cpum.GstCtx.dr[7];
6205 pVCpu->hmr0.s.fUsingHyperDR7 = false;
6206 }
6207
6208 if (fInterceptMovDRx)
6209 uProcCtls |= VMX_PROC_CTLS_MOV_DR_EXIT;
6210 else
6211 uProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
6212
6213 /*
6214 * Update the processor-based VM-execution controls with the MOV-DRx intercepts and the
6215 * monitor-trap flag and update our cache.
6216 */
6217 if (uProcCtls != pVmcsInfo->u32ProcCtls)
6218 {
6219 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
6220 AssertRC(rc);
6221 pVmcsInfo->u32ProcCtls = uProcCtls;
6222 }
6223
6224 /*
6225 * Update guest DR7.
6226 */
6227 int rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_DR7, u64GuestDr7);
6228 AssertRC(rc);
6229
6230 /*
6231 * If we have forced EFLAGS.TF to be set because we're single-stepping in the hypervisor debugger,
6232      * we need to clear any interrupt inhibition, as otherwise it causes a VM-entry failure.
6233 *
6234 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
6235 */
6236 if (fSteppingDB)
6237 {
6238 Assert(pVCpu->hm.s.fSingleInstruction);
6239 Assert(pVCpu->cpum.GstCtx.eflags.Bits.u1TF);
6240
6241 uint32_t fIntrState = 0;
6242 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
6243 AssertRC(rc);
6244
6245 if (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
6246 {
6247 fIntrState &= ~(VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
6248 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
6249 AssertRC(rc);
6250 }
6251 }
6252
6253 return VINF_SUCCESS;
6254}
6255
6256
6257#ifdef VBOX_STRICT
6258/**
6259 * Strict function to validate segment registers.
6260 *
6261 * @param pVCpu The cross context virtual CPU structure.
6262 * @param pVmcsInfo The VMCS info. object.
6263 *
6264 * @remarks Will import guest CR0 on strict builds during validation of
6265 * segments.
6266 */
6267static void hmR0VmxValidateSegmentRegs(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
6268{
6269 /*
6270 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
6271 *
6272      * The reason we check for an attribute value of 0 in this function, and not just the unusable bit, is
6273      * that hmR0VmxExportGuestSegReg() only updates the VMCS' copy of the value with the
6274 * unusable bit and doesn't change the guest-context value.
6275 */
6276 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6277 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6278 hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR0);
6279 if ( !pVM->hmr0.s.vmx.fUnrestrictedGuest
6280 && ( !CPUMIsGuestInRealModeEx(pCtx)
6281 && !CPUMIsGuestInV86ModeEx(pCtx)))
6282 {
6283 /* Protected mode checks */
6284 /* CS */
6285 Assert(pCtx->cs.Attr.n.u1Present);
6286 Assert(!(pCtx->cs.Attr.u & 0xf00));
6287 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
6288 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
6289 || !(pCtx->cs.Attr.n.u1Granularity));
6290 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
6291 || (pCtx->cs.Attr.n.u1Granularity));
6292 /* CS cannot be loaded with NULL in protected mode. */
6293 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS? */
6294 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
6295 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
6296 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
6297 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
6298 else
6299             AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
6300 /* SS */
6301 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
6302 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
6303 if ( !(pCtx->cr0 & X86_CR0_PE)
6304 || pCtx->cs.Attr.n.u4Type == 3)
6305 {
6306 Assert(!pCtx->ss.Attr.n.u2Dpl);
6307 }
6308 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
6309 {
6310 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
6311 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
6312 Assert(pCtx->ss.Attr.n.u1Present);
6313 Assert(!(pCtx->ss.Attr.u & 0xf00));
6314 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
6315 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
6316 || !(pCtx->ss.Attr.n.u1Granularity));
6317 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
6318 || (pCtx->ss.Attr.n.u1Granularity));
6319 }
6320 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSegReg(). */
6321 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
6322 {
6323 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
6324 Assert(pCtx->ds.Attr.n.u1Present);
6325 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
6326 Assert(!(pCtx->ds.Attr.u & 0xf00));
6327 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
6328 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
6329 || !(pCtx->ds.Attr.n.u1Granularity));
6330 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
6331 || (pCtx->ds.Attr.n.u1Granularity));
6332 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
6333 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
6334 }
6335 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
6336 {
6337 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
6338 Assert(pCtx->es.Attr.n.u1Present);
6339 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
6340 Assert(!(pCtx->es.Attr.u & 0xf00));
6341 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
6342 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
6343 || !(pCtx->es.Attr.n.u1Granularity));
6344 Assert( !(pCtx->es.u32Limit & 0xfff00000)
6345 || (pCtx->es.Attr.n.u1Granularity));
6346 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
6347 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
6348 }
6349 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
6350 {
6351 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
6352 Assert(pCtx->fs.Attr.n.u1Present);
6353 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
6354 Assert(!(pCtx->fs.Attr.u & 0xf00));
6355 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
6356 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
6357 || !(pCtx->fs.Attr.n.u1Granularity));
6358 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
6359 || (pCtx->fs.Attr.n.u1Granularity));
6360 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
6361 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
6362 }
6363 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
6364 {
6365 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
6366 Assert(pCtx->gs.Attr.n.u1Present);
6367 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
6368 Assert(!(pCtx->gs.Attr.u & 0xf00));
6369 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
6370 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
6371 || !(pCtx->gs.Attr.n.u1Granularity));
6372 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
6373 || (pCtx->gs.Attr.n.u1Granularity));
6374 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
6375 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
6376 }
6377 /* 64-bit capable CPUs. */
6378 Assert(!RT_HI_U32(pCtx->cs.u64Base));
6379 Assert(!pCtx->ss.Attr.u || !RT_HI_U32(pCtx->ss.u64Base));
6380 Assert(!pCtx->ds.Attr.u || !RT_HI_U32(pCtx->ds.u64Base));
6381 Assert(!pCtx->es.Attr.u || !RT_HI_U32(pCtx->es.u64Base));
6382 }
6383 else if ( CPUMIsGuestInV86ModeEx(pCtx)
6384 || ( CPUMIsGuestInRealModeEx(pCtx)
6385 && !pVM->hmr0.s.vmx.fUnrestrictedGuest))
6386 {
6387 /* Real and v86 mode checks. */
6388         /* hmR0VmxExportGuestSegReg() writes the modified attributes into the VMCS. We want what we're feeding to VT-x. */
6389 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
6390 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
6391 {
6392 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3;
6393 u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
6394 }
6395 else
6396 {
6397 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
6398 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
6399 }
6400
6401 /* CS */
6402 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
6403 Assert(pCtx->cs.u32Limit == 0xffff);
6404 Assert(u32CSAttr == 0xf3);
6405 /* SS */
6406 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
6407 Assert(pCtx->ss.u32Limit == 0xffff);
6408 Assert(u32SSAttr == 0xf3);
6409 /* DS */
6410 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
6411 Assert(pCtx->ds.u32Limit == 0xffff);
6412 Assert(u32DSAttr == 0xf3);
6413 /* ES */
6414 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
6415 Assert(pCtx->es.u32Limit == 0xffff);
6416 Assert(u32ESAttr == 0xf3);
6417 /* FS */
6418 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
6419 Assert(pCtx->fs.u32Limit == 0xffff);
6420 Assert(u32FSAttr == 0xf3);
6421 /* GS */
6422 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
6423 Assert(pCtx->gs.u32Limit == 0xffff);
6424 Assert(u32GSAttr == 0xf3);
6425 /* 64-bit capable CPUs. */
6426 Assert(!RT_HI_U32(pCtx->cs.u64Base));
6427 Assert(!u32SSAttr || !RT_HI_U32(pCtx->ss.u64Base));
6428 Assert(!u32DSAttr || !RT_HI_U32(pCtx->ds.u64Base));
6429 Assert(!u32ESAttr || !RT_HI_U32(pCtx->es.u64Base));
6430 }
6431}
6432#endif /* VBOX_STRICT */
6433
6434
6435/**
6436 * Exports a guest segment register into the guest-state area in the VMCS.
6437 *
6438 * @returns VBox status code.
6439 * @param pVCpu The cross context virtual CPU structure.
6440 * @param pVmcsInfo The VMCS info. object.
6441 * @param iSegReg The segment register number (X86_SREG_XXX).
6442 * @param pSelReg Pointer to the segment selector.
6443 *
6444 * @remarks No-long-jump zone!!!
6445 */
6446static int hmR0VmxExportGuestSegReg(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t iSegReg, PCCPUMSELREG pSelReg)
6447{
6448 Assert(iSegReg < X86_SREG_COUNT);
6449
6450 uint32_t u32Access = pSelReg->Attr.u;
6451 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
6452 {
6453 /*
6454          * The way to differentiate whether this is really a null selector or just a selector
6455          * loaded with 0 in real-mode is by using the segment attributes. A selector
6456          * loaded in real-mode with the value 0 is valid and usable in protected-mode and we
6457          * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that
6458          * NULL selectors loaded in protected-mode have their attributes set to 0.
6459 */
6460 if (u32Access)
6461 { }
6462 else
6463 u32Access = X86DESCATTR_UNUSABLE;
6464 }
6465 else
6466 {
6467 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
6468 u32Access = 0xf3;
6469 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
6470 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
6471 RT_NOREF_PV(pVCpu);
6472 }
6473
6474 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
6475 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
6476               ("Access bit not set for usable segment. %.2s sel=%#x attr %#x\n", "ESCSSSDSFSGS" + iSegReg * 2, pSelReg->Sel, pSelReg->Attr.u));
6477
6478 /*
6479 * Commit it to the VMCS.
6480 */
6481 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
6482 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
6483 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
6484 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
6485 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_SEG_SEL(iSegReg), pSelReg->Sel); AssertRC(rc);
6486 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), pSelReg->u32Limit); AssertRC(rc);
6487 rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_SEG_BASE(iSegReg), pSelReg->u64Base); AssertRC(rc);
6488 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), u32Access); AssertRC(rc);
6489 return VINF_SUCCESS;
6490}
6491
6492
6493/**
6494 * Exports the guest segment registers, GDTR, IDTR, LDTR, TR into the guest-state
6495 * area in the VMCS.
6496 *
6497 * @returns VBox status code.
6498 * @param pVCpu The cross context virtual CPU structure.
6499 * @param pVmxTransient The VMX-transient structure.
6500 *
6501 * @remarks Will import guest CR0 on strict builds during validation of
6502 * segments.
6503 * @remarks No-long-jump zone!!!
6504 */
6505static int hmR0VmxExportGuestSegRegsXdtr(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
6506{
6507 int rc = VERR_INTERNAL_ERROR_5;
6508 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6509 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6510 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6511 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
6512
6513 /*
6514 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
6515 */
6516 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK)
6517 {
6518 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CS)
6519 {
6520 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS);
6521 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6522 pVmcsInfoShared->RealMode.AttrCS.u = pCtx->cs.Attr.u;
6523 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_CS, &pCtx->cs);
6524 AssertRC(rc);
6525 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CS);
6526 }
6527
6528 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SS)
6529 {
6530 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SS);
6531 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6532 pVmcsInfoShared->RealMode.AttrSS.u = pCtx->ss.Attr.u;
6533 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_SS, &pCtx->ss);
6534 AssertRC(rc);
6535 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SS);
6536 }
6537
6538 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_DS)
6539 {
6540 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DS);
6541 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6542 pVmcsInfoShared->RealMode.AttrDS.u = pCtx->ds.Attr.u;
6543 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_DS, &pCtx->ds);
6544 AssertRC(rc);
6545 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_DS);
6546 }
6547
6548 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_ES)
6549 {
6550 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_ES);
6551 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6552 pVmcsInfoShared->RealMode.AttrES.u = pCtx->es.Attr.u;
6553 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_ES, &pCtx->es);
6554 AssertRC(rc);
6555 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_ES);
6556 }
6557
6558 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_FS)
6559 {
6560 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_FS);
6561 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6562 pVmcsInfoShared->RealMode.AttrFS.u = pCtx->fs.Attr.u;
6563 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_FS, &pCtx->fs);
6564 AssertRC(rc);
6565 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_FS);
6566 }
6567
6568 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GS)
6569 {
6570 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GS);
6571 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
6572 pVmcsInfoShared->RealMode.AttrGS.u = pCtx->gs.Attr.u;
6573 rc = hmR0VmxExportGuestSegReg(pVCpu, pVmcsInfo, X86_SREG_GS, &pCtx->gs);
6574 AssertRC(rc);
6575 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GS);
6576 }
6577
6578#ifdef VBOX_STRICT
6579 hmR0VmxValidateSegmentRegs(pVCpu, pVmcsInfo);
6580#endif
6581 Log4Func(("cs={%#04x base=%#RX64 limit=%#RX32 attr=%#RX32}\n", pCtx->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit,
6582 pCtx->cs.Attr.u));
6583 }
6584
6585 /*
6586 * Guest TR.
6587 */
6588 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_TR)
6589 {
6590 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_TR);
6591
6592 /*
6593 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is
6594 * achieved using the interrupt redirection bitmap (all bits cleared to let the guest
6595 * handle INT-n's) in the TSS. See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
6596 */
6597 uint16_t u16Sel;
6598 uint32_t u32Limit;
6599 uint64_t u64Base;
6600 uint32_t u32AccessRights;
6601 if (!pVmcsInfoShared->RealMode.fRealOnV86Active)
6602 {
6603 u16Sel = pCtx->tr.Sel;
6604 u32Limit = pCtx->tr.u32Limit;
6605 u64Base = pCtx->tr.u64Base;
6606 u32AccessRights = pCtx->tr.Attr.u;
6607 }
6608 else
6609 {
6610 Assert(!pVmxTransient->fIsNestedGuest);
6611 Assert(pVM->hm.s.vmx.pRealModeTSS);
6612 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */
6613
6614 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
6615 RTGCPHYS GCPhys;
6616 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
6617 AssertRCReturn(rc, rc);
6618
6619 X86DESCATTR DescAttr;
6620 DescAttr.u = 0;
6621 DescAttr.n.u1Present = 1;
6622 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
6623
6624 u16Sel = 0;
6625 u32Limit = HM_VTX_TSS_SIZE;
6626 u64Base = GCPhys;
6627 u32AccessRights = DescAttr.u;
6628 }
6629
6630 /* Validate. */
6631 Assert(!(u16Sel & RT_BIT(2)));
6632 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
6633 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
6634 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
6635 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
6636 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
6637 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
6638 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
6639 Assert( (u32Limit & 0xfff) == 0xfff
6640 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
6641 Assert( !(pCtx->tr.u32Limit & 0xfff00000)
6642 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
6643
6644 rc = VMXWriteVmcs16(VMX_VMCS16_GUEST_TR_SEL, u16Sel); AssertRC(rc);
6645 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRC(rc);
6646 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRC(rc);
6647 rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRC(rc);
6648
6649 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_TR);
6650 Log4Func(("tr base=%#RX64 limit=%#RX32\n", pCtx->tr.u64Base, pCtx->tr.u32Limit));
6651 }
6652
6653 /*
6654 * Guest GDTR.
6655 */
6656 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GDTR)
6657 {
6658 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_GDTR);
6659
6660 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); AssertRC(rc);
6661 rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt); AssertRC(rc);
6662
6663 /* Validate. */
6664 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
6665
6666 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GDTR);
6667 Log4Func(("gdtr base=%#RX64 limit=%#RX32\n", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt));
6668 }
6669
6670 /*
6671 * Guest LDTR.
6672 */
6673 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_LDTR)
6674 {
6675 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_LDTR);
6676
6677 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */
6678 uint32_t u32Access;
6679 if ( !pVmxTransient->fIsNestedGuest
6680 && !pCtx->ldtr.Attr.u)
6681 u32Access = X86DESCATTR_UNUSABLE;
6682 else
6683 u32Access = pCtx->ldtr.Attr.u;
6684
6685 rc = VMXWriteVmcs16(VMX_VMCS16_GUEST_LDTR_SEL, pCtx->ldtr.Sel); AssertRC(rc);
6686 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit); AssertRC(rc);
6687 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRC(rc);
6688 rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); AssertRC(rc);
6689
6690 /* Validate. */
6691 if (!(u32Access & X86DESCATTR_UNUSABLE))
6692 {
6693 Assert(!(pCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
6694 Assert(pCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
6695 Assert(!pCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
6696 Assert(pCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
6697 Assert(!pCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
6698 Assert(!(pCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
6699 Assert( (pCtx->ldtr.u32Limit & 0xfff) == 0xfff
6700 || !pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
6701 Assert( !(pCtx->ldtr.u32Limit & 0xfff00000)
6702 || pCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
6703 }
6704
6705 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_LDTR);
6706 Log4Func(("ldtr base=%#RX64 limit=%#RX32\n", pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit));
6707 }
6708
6709 /*
6710 * Guest IDTR.
6711 */
6712 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_IDTR)
6713 {
6714 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_IDTR);
6715
6716 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); AssertRC(rc);
6717 rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt); AssertRC(rc);
6718
6719 /* Validate. */
6720 Assert(!(pCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
6721
6722 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_IDTR);
6723 Log4Func(("idtr base=%#RX64 limit=%#RX32\n", pCtx->idtr.pIdt, pCtx->idtr.cbIdt));
6724 }
6725
6726 return VINF_SUCCESS;
6727}
6728
6729
6730/**
6731 * Exports certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
6732 * areas.
6733 *
6734 * These MSRs will automatically be loaded to the host CPU on every successful
6735 * VM-entry and stored from the host CPU on every successful VM-exit.
6736 *
6737  * We create/update MSR slots for the host MSRs in the VM-exit MSR-load area. The
6738  * actual host MSR values are not updated here for performance reasons. See
6739  * hmR0VmxExportHostMsrs().
6740  *
6741  * We also export the guest sysenter MSRs into the guest-state area in the VMCS.
6742 *
6743 * @returns VBox status code.
6744 * @param pVCpu The cross context virtual CPU structure.
6745 * @param pVmxTransient The VMX-transient structure.
6746 *
6747 * @remarks No-long-jump zone!!!
6748 */
6749static int hmR0VmxExportGuestMsrs(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
6750{
6751 AssertPtr(pVCpu);
6752 AssertPtr(pVmxTransient);
6753
6754 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6755 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6756
6757 /*
6758      * MSRs for which we use the auto-load/store MSR area in the VMCS.
6759      * For 64-bit hosts, we load/restore them lazily (see hmR0VmxLazyLoadGuestMsrs()),
6760      * so there is nothing to do here. The host MSR values are updated when it's safe in
6761      * hmR0VmxLazySaveHostMsrs().
6762      *
6763      * For nested-guests, the guest's MSRs from the VM-entry MSR-load area are already
6764      * loaded (into the guest-CPU context) by the VMLAUNCH/VMRESUME instruction
6765      * emulation. The merged MSR permission bitmap will ensure that we get VM-exits
6766      * for any MSRs that are not part of the lazy MSRs, so we do not need to place
6767      * those MSRs into the auto-load/store MSR area. Nothing to do here.
6768 */
6769 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
6770 {
6771 /* No auto-load/store MSRs currently. */
6772 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_AUTO_MSRS);
6773 }
6774
6775 /*
6776 * Guest Sysenter MSRs.
6777 */
6778 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK)
6779 {
6780 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SYSENTER_MSRS);
6781
6782 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
6783 {
6784 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pCtx->SysEnter.cs);
6785 AssertRC(rc);
6786 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_CS_MSR);
6787 }
6788
6789 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
6790 {
6791 int rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip);
6792 AssertRC(rc);
6793 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR);
6794 }
6795
6796 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
6797 {
6798 int rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp);
6799 AssertRC(rc);
6800 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
6801 }
6802 }
6803
6804 /*
6805 * Guest/host EFER MSR.
6806 */
6807 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR)
6808 {
6809 /* Whether we are using the VMCS to swap the EFER MSR must have been
6810 determined earlier while exporting VM-entry/VM-exit controls. */
6811 Assert(!(ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_EXIT_CTLS));
6812 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
6813
6814 if (hmR0VmxShouldSwapEferMsr(pVCpu, pVmxTransient))
6815 {
6816 /*
6817 * EFER.LME is written by software, while EFER.LMA is set by the CPU to (CR0.PG & EFER.LME).
6818 * This means a guest can set EFER.LME=1 while CR0.PG=0 and EFER.LMA can remain 0.
6819 * VT-x requires that "IA-32e mode guest" VM-entry control must be identical to EFER.LMA
6820 * and to CR0.PG. Without unrestricted execution, CR0.PG (used for VT-x, not the shadow)
6821 * must always be 1. This forces us to effectively clear both EFER.LMA and EFER.LME until
6822 * the guest has also set CR0.PG=1. Otherwise, we would run into an invalid-guest state
6823 * during VM-entry.
6824 */
6825 uint64_t uGuestEferMsr = pCtx->msrEFER;
6826 if (!pVM->hmr0.s.vmx.fUnrestrictedGuest)
6827 {
6828 if (!(pCtx->msrEFER & MSR_K6_EFER_LMA))
6829 uGuestEferMsr &= ~MSR_K6_EFER_LME;
6830 else
6831 Assert((pCtx->msrEFER & (MSR_K6_EFER_LMA | MSR_K6_EFER_LME)) == (MSR_K6_EFER_LMA | MSR_K6_EFER_LME));
6832 }
6833
6834 /*
6835 * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
6836 * but to use the auto-load store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
6837 */
6838 if (g_fHmVmxSupportsVmcsEfer)
6839 {
6840 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, uGuestEferMsr);
6841 AssertRC(rc);
6842 }
6843 else
6844 {
6845 /*
6846 * We shall use the auto-load/store MSR area only for loading the EFER MSR but we must
6847 * continue to intercept guest read and write accesses to it, see @bugref{7386#c16}.
6848 */
6849 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER, uGuestEferMsr,
6850 false /* fSetReadWrite */, false /* fUpdateHostMsr */);
6851 AssertRCReturn(rc, rc);
6852 }
6853
6854 Log4Func(("efer=%#RX64 shadow=%#RX64\n", uGuestEferMsr, pCtx->msrEFER));
6855 }
6856 else if (!g_fHmVmxSupportsVmcsEfer)
6857 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER);
6858
6859 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);
6860 }
6861
6862 /*
6863 * Other MSRs.
6864 */
6865 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_OTHER_MSRS)
6866 {
6867 /* Speculation Control (R/W). */
6868 HMVMX_CPUMCTX_ASSERT(pVCpu, HM_CHANGED_GUEST_OTHER_MSRS);
6869 if (pVM->cpum.ro.GuestFeatures.fIbrs)
6870 {
6871 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu),
6872 false /* fSetReadWrite */, false /* fUpdateHostMsr */);
6873 AssertRCReturn(rc, rc);
6874 }
6875
6876 /* Last Branch Record. */
6877 if (pVM->hmr0.s.vmx.fLbr)
6878 {
6879 PVMXVMCSINFOSHARED const pVmcsInfoShared = pVmxTransient->pVmcsInfo->pShared;
6880 uint32_t const idFromIpMsrStart = pVM->hmr0.s.vmx.idLbrFromIpMsrFirst;
6881 uint32_t const idToIpMsrStart = pVM->hmr0.s.vmx.idLbrToIpMsrFirst;
6882 uint32_t const cLbrStack = pVM->hmr0.s.vmx.idLbrFromIpMsrLast - pVM->hmr0.s.vmx.idLbrFromIpMsrFirst + 1;
6883 Assert(cLbrStack <= 32);
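                  /* Add each LBR From-IP MSR (and, where the CPU has them, the To-IP MSRs) to the auto-load/store
                     area so their guest values are loaded on VM-entry and stored on VM-exit. */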
6884 for (uint32_t i = 0; i < cLbrStack; i++)
6885 {
6886 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, idFromIpMsrStart + i,
6887 pVmcsInfoShared->au64LbrFromIpMsr[i],
6888 false /* fSetReadWrite */, false /* fUpdateHostMsr */);
6889 AssertRCReturn(rc, rc);
6890
6891 /* Some CPUs don't have a Branch-To-IP MSR (P4 and related Xeons). */
6892 if (idToIpMsrStart != 0)
6893 {
6894 rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, idToIpMsrStart + i,
6895 pVmcsInfoShared->au64LbrToIpMsr[i],
6896 false /* fSetReadWrite */, false /* fUpdateHostMsr */);
6897 AssertRCReturn(rc, rc);
6898 }
6899 }
6900
6901 /* Add LBR top-of-stack MSR (which contains the index to the most recent record). */
6902 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, pVM->hmr0.s.vmx.idLbrTosMsr,
6903 pVmcsInfoShared->u64LbrTosMsr, false /* fSetReadWrite */,
6904 false /* fUpdateHostMsr */);
6905 AssertRCReturn(rc, rc);
6906 }
6907
6908 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_OTHER_MSRS);
6909 }
6910
6911 return VINF_SUCCESS;
6912}
6913
6914
6915/**
6916 * Wrapper for running the guest code in VT-x.
6917 *
6918 * @returns VBox status code, no informational status codes.
6919 * @param pVCpu The cross context virtual CPU structure.
6920 * @param pVmxTransient The VMX-transient structure.
6921 *
6922 * @remarks No-long-jump zone!!!
6923 */
6924DECLINLINE(int) hmR0VmxRunGuest(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient)
6925{
6926 /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */
6927 pVCpu->cpum.GstCtx.fExtrn |= HMVMX_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM;
6928
6929 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
6930 bool const fResumeVM = RT_BOOL(pVmcsInfo->fVmcsState & VMX_V_VMCS_LAUNCH_STATE_LAUNCHED);
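      /* A VMCS in the "launched" state must be entered with VMRESUME; otherwise VMLAUNCH is required. */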
6931#ifdef VBOX_WITH_STATISTICS
6932 if (fResumeVM)
6933 STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxVmResume);
6934 else
6935 STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxVmLaunch);
6936#endif
6937 int rc = pVCpu->hmr0.s.vmx.pfnStartVm(pVmcsInfo, pVCpu, fResumeVM);
6938 AssertMsg(rc <= VINF_SUCCESS, ("%Rrc\n", rc));
6939 return rc;
6940}
6941
6942
6943/**
6944 * Reports world-switch error and dumps some useful debug info.
6945 *
6946 * @param pVCpu The cross context virtual CPU structure.
6947 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
6948 * @param pVmxTransient The VMX-transient structure (only
6949 * exitReason updated).
6950 */
6951static void hmR0VmxReportWorldSwitchError(PVMCPUCC pVCpu, int rcVMRun, PVMXTRANSIENT pVmxTransient)
6952{
6953 Assert(pVCpu);
6954 Assert(pVmxTransient);
6955 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
6956
6957 Log4Func(("VM-entry failure: %Rrc\n", rcVMRun));
6958 switch (rcVMRun)
6959 {
6960 case VERR_VMX_INVALID_VMXON_PTR:
6961 AssertFailed();
6962 break;
6963 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
6964 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
6965 {
6966 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
6967 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
6968 AssertRC(rc);
6969 hmR0VmxReadExitQualVmcs(pVmxTransient);
6970
6971 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hmr0.s.idEnteredCpu;
6972 /* LastError.idCurrentCpu was already updated in hmR0VmxPreRunGuestCommitted().
6973 Cannot do it here as we may have been long preempted. */
6974
6975#ifdef VBOX_STRICT
6976 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
6977 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
6978 pVmxTransient->uExitReason));
6979 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQual));
6980 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
6981 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
6982 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));
6983 else
6984 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
6985 Log4(("Entered host CPU %u\n", pVCpu->hm.s.vmx.LastError.idEnteredCpu));
6986 Log4(("Current host CPU %u\n", pVCpu->hm.s.vmx.LastError.idCurrentCpu));
6987
6988 static struct
6989 {
6990 /** Name of the field to log. */
6991 const char *pszName;
6992 /** The VMCS field. */
6993 uint32_t uVmcsField;
6994 /** Whether host support of this field needs to be checked. */
6995 bool fCheckSupport;
6996 } const s_aVmcsFields[] =
6997 {
6998 { "VMX_VMCS32_CTRL_PIN_EXEC", VMX_VMCS32_CTRL_PIN_EXEC, false },
6999 { "VMX_VMCS32_CTRL_PROC_EXEC", VMX_VMCS32_CTRL_PROC_EXEC, false },
7000 { "VMX_VMCS32_CTRL_PROC_EXEC2", VMX_VMCS32_CTRL_PROC_EXEC2, true },
7001 { "VMX_VMCS32_CTRL_ENTRY", VMX_VMCS32_CTRL_ENTRY, false },
7002 { "VMX_VMCS32_CTRL_EXIT", VMX_VMCS32_CTRL_EXIT, false },
7003 { "VMX_VMCS32_CTRL_CR3_TARGET_COUNT", VMX_VMCS32_CTRL_CR3_TARGET_COUNT, false },
7004 { "VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO", VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, false },
7005 { "VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE", VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, false },
7006 { "VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH", VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, false },
7007 { "VMX_VMCS32_CTRL_TPR_THRESHOLD", VMX_VMCS32_CTRL_TPR_THRESHOLD, false },
7008 { "VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT", VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, false },
7009 { "VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT", VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, false },
7010 { "VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT", VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, false },
7011 { "VMX_VMCS32_CTRL_EXCEPTION_BITMAP", VMX_VMCS32_CTRL_EXCEPTION_BITMAP, false },
7012 { "VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK", VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, false },
7013 { "VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH", VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, false },
7014 { "VMX_VMCS_CTRL_CR0_MASK", VMX_VMCS_CTRL_CR0_MASK, false },
7015 { "VMX_VMCS_CTRL_CR0_READ_SHADOW", VMX_VMCS_CTRL_CR0_READ_SHADOW, false },
7016 { "VMX_VMCS_CTRL_CR4_MASK", VMX_VMCS_CTRL_CR4_MASK, false },
7017 { "VMX_VMCS_CTRL_CR4_READ_SHADOW", VMX_VMCS_CTRL_CR4_READ_SHADOW, false },
7018 { "VMX_VMCS64_CTRL_EPTP_FULL", VMX_VMCS64_CTRL_EPTP_FULL, true },
7019 { "VMX_VMCS_GUEST_RIP", VMX_VMCS_GUEST_RIP, false },
7020 { "VMX_VMCS_GUEST_RSP", VMX_VMCS_GUEST_RSP, false },
7021 { "VMX_VMCS_GUEST_RFLAGS", VMX_VMCS_GUEST_RFLAGS, false },
7022 { "VMX_VMCS16_VPID", VMX_VMCS16_VPID, true, },
7023 { "VMX_VMCS_HOST_CR0", VMX_VMCS_HOST_CR0, false },
7024 { "VMX_VMCS_HOST_CR3", VMX_VMCS_HOST_CR3, false },
7025 { "VMX_VMCS_HOST_CR4", VMX_VMCS_HOST_CR4, false },
7026 /* The order of selector fields below are fixed! */
7027 { "VMX_VMCS16_HOST_ES_SEL", VMX_VMCS16_HOST_ES_SEL, false },
7028 { "VMX_VMCS16_HOST_CS_SEL", VMX_VMCS16_HOST_CS_SEL, false },
7029 { "VMX_VMCS16_HOST_SS_SEL", VMX_VMCS16_HOST_SS_SEL, false },
7030 { "VMX_VMCS16_HOST_DS_SEL", VMX_VMCS16_HOST_DS_SEL, false },
7031 { "VMX_VMCS16_HOST_FS_SEL", VMX_VMCS16_HOST_FS_SEL, false },
7032 { "VMX_VMCS16_HOST_GS_SEL", VMX_VMCS16_HOST_GS_SEL, false },
7033 { "VMX_VMCS16_HOST_TR_SEL", VMX_VMCS16_HOST_TR_SEL, false },
7034 /* End of ordered selector fields. */
7035 { "VMX_VMCS_HOST_TR_BASE", VMX_VMCS_HOST_TR_BASE, false },
7036 { "VMX_VMCS_HOST_GDTR_BASE", VMX_VMCS_HOST_GDTR_BASE, false },
7037 { "VMX_VMCS_HOST_IDTR_BASE", VMX_VMCS_HOST_IDTR_BASE, false },
7038 { "VMX_VMCS32_HOST_SYSENTER_CS", VMX_VMCS32_HOST_SYSENTER_CS, false },
7039 { "VMX_VMCS_HOST_SYSENTER_EIP", VMX_VMCS_HOST_SYSENTER_EIP, false },
7040 { "VMX_VMCS_HOST_SYSENTER_ESP", VMX_VMCS_HOST_SYSENTER_ESP, false },
7041 { "VMX_VMCS_HOST_RSP", VMX_VMCS_HOST_RSP, false },
7042 { "VMX_VMCS_HOST_RIP", VMX_VMCS_HOST_RIP, false }
7043 };
7044
7045 RTGDTR HostGdtr;
7046 ASMGetGDTR(&HostGdtr);
7047
7048 uint32_t const cVmcsFields = RT_ELEMENTS(s_aVmcsFields);
7049 for (uint32_t i = 0; i < cVmcsFields; i++)
7050 {
7051 uint32_t const uVmcsField = s_aVmcsFields[i].uVmcsField;
7052
7053 bool fSupported;
7054 if (!s_aVmcsFields[i].fCheckSupport)
7055 fSupported = true;
7056 else
7057 {
7058 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7059 switch (uVmcsField)
7060 {
7061 case VMX_VMCS64_CTRL_EPTP_FULL: fSupported = pVM->hmr0.s.fNestedPaging; break;
7062 case VMX_VMCS16_VPID: fSupported = pVM->hmr0.s.vmx.fVpid; break;
7063 case VMX_VMCS32_CTRL_PROC_EXEC2:
7064 fSupported = RT_BOOL(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS);
7065 break;
7066 default:
7067 AssertMsgFailedReturnVoid(("Failed to provide VMCS field support for %#RX32\n", uVmcsField));
7068 }
7069 }
7070
7071 if (fSupported)
7072 {
7073 uint8_t const uWidth = RT_BF_GET(uVmcsField, VMX_BF_VMCSFIELD_WIDTH);
7074 switch (uWidth)
7075 {
7076 case VMX_VMCSFIELD_WIDTH_16BIT:
7077 {
7078 uint16_t u16Val;
7079 rc = VMXReadVmcs16(uVmcsField, &u16Val);
7080 AssertRC(rc);
7081 Log4(("%-40s = %#RX16\n", s_aVmcsFields[i].pszName, u16Val));
7082
7083 if ( uVmcsField >= VMX_VMCS16_HOST_ES_SEL
7084 && uVmcsField <= VMX_VMCS16_HOST_TR_SEL)
7085 {
7086 if (u16Val < HostGdtr.cbGdt)
7087 {
7088 /* Order of selectors in s_apszSel is fixed and matches the order in s_aVmcsFields. */
7089 static const char * const s_apszSel[] = { "Host ES", "Host CS", "Host SS", "Host DS",
7090 "Host FS", "Host GS", "Host TR" };
7091 uint8_t const idxSel = RT_BF_GET(uVmcsField, VMX_BF_VMCSFIELD_INDEX);
7092 Assert(idxSel < RT_ELEMENTS(s_apszSel));
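                                  /* X86_SEL_MASK clears the RPL and TI bits, leaving the descriptor's byte offset within the GDT. */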
7093 PCX86DESCHC pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u16Val & X86_SEL_MASK));
7094 hmR0DumpDescriptor(pDesc, u16Val, s_apszSel[idxSel]);
7095 }
7096 else
7097 Log4((" Selector value exceeds GDT limit!\n"));
7098 }
7099 break;
7100 }
7101
7102 case VMX_VMCSFIELD_WIDTH_32BIT:
7103 {
7104 uint32_t u32Val;
7105 rc = VMXReadVmcs32(uVmcsField, &u32Val);
7106 AssertRC(rc);
7107 Log4(("%-40s = %#RX32\n", s_aVmcsFields[i].pszName, u32Val));
7108 break;
7109 }
7110
7111 case VMX_VMCSFIELD_WIDTH_64BIT:
7112 case VMX_VMCSFIELD_WIDTH_NATURAL:
7113 {
7114 uint64_t u64Val;
7115 rc = VMXReadVmcs64(uVmcsField, &u64Val);
7116 AssertRC(rc);
7117 Log4(("%-40s = %#RX64\n", s_aVmcsFields[i].pszName, u64Val));
7118 break;
7119 }
7120 }
7121 }
7122 }
7123
7124 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
7125 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
7126 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
7127 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
7128 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
7129 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
7130#endif /* VBOX_STRICT */
7131 break;
7132 }
7133
7134 default:
7135 /* Impossible */
7136 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
7137 break;
7138 }
7139}
7140
7141
7142/**
7143 * Sets up the usage of TSC-offsetting and updates the VMCS.
7144 *
7145 * If offsetting is not possible, cause VM-exits on RDTSC(P)s. Also sets up the
7146 * VMX-preemption timer.
7147 *
7148 * @returns VBox status code.
7149 * @param pVCpu The cross context virtual CPU structure.
7150 * @param pVmxTransient The VMX-transient structure.
7151 * @param idCurrentCpu The current CPU number.
7152 *
7153 * @remarks No-long-jump zone!!!
7154 */
7155static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, RTCPUID idCurrentCpu)
7156{
7157 bool fOffsettedTsc;
7158 bool fParavirtTsc;
7159 uint64_t uTscOffset;
7160 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7161 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
7162
7163 if (pVM->hmr0.s.vmx.fUsePreemptTimer)
7164 {
7165 /* The TMCpuTickGetDeadlineAndTscOffset function is expensive (calling it on
7166            every entry slowed down the bs2-test1 CPUID testcase by ~33% on a 10980xe). */
7167 uint64_t cTicksToDeadline;
7168 if ( idCurrentCpu == pVCpu->hmr0.s.idLastCpu
7169 && TMVirtualSyncIsCurrentDeadlineVersion(pVM, pVCpu->hmr0.s.vmx.uTscDeadlineVersion))
7170 {
7171 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatVmxPreemptionReusingDeadline);
7172 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
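                  /* The cached absolute TSC deadline is still valid; only the remaining tick count needs refreshing. */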
7173 cTicksToDeadline = pVCpu->hmr0.s.vmx.uTscDeadline - SUPReadTsc();
7174 if ((int64_t)cTicksToDeadline > 0)
7175 { /* hopefully */ }
7176 else
7177 {
7178 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatVmxPreemptionReusingDeadlineExpired);
7179 cTicksToDeadline = 0;
7180 }
7181 }
7182 else
7183 {
7184 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatVmxPreemptionRecalcingDeadline);
7185 cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &uTscOffset, &fOffsettedTsc, &fParavirtTsc,
7186 &pVCpu->hmr0.s.vmx.uTscDeadline,
7187 &pVCpu->hmr0.s.vmx.uTscDeadlineVersion);
7188 pVCpu->hmr0.s.vmx.uTscDeadline += cTicksToDeadline;
7189 if (cTicksToDeadline >= 128)
7190 { /* hopefully */ }
7191 else
7192 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatVmxPreemptionRecalcingDeadlineExpired);
7193 }
7194
7195 /* Make sure the returned values have sane upper and lower boundaries. */
7196 uint64_t const u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
7197 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second, 15.625ms. */ /** @todo r=bird: Once real+virtual timers move to separate thread, we can raise the upper limit (16ms isn't much). ASSUMES working poke cpu function. */
7198         cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 32768); /* 1/32768th of a second, ~30us. */
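             /* The VMX-preemption timer counts down at the TSC rate divided by 2^cPreemptTimerShift
                (the shift is reported in IA32_VMX_MISC[4:0]). */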
7199 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
7200
7201 /** @todo r=ramshankar: We need to find a way to integrate nested-guest
7202 * preemption timers here. We probably need to clamp the preemption timer,
7203 * after converting the timer value to the host. */
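             /* The VMX-preemption timer VMCS field is only 32 bits wide, hence the clamp (with a little headroom). */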
7204 uint32_t const cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
7205 int rc = VMXWriteVmcs32(VMX_VMCS32_PREEMPT_TIMER_VALUE, cPreemptionTickCount);
7206 AssertRC(rc);
7207 }
7208 else
7209 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
7210
7211 if (fParavirtTsc)
7212 {
7213         /* Currently neither Hyper-V nor KVM needs to update its paravirt. TSC
7214            information before every VM-entry, hence it is disabled here for performance's sake. */
7215#if 0
7216 int rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
7217 AssertRC(rc);
7218#endif
7219 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
7220 }
7221
7222 if ( fOffsettedTsc
7223 && RT_LIKELY(!pVCpu->hmr0.s.fDebugWantRdTscExit))
7224 {
7225 if (pVmxTransient->fIsNestedGuest)
7226 uTscOffset = CPUMApplyNestedGuestTscOffset(pVCpu, uTscOffset);
7227 hmR0VmxSetTscOffsetVmcs(pVmcsInfo, uTscOffset);
7228 hmR0VmxRemoveProcCtlsVmcs(pVCpu, pVmxTransient, VMX_PROC_CTLS_RDTSC_EXIT);
7229 }
7230 else
7231 {
7232 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
7233 hmR0VmxSetProcCtlsVmcs(pVmxTransient, VMX_PROC_CTLS_RDTSC_EXIT);
7234 }
7235}
7236
7237
7238/**
7239 * Gets the IEM exception flags for the specified vector and IDT vectoring /
7240 * VM-exit interruption info type.
7241 *
7242 * @returns The IEM exception flags.
7243 * @param uVector The event vector.
7244 * @param uVmxEventType The VMX event type.
7245 *
7246 * @remarks This function currently only constructs flags required for
7247 * IEMEvaluateRecursiveXcpt and not the complete flags (e.g, error-code
7248 * and CR2 aspects of an exception are not included).
7249 */
7250static uint32_t hmR0VmxGetIemXcptFlags(uint8_t uVector, uint32_t uVmxEventType)
7251{
7252 uint32_t fIemXcptFlags;
7253 switch (uVmxEventType)
7254 {
7255 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
7256 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
7257 fIemXcptFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
7258 break;
7259
7260 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
7261 fIemXcptFlags = IEM_XCPT_FLAGS_T_EXT_INT;
7262 break;
7263
7264 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
7265 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR;
7266 break;
7267
7268 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
7269 {
7270 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
7271 if (uVector == X86_XCPT_BP)
7272 fIemXcptFlags |= IEM_XCPT_FLAGS_BP_INSTR;
7273 else if (uVector == X86_XCPT_OF)
7274 fIemXcptFlags |= IEM_XCPT_FLAGS_OF_INSTR;
7275 else
7276 {
7277 fIemXcptFlags = 0;
7278 AssertMsgFailed(("Unexpected vector for software exception. uVector=%#x", uVector));
7279 }
7280 break;
7281 }
7282
7283 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
7284 fIemXcptFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
7285 break;
7286
7287 default:
7288 fIemXcptFlags = 0;
7289 AssertMsgFailed(("Unexpected vector type! uVmxEventType=%#x uVector=%#x", uVmxEventType, uVector));
7290 break;
7291 }
7292 return fIemXcptFlags;
7293}
7294
7295
7296/**
7297 * Sets an event as a pending event to be injected into the guest.
7298 *
7299 * @param pVCpu The cross context virtual CPU structure.
7300 * @param u32IntInfo The VM-entry interruption-information field.
7301 * @param cbInstr The VM-entry instruction length in bytes (for
7302 * software interrupts, exceptions and privileged
7303 * software exceptions).
7304 * @param u32ErrCode The VM-entry exception error code.
7305 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
7306 * page-fault.
7307 */
7308DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPUCC pVCpu, uint32_t u32IntInfo, uint32_t cbInstr, uint32_t u32ErrCode,
7309 RTGCUINTPTR GCPtrFaultAddress)
7310{
7311 Assert(!pVCpu->hm.s.Event.fPending);
7312 pVCpu->hm.s.Event.fPending = true;
7313 pVCpu->hm.s.Event.u64IntInfo = u32IntInfo;
7314 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
7315 pVCpu->hm.s.Event.cbInstr = cbInstr;
7316 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
7317}
7318
7319
7320/**
7321 * Sets an external interrupt as pending-for-injection into the VM.
7322 *
7323 * @param pVCpu The cross context virtual CPU structure.
7324 * @param u8Interrupt The external interrupt vector.
7325 */
7326DECLINLINE(void) hmR0VmxSetPendingExtInt(PVMCPUCC pVCpu, uint8_t u8Interrupt)
7327{
7328 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, u8Interrupt)
7329 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
7330 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
7331 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
7332 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7333}
7334
7335
7336/**
7337 * Sets an NMI (\#NMI) exception as pending-for-injection into the VM.
7338 *
7339 * @param pVCpu The cross context virtual CPU structure.
7340 */
7341DECLINLINE(void) hmR0VmxSetPendingXcptNmi(PVMCPUCC pVCpu)
7342{
7343 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_NMI)
7344 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_NMI)
7345 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
7346 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
7347 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7348}
7349
7350
7351/**
7352 * Sets a double-fault (\#DF) exception as pending-for-injection into the VM.
7353 *
7354 * @param pVCpu The cross context virtual CPU structure.
7355 */
7356DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPUCC pVCpu)
7357{
7358 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
7359 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
7360 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
7361 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
7362 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7363}
7364
7365
7366/**
7367 * Sets an invalid-opcode (\#UD) exception as pending-for-injection into the VM.
7368 *
7369 * @param pVCpu The cross context virtual CPU structure.
7370 */
7371DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPUCC pVCpu)
7372{
7373 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_UD)
7374 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
7375 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
7376 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
7377 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7378}
7379
7380
7381/**
7382 * Sets a debug (\#DB) exception as pending-for-injection into the VM.
7383 *
7384 * @param pVCpu The cross context virtual CPU structure.
7385 */
7386DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPUCC pVCpu)
7387{
7388 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
7389 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
7390 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
7391 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
7392 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
7393}
7394
7395
7396#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7397/**
7398 * Sets a general-protection (\#GP) exception as pending-for-injection into the VM.
7399 *
7400 * @param pVCpu The cross context virtual CPU structure.
7401 * @param u32ErrCode The error code for the general-protection exception.
7402 */
7403DECLINLINE(void) hmR0VmxSetPendingXcptGP(PVMCPUCC pVCpu, uint32_t u32ErrCode)
7404{
7405 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
7406 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
7407 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
7408 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
7409 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
7410}
7411
7412
7413/**
7414 * Sets a stack (\#SS) exception as pending-for-injection into the VM.
7415 *
7416 * @param pVCpu The cross context virtual CPU structure.
7417 * @param u32ErrCode The error code for the stack exception.
7418 */
7419DECLINLINE(void) hmR0VmxSetPendingXcptSS(PVMCPUCC pVCpu, uint32_t u32ErrCode)
7420{
7421 uint32_t const u32IntInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_SS)
7422 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
7423 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
7424 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
7425 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
7426}
7427#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
7428
7429
7430/**
7431 * Fixes up attributes for the specified segment register.
7432 *
7433 * @param pVCpu The cross context virtual CPU structure.
7434 * @param pSelReg The segment register that needs fixing.
7435 * @param pszRegName The register name (for logging and assertions).
7436 */
7437static void hmR0VmxFixUnusableSegRegAttr(PVMCPUCC pVCpu, PCPUMSELREG pSelReg, const char *pszRegName)
7438{
7439 Assert(pSelReg->Attr.u & X86DESCATTR_UNUSABLE);
7440
7441 /*
7442 * If VT-x marks the segment as unusable, most other bits remain undefined:
7443 * - For CS the L, D and G bits have meaning.
7444 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
7445 * - For the remaining data segments no bits are defined.
7446 *
7447 * The present bit and the unusable bit have been observed to be set at the
7448 * same time (the selector was supposed to be invalid as we started executing
7449 * a V8086 interrupt in ring-0).
7450 *
7451 * What should be important for the rest of the VBox code, is that the P bit is
7452 * cleared. Some of the other VBox code recognizes the unusable bit, but
7453 * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
7454 * safe side here, we'll strip off P and other bits we don't care about. If
7455 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
7456 *
7457 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
7458 */
7459#ifdef VBOX_STRICT
7460 uint32_t const uAttr = pSelReg->Attr.u;
7461#endif
7462
7463 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
7464 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
7465 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
7466
7467#ifdef VBOX_STRICT
7468 VMMRZCallRing3Disable(pVCpu);
7469 Log4Func(("Unusable %s: sel=%#x attr=%#x -> %#x\n", pszRegName, pSelReg->Sel, uAttr, pSelReg->Attr.u));
7470# ifdef DEBUG_bird
7471 AssertMsg((uAttr & ~X86DESCATTR_P) == pSelReg->Attr.u,
7472 ("%s: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
7473 pszRegName, uAttr, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
7474# endif
7475 VMMRZCallRing3Enable(pVCpu);
7476 NOREF(uAttr);
7477#endif
7478 RT_NOREF2(pVCpu, pszRegName);
7479}
7480
7481
7482/**
7483 * Imports a guest segment register from the current VMCS into the guest-CPU
7484 * context.
7485 *
7486 * @param pVCpu The cross context virtual CPU structure.
7487 * @param iSegReg The segment register number (X86_SREG_XXX).
7488 *
7489 * @remarks Called with interrupts and/or preemption disabled.
7490 */
7491static void hmR0VmxImportGuestSegReg(PVMCPUCC pVCpu, uint32_t iSegReg)
7492{
7493 Assert(iSegReg < X86_SREG_COUNT);
7494 Assert((uint32_t)VMX_VMCS16_GUEST_SEG_SEL(iSegReg) == g_aVmcsSegSel[iSegReg]);
7495 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg) == g_aVmcsSegLimit[iSegReg]);
7496 Assert((uint32_t)VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg) == g_aVmcsSegAttr[iSegReg]);
7497 Assert((uint32_t)VMX_VMCS_GUEST_SEG_BASE(iSegReg) == g_aVmcsSegBase[iSegReg]);
7498
7499 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
7500
7501 uint16_t u16Sel;
7502 int rc = VMXReadVmcs16(VMX_VMCS16_GUEST_SEG_SEL(iSegReg), &u16Sel); AssertRC(rc);
7503 pSelReg->Sel = u16Sel;
7504 pSelReg->ValidSel = u16Sel;
7505
7506 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SEG_LIMIT(iSegReg), &pSelReg->u32Limit); AssertRC(rc);
7507 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_SEG_BASE(iSegReg), &pSelReg->u64Base); AssertRC(rc);
7508
7509 uint32_t u32Attr;
7510 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SEG_ACCESS_RIGHTS(iSegReg), &u32Attr); AssertRC(rc);
7511 pSelReg->Attr.u = u32Attr;
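    /* Note: the string literal below packs the six segment names ("ES", "CS", "SS", "DS", "FS", "GS")
       as NUL-terminated 2-character entries, so an offset of iSegReg * 3 selects the right name for logging. */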
7512 if (u32Attr & X86DESCATTR_UNUSABLE)
7513 hmR0VmxFixUnusableSegRegAttr(pVCpu, pSelReg, "ES\0CS\0SS\0DS\0FS\0GS" + iSegReg * 3);
7514
7515 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
7516}
7517
7518
7519/**
7520 * Imports the guest LDTR from the current VMCS into the guest-CPU context.
7521 *
7522 * @param pVCpu The cross context virtual CPU structure.
7523 *
7524 * @remarks Called with interrupts and/or preemption disabled.
7525 */
7526static void hmR0VmxImportGuestLdtr(PVMCPUCC pVCpu)
7527{
7528 uint16_t u16Sel;
7529 uint64_t u64Base;
7530 uint32_t u32Limit, u32Attr;
7531 int rc = VMXReadVmcs16(VMX_VMCS16_GUEST_LDTR_SEL, &u16Sel); AssertRC(rc);
7532 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, &u32Limit); AssertRC(rc);
7533 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
7534 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_LDTR_BASE, &u64Base); AssertRC(rc);
7535
7536 pVCpu->cpum.GstCtx.ldtr.Sel = u16Sel;
7537 pVCpu->cpum.GstCtx.ldtr.ValidSel = u16Sel;
7538 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
7539 pVCpu->cpum.GstCtx.ldtr.u32Limit = u32Limit;
7540 pVCpu->cpum.GstCtx.ldtr.u64Base = u64Base;
7541 pVCpu->cpum.GstCtx.ldtr.Attr.u = u32Attr;
7542 if (u32Attr & X86DESCATTR_UNUSABLE)
7543 hmR0VmxFixUnusableSegRegAttr(pVCpu, &pVCpu->cpum.GstCtx.ldtr, "LDTR");
7544}
7545
7546
7547/**
7548 * Imports the guest TR from the current VMCS into the guest-CPU context.
7549 *
7550 * @param pVCpu The cross context virtual CPU structure.
7551 *
7552 * @remarks Called with interrupts and/or preemption disabled.
7553 */
7554static void hmR0VmxImportGuestTr(PVMCPUCC pVCpu)
7555{
7556 uint16_t u16Sel;
7557 uint64_t u64Base;
7558 uint32_t u32Limit, u32Attr;
7559 int rc = VMXReadVmcs16(VMX_VMCS16_GUEST_TR_SEL, &u16Sel); AssertRC(rc);
7560 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, &u32Limit); AssertRC(rc);
7561 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, &u32Attr); AssertRC(rc);
7562 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_TR_BASE, &u64Base); AssertRC(rc);
7563
7564 pVCpu->cpum.GstCtx.tr.Sel = u16Sel;
7565 pVCpu->cpum.GstCtx.tr.ValidSel = u16Sel;
7566 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
7567 pVCpu->cpum.GstCtx.tr.u32Limit = u32Limit;
7568 pVCpu->cpum.GstCtx.tr.u64Base = u64Base;
7569 pVCpu->cpum.GstCtx.tr.Attr.u = u32Attr;
7570 /* TR is the only selector that can never be unusable. */
7571 Assert(!(u32Attr & X86DESCATTR_UNUSABLE));
7572}
7573
7574
7575/**
7576 * Imports the guest RIP from the VMCS back into the guest-CPU context.
7577 *
7578 * @param pVCpu The cross context virtual CPU structure.
7579 *
7580 * @remarks Called with interrupts and/or preemption disabled, should not assert!
7581 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
7582 * instead!!!
7583 */
7584static void hmR0VmxImportGuestRip(PVMCPUCC pVCpu)
7585{
7586 uint64_t u64Val;
7587 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7588 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP)
7589 {
7590 int rc = VMXReadVmcsNw(VMX_VMCS_GUEST_RIP, &u64Val);
7591 AssertRC(rc);
7592
7593 pCtx->rip = u64Val;
7594 EMHistoryUpdatePC(pVCpu, pCtx->rip, false);
7595 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
7596 }
7597}
7598
7599
7600/**
7601 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context.
7602 *
7603 * @param pVCpu The cross context virtual CPU structure.
7604 * @param pVmcsInfo The VMCS info. object.
7605 *
7606 * @remarks Called with interrupts and/or preemption disabled, should not assert!
7607 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
7608 * instead!!!
7609 */
7610static void hmR0VmxImportGuestRFlags(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
7611{
7612 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7613 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS)
7614 {
7615 uint64_t u64Val;
7616 int rc = VMXReadVmcsNw(VMX_VMCS_GUEST_RFLAGS, &u64Val);
7617 AssertRC(rc);
7618
7619 pCtx->rflags.u64 = u64Val;
7620 PCVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7621 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
7622 {
7623 pCtx->eflags.Bits.u1VM = 0;
7624 pCtx->eflags.Bits.u2IOPL = pVmcsInfoShared->RealMode.Eflags.Bits.u2IOPL;
7625 }
7626 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS;
7627 }
7628}
7629
7630
7631/**
7632 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
7633 * context.
7634 *
7635 * @param pVCpu The cross context virtual CPU structure.
7636 * @param pVmcsInfo The VMCS info. object.
7637 *
7638 * @remarks Called with interrupts and/or preemption disabled, try not to assert and
7639 * do not log!
7640 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState()
7641 * instead!!!
7642 */
7643static void hmR0VmxImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
7644{
7645 uint32_t u32Val;
7646 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &u32Val); AssertRC(rc);
7647 if (!u32Val)
7648 {
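        /* No interruptibility restrictions reported: clear any stale interrupt-inhibition
           force-flag and NMI-blocking state we may still be carrying. */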
7649 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7650 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7651 CPUMSetGuestNmiBlocking(pVCpu, false);
7652 }
7653 else
7654 {
7655 /*
7656 * We must import RIP here to set our EM interrupt-inhibited state.
7657 * We also import RFLAGS as our code that evaluates pending interrupts
7658 * before VM-entry requires it.
7659 */
7660 hmR0VmxImportGuestRip(pVCpu);
7661 hmR0VmxImportGuestRFlags(pVCpu, pVmcsInfo);
7662
7663 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
7664 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
7665 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
7666 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
7667
7668 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
7669 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
7670 }
7671}
7672
7673
7674/**
7675 * Worker for VMXR0ImportStateOnDemand.
7676 *
7677 * @returns VBox status code.
7678 * @param pVCpu The cross context virtual CPU structure.
7679 * @param pVmcsInfo The VMCS info. object.
7680 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
7681 */
7682static int hmR0VmxImportGuestState(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint64_t fWhat)
7683{
7684 int rc = VINF_SUCCESS;
7685 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
7686 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
7687 uint32_t u32Val;
7688
7689 /*
7690 * Note! This is a hack to work around a mysterious BSOD observed with release builds
7691 * on Windows 10 64-bit hosts. Profile and debug builds are not affected and
7692 * neither are other host platforms.
7693 *
7694 * Committing this temporarily as it prevents BSOD.
7695 *
7696 * Update: This is very likely a compiler optimization bug, see @bugref{9180}.
7697 */
7698#ifdef RT_OS_WINDOWS
7699 if (pVM == 0 || pVM == (void *)(uintptr_t)-1)
7700 return VERR_HM_IPE_1;
7701#endif
7702
7703 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x);
7704
7705 /*
7706 * We disable interrupts to make the updating of the state and in particular
7707 * the fExtrn modification atomic wrt preemption hooks.
7708 */
7709 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
7710
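    /* Only import what was requested and is still marked as external (i.e. not yet imported) in the context. */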
7711 fWhat &= pCtx->fExtrn;
7712 if (fWhat)
7713 {
7714 do
7715 {
7716 if (fWhat & CPUMCTX_EXTRN_RIP)
7717 hmR0VmxImportGuestRip(pVCpu);
7718
7719 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
7720 hmR0VmxImportGuestRFlags(pVCpu, pVmcsInfo);
7721
7722 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
7723 hmR0VmxImportGuestIntrState(pVCpu, pVmcsInfo);
7724
7725 if (fWhat & CPUMCTX_EXTRN_RSP)
7726 {
7727 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_RSP, &pCtx->rsp);
7728 AssertRC(rc);
7729 }
7730
7731 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
7732 {
7733 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7734 bool const fRealOnV86Active = pVmcsInfoShared->RealMode.fRealOnV86Active;
7735 if (fWhat & CPUMCTX_EXTRN_CS)
7736 {
7737 hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_CS);
7738 hmR0VmxImportGuestRip(pVCpu);
7739 if (fRealOnV86Active)
7740 pCtx->cs.Attr.u = pVmcsInfoShared->RealMode.AttrCS.u;
7741 EMHistoryUpdatePC(pVCpu, pCtx->cs.u64Base + pCtx->rip, true /* fFlattened */);
7742 }
7743 if (fWhat & CPUMCTX_EXTRN_SS)
7744 {
7745 hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_SS);
7746 if (fRealOnV86Active)
7747 pCtx->ss.Attr.u = pVmcsInfoShared->RealMode.AttrSS.u;
7748 }
7749 if (fWhat & CPUMCTX_EXTRN_DS)
7750 {
7751 hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_DS);
7752 if (fRealOnV86Active)
7753 pCtx->ds.Attr.u = pVmcsInfoShared->RealMode.AttrDS.u;
7754 }
7755 if (fWhat & CPUMCTX_EXTRN_ES)
7756 {
7757 hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_ES);
7758 if (fRealOnV86Active)
7759 pCtx->es.Attr.u = pVmcsInfoShared->RealMode.AttrES.u;
7760 }
7761 if (fWhat & CPUMCTX_EXTRN_FS)
7762 {
7763 hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_FS);
7764 if (fRealOnV86Active)
7765 pCtx->fs.Attr.u = pVmcsInfoShared->RealMode.AttrFS.u;
7766 }
7767 if (fWhat & CPUMCTX_EXTRN_GS)
7768 {
7769 hmR0VmxImportGuestSegReg(pVCpu, X86_SREG_GS);
7770 if (fRealOnV86Active)
7771 pCtx->gs.Attr.u = pVmcsInfoShared->RealMode.AttrGS.u;
7772 }
7773 }
7774
7775 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
7776 {
7777 if (fWhat & CPUMCTX_EXTRN_LDTR)
7778 hmR0VmxImportGuestLdtr(pVCpu);
7779
7780 if (fWhat & CPUMCTX_EXTRN_GDTR)
7781 {
7782 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt); AssertRC(rc);
7783 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRC(rc);
7784 pCtx->gdtr.cbGdt = u32Val;
7785 }
7786
7787 /* Guest IDTR. */
7788 if (fWhat & CPUMCTX_EXTRN_IDTR)
7789 {
7790 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_IDTR_BASE, &pCtx->idtr.pIdt); AssertRC(rc);
7791 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRC(rc);
7792 pCtx->idtr.cbIdt = u32Val;
7793 }
7794
7795 /* Guest TR. */
7796 if (fWhat & CPUMCTX_EXTRN_TR)
7797 {
7798 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR;
7799 we don't need to import that one. */
7800 if (!pVmcsInfo->pShared->RealMode.fRealOnV86Active)
7801 hmR0VmxImportGuestTr(pVCpu);
7802 }
7803 }
7804
7805 if (fWhat & CPUMCTX_EXTRN_DR7)
7806 {
7807 if (!pVCpu->hmr0.s.fUsingHyperDR7)
7808 {
7809 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_DR7, &pCtx->dr[7]);
7810 AssertRC(rc);
7811 }
7812 }
7813
7814 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
7815 {
7816 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); AssertRC(rc);
7817 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); AssertRC(rc);
7818 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRC(rc);
7819 pCtx->SysEnter.cs = u32Val;
7820 }
7821
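            /* The KERNEL_GS_BASE and SYSCALL MSRs are lazily swapped for 64-bit guests only; read them
               back from the CPU only if the guest values are currently loaded. */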
7822 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
7823 {
7824 if ( pVM->hmr0.s.fAllow64BitGuests
7825 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
7826 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
7827 }
7828
7829 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
7830 {
7831 if ( pVM->hmr0.s.fAllow64BitGuests
7832 && (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
7833 {
7834 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
7835 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);
7836 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);
7837 }
7838 }
7839
7840 if (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))
7841 {
7842 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
7843 PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
7844 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
7845 Assert(pMsrs);
7846 Assert(cMsrs <= VMX_MISC_MAX_MSRS(g_HmMsrs.u.vmx.u64Misc));
7847 Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
7848 for (uint32_t i = 0; i < cMsrs; i++)
7849 {
7850 uint32_t const idMsr = pMsrs[i].u32Msr;
7851 switch (idMsr)
7852 {
7853 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value); break;
7854 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value); break;
7855 case MSR_K6_EFER: /* Can't be changed without causing a VM-exit */ break;
7856 default:
7857 {
7858 uint32_t idxLbrMsr;
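                        /* With last-branch-record (LBR) virtualization enabled, the guest MSR store area
                           also contains the LBR from/to IP MSRs and the LBR top-of-stack MSR; stash their
                           values in the shared VMCS info. */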
7859 if (pVM->hmr0.s.vmx.fLbr)
7860 {
7861 if (hmR0VmxIsLbrBranchFromMsr(pVM, idMsr, &idxLbrMsr))
7862 {
7863 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr));
7864 pVmcsInfoShared->au64LbrFromIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
7865 break;
7866 }
7867 if (hmR0VmxIsLbrBranchToMsr(pVM, idMsr, &idxLbrMsr))
7868 {
7869 Assert(idxLbrMsr < RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr));
7870 pVmcsInfoShared->au64LbrToIpMsr[idxLbrMsr] = pMsrs[i].u64Value;
7871 break;
7872 }
7873 if (idMsr == pVM->hmr0.s.vmx.idLbrTosMsr)
7874 {
7875 pVmcsInfoShared->u64LbrTosMsr = pMsrs[i].u64Value;
7876 break;
7877 }
7878 /* Fallthru (no break) */
7879 }
7880 pCtx->fExtrn = 0;
7881 pVCpu->hm.s.u32HMError = idMsr;
7882 ASMSetFlags(fEFlags);
7883 AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
7884 return VERR_HM_UNEXPECTED_LD_ST_MSR;
7885 }
7886 }
7887 }
7888 }
7889
7890 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
7891 {
7892 if (fWhat & CPUMCTX_EXTRN_CR0)
7893 {
7894 uint64_t u64Cr0;
7895 uint64_t u64Shadow;
7896 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_CR0, &u64Cr0); AssertRC(rc);
7897 rc = VMXReadVmcsNw(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Shadow); AssertRC(rc);
7898#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
7899 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
7900 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
7901#else
7902 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
7903 {
7904 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
7905 | (u64Shadow & pVmcsInfo->u64Cr0Mask);
7906 }
7907 else
7908 {
7909 /*
7910 * We've merged the guest and nested-guest's CR0 guest/host mask while executing
7911 * the nested-guest using hardware-assisted VMX. Accordingly we need to
7912 * re-construct CR0. See @bugref{9180#c95} for details.
7913 */
7914 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
7915 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7916 u64Cr0 = (u64Cr0 & ~pVmcsInfo->u64Cr0Mask)
7917 | (pVmcsNstGst->u64GuestCr0.u & pVmcsNstGst->u64Cr0Mask.u)
7918 | (u64Shadow & (pVmcsInfoGst->u64Cr0Mask & ~pVmcsNstGst->u64Cr0Mask.u));
7919 }
7920#endif
7921 VMMRZCallRing3Disable(pVCpu); /* May call into PGM which has Log statements. */
7922 CPUMSetGuestCR0(pVCpu, u64Cr0);
7923 VMMRZCallRing3Enable(pVCpu);
7924 }
7925
7926 if (fWhat & CPUMCTX_EXTRN_CR4)
7927 {
7928 uint64_t u64Cr4;
7929 uint64_t u64Shadow;
7930 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_CR4, &u64Cr4); AssertRC(rc);
7931 rc |= VMXReadVmcsNw(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Shadow); AssertRC(rc);
7932#ifndef VBOX_WITH_NESTED_HWVIRT_VMX
7933 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
7934 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
7935#else
7936 if (!CPUMIsGuestInVmxNonRootMode(pCtx))
7937 {
7938 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
7939 | (u64Shadow & pVmcsInfo->u64Cr4Mask);
7940 }
7941 else
7942 {
7943 /*
7944 * We've merged the guest and nested-guest's CR4 guest/host mask while executing
7945 * the nested-guest using hardware-assisted VMX. Accordingly we need to
7946 * re-construct CR4. See @bugref{9180#c95} for details.
7947 */
7948 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
7949 PVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7950 u64Cr4 = (u64Cr4 & ~pVmcsInfo->u64Cr4Mask)
7951 | (pVmcsNstGst->u64GuestCr4.u & pVmcsNstGst->u64Cr4Mask.u)
7952 | (u64Shadow & (pVmcsInfoGst->u64Cr4Mask & ~pVmcsNstGst->u64Cr4Mask.u));
7953 }
7954#endif
7955 pCtx->cr4 = u64Cr4;
7956 }
7957
7958 if (fWhat & CPUMCTX_EXTRN_CR3)
7959 {
7960 /* CR0.PG bit changes are always intercepted, so it's up to date. */
7961 if ( pVM->hmr0.s.vmx.fUnrestrictedGuest
7962 || ( pVM->hmr0.s.fNestedPaging
7963 && CPUMIsGuestPagingEnabledEx(pCtx)))
7964 {
7965 uint64_t u64Cr3;
7966 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_CR3, &u64Cr3); AssertRC(rc);
7967 if (pCtx->cr3 != u64Cr3)
7968 {
7969 pCtx->cr3 = u64Cr3;
7970 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
7971 }
7972
7973 /*
7974 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
7975 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
7976 */
7977 if (CPUMIsGuestInPAEModeEx(pCtx))
7978 {
7979 X86PDPE aPaePdpes[4];
7980 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &aPaePdpes[0].u); AssertRC(rc);
7981 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &aPaePdpes[1].u); AssertRC(rc);
7982 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &aPaePdpes[2].u); AssertRC(rc);
7983 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &aPaePdpes[3].u); AssertRC(rc);
7984 if (memcmp(&aPaePdpes[0], &pCtx->aPaePdpes[0], sizeof(aPaePdpes)))
7985 {
7986 memcpy(&pCtx->aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
7987 /* PGM now updates PAE PDPTEs while updating CR3. */
7988 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
7989 }
7990 }
7991 }
7992 }
7993 }
7994
7995#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7996 if (fWhat & CPUMCTX_EXTRN_HWVIRT)
7997 {
7998 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
7999 && !CPUMIsGuestInVmxNonRootMode(pCtx))
8000 {
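            /* The guest is in VMX root operation (but not non-root) and VMCS shadowing is active:
               copy the shadow VMCS fields back into the software nested-guest VMCS. */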
8001 Assert(CPUMIsGuestInVmxRootMode(pCtx));
8002 rc = hmR0VmxCopyShadowToNstGstVmcs(pVCpu, pVmcsInfo);
8003 if (RT_SUCCESS(rc))
8004 { /* likely */ }
8005 else
8006 break;
8007 }
8008 }
8009#endif
8010 } while (0);
8011
8012 if (RT_SUCCESS(rc))
8013 {
8014 /* Update fExtrn. */
8015 pCtx->fExtrn &= ~fWhat;
8016
8017 /* If everything has been imported, clear the HM keeper bit. */
8018 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
8019 {
8020 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM;
8021 Assert(!pCtx->fExtrn);
8022 }
8023 }
8024 }
8025 else
8026 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn));
8027
8028 /*
8029 * Restore interrupts.
8030 */
8031 ASMSetFlags(fEFlags);
8032
8033 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatImportGuestState, x);
8034
8035 if (RT_SUCCESS(rc))
8036 { /* likely */ }
8037 else
8038 return rc;
8039
8040 /*
8041 * Honor any pending CR3 updates.
8042 *
8043 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> VMXR0CallRing3Callback()
8044 * -> VMMRZCallRing3Disable() -> hmR0VmxImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
8045 * -> continue with VM-exit handling -> hmR0VmxImportGuestState() and here we are.
8046 *
8047 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus
8048 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that
8049 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should
8050 * -NOT- check if CPUMCTX_EXTRN_CR3 is set!
8051 *
8052 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
8053 *
8054 * The force-flag is checked first as it's cheaper for potential superfluous calls to this function.
8055 */
8056 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3)
8057 && VMMRZCallRing3IsEnabled(pVCpu))
8058 {
8059 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
8060 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
8061 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
8062 }
8063
8064 return VINF_SUCCESS;
8065}
8066
8067
8068/**
8069 * Saves the guest state from the VMCS into the guest-CPU context.
8070 *
8071 * @returns VBox status code.
8072 * @param pVCpu The cross context virtual CPU structure.
8073 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
8074 */
8075VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
8076{
8077 AssertPtr(pVCpu);
8078 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
8079 return hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fWhat);
8080}
8081
8082
8083/**
8084 * Check per-VM and per-VCPU force flag actions that require us to go back to
8085 * ring-3 for one reason or another.
8086 *
8087 * @returns Strict VBox status code (i.e. informational status codes too)
8088 * @retval VINF_SUCCESS if we don't have any actions that require going back to
8089 * ring-3.
8090 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
8091 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
8092 * interrupts)
8093 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
8094 * all EMTs to be in ring-3.
8095 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
8096 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
8097 * to the EM loop.
8098 *
8099 * @param pVCpu The cross context virtual CPU structure.
8100 * @param pVmxTransient The VMX-transient structure.
8101 * @param fStepping Whether we are single-stepping the guest using the
8102 * hypervisor debugger.
8103 *
8104 * @remarks This might cause nested-guest VM-exits; the caller must check if the guest
8105 * is no longer in VMX non-root mode.
8106 */
8107static VBOXSTRICTRC hmR0VmxCheckForceFlags(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, bool fStepping)
8108{
8109 Assert(VMMRZCallRing3IsEnabled(pVCpu));
8110
8111 /*
8112 * Update pending interrupts into the APIC's IRR.
8113 */
8114 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
8115 APICUpdatePendingInterrupts(pVCpu);
8116
8117 /*
8118 * Anything pending? Should be more likely than not if we're doing a good job.
8119 */
8120 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8121 if ( !fStepping
8122 ? !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_MASK)
8123 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_MASK)
8124 : !VM_FF_IS_ANY_SET(pVM, VM_FF_HP_R0_PRE_HM_STEP_MASK)
8125 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
8126 return VINF_SUCCESS;
8127
8128 /* Pending PGM CR3 sync. */
8129 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
8130 {
8131 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8132 Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4)));
8133 VBOXSTRICTRC rcStrict = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
8134 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
8135 if (rcStrict != VINF_SUCCESS)
8136 {
8137 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
8138 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict)));
8139 return rcStrict;
8140 }
8141 }
8142
8143 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
8144 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HM_TO_R3_MASK)
8145 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
8146 {
8147 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
8148 int rc = RT_LIKELY(!VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_RAW_TO_R3 : VINF_EM_NO_MEMORY;
8149 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
8150 return rc;
8151 }
8152
8153 /* Pending VM request packets, such as hardware interrupts. */
8154 if ( VM_FF_IS_SET(pVM, VM_FF_REQUEST)
8155 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
8156 {
8157 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchVmReq);
8158 Log4Func(("Pending VM request forcing us back to ring-3\n"));
8159 return VINF_EM_PENDING_REQUEST;
8160 }
8161
8162 /* Pending PGM pool flushes. */
8163 if (VM_FF_IS_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
8164 {
8165 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPgmPoolFlush);
8166 Log4Func(("PGM pool flush pending forcing us back to ring-3\n"));
8167 return VINF_PGM_POOL_FLUSH_PENDING;
8168 }
8169
8170 /* Pending DMA requests. */
8171 if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
8172 {
8173 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchDma);
8174 Log4Func(("Pending DMA request forcing us back to ring-3\n"));
8175 return VINF_EM_RAW_TO_R3;
8176 }
8177
8178#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8179 /*
8180 * Pending nested-guest events.
8181 *
8182 * Please note that the priority of these events is specified and important.
8183 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
8184 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
8185 */
8186 if (pVmxTransient->fIsNestedGuest)
8187 {
8188 /* Pending nested-guest APIC-write. */
8189 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
8190 {
8191 Log4Func(("Pending nested-guest APIC-write\n"));
8192 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitApicWrite(pVCpu);
8193 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
8194 return rcStrict;
8195 }
8196
8197 /* Pending nested-guest monitor-trap flag (MTF). */
8198 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
8199 {
8200 Log4Func(("Pending nested-guest MTF\n"));
8201 VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */);
8202 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
8203 return rcStrict;
8204 }
8205
8206 /* Pending nested-guest VMX-preemption timer expired. */
8207 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
8208 {
8209 Log4Func(("Pending nested-guest preempt timer\n"));
8210 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitPreemptTimer(pVCpu);
8211 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
8212 return rcStrict;
8213 }
8214 }
8215#else
8216 NOREF(pVmxTransient);
8217#endif
8218
8219 return VINF_SUCCESS;
8220}
8221
8222
8223/**
8224 * Converts any TRPM trap into a pending HM event. This is typically used when
8225 * entering from ring-3 (not longjmp returns).
8226 *
8227 * @param pVCpu The cross context virtual CPU structure.
8228 */
8229static void hmR0VmxTrpmTrapToPendingEvent(PVMCPUCC pVCpu)
8230{
8231 Assert(TRPMHasTrap(pVCpu));
8232 Assert(!pVCpu->hm.s.Event.fPending);
8233
8234 uint8_t uVector;
8235 TRPMEVENT enmTrpmEvent;
8236 uint32_t uErrCode;
8237 RTGCUINTPTR GCPtrFaultAddress;
8238 uint8_t cbInstr;
8239 bool fIcebp;
8240
8241 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr, &fIcebp);
8242 AssertRC(rc);
8243
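    /* Assemble the VMX interruption-information: the vector goes in the low bits, the valid bit is
       ORed in, and HMTrpmEventTypeToVmxEventType() supplies the VMX event type for the TRPM event. */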
8244 uint32_t u32IntInfo;
8245 u32IntInfo = uVector | VMX_IDT_VECTORING_INFO_VALID;
8246 u32IntInfo |= HMTrpmEventTypeToVmxEventType(uVector, enmTrpmEvent, fIcebp);
8247
8248 rc = TRPMResetTrap(pVCpu);
8249 AssertRC(rc);
8250 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
8251 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
8252
8253 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress);
8254}
8255
8256
8257/**
8258 * Converts the pending HM event into a TRPM trap.
8259 *
8260 * @param pVCpu The cross context virtual CPU structure.
8261 */
8262static void hmR0VmxPendingEventToTrpmTrap(PVMCPUCC pVCpu)
8263{
8264 Assert(pVCpu->hm.s.Event.fPending);
8265
8266 /* If a trap was already pending, we did something wrong! */
8267 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
8268
8269 uint32_t const u32IntInfo = pVCpu->hm.s.Event.u64IntInfo;
8270 uint32_t const uVector = VMX_IDT_VECTORING_INFO_VECTOR(u32IntInfo);
8271 TRPMEVENT const enmTrapType = HMVmxEventTypeToTrpmEventType(u32IntInfo);
8272
8273 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
8274
8275 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
8276 AssertRC(rc);
8277
8278 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
8279 TRPMSetErrorCode(pVCpu, pVCpu->hm.s.Event.u32ErrCode);
8280
8281 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(u32IntInfo))
8282 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
8283 else
8284 {
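        /* For software interrupts, software exceptions and privileged software exceptions (ICEBP/INT1)
           we must also pass the instruction length to TRPM, which is needed when the event is re-injected. */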
8285 uint8_t const uVectorType = VMX_IDT_VECTORING_INFO_TYPE(u32IntInfo);
8286 switch (uVectorType)
8287 {
8288 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
8289 TRPMSetTrapDueToIcebp(pVCpu);
8290 RT_FALL_THRU();
8291 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
8292 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT:
8293 {
8294 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
8295 || ( uVector == X86_XCPT_BP /* INT3 */
8296 || uVector == X86_XCPT_OF /* INTO */
8297 || uVector == X86_XCPT_DB /* INT1 (ICEBP) */),
8298 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
8299 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
8300 break;
8301 }
8302 }
8303 }
8304
8305 /* We're now done converting the pending event. */
8306 pVCpu->hm.s.Event.fPending = false;
8307}
8308
8309
8310/**
8311 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
8312 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
8313 *
8314 * @param pVmcsInfo The VMCS info. object.
8315 */
8316static void hmR0VmxSetIntWindowExitVmcs(PVMXVMCSINFO pVmcsInfo)
8317{
8318 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_INT_WINDOW_EXIT)
8319 {
8320 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
8321 {
8322 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_INT_WINDOW_EXIT;
8323 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8324 AssertRC(rc);
8325 }
8326 } /* else we will deliver interrupts whenever the guest VM-exits next and is in a state to receive the interrupt. */
8327}
8328
8329
8330/**
8331 * Clears the interrupt-window exiting control in the VMCS.
8332 *
8333 * @param pVmcsInfo The VMCS info. object.
8334 */
8335DECLINLINE(void) hmR0VmxClearIntWindowExitVmcs(PVMXVMCSINFO pVmcsInfo)
8336{
8337 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
8338 {
8339 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_INT_WINDOW_EXIT;
8340 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8341 AssertRC(rc);
8342 }
8343}
8344
8345
8346/**
8347 * Sets the NMI-window exiting control in the VMCS which instructs VT-x to
8348 * cause a VM-exit as soon as the guest is in a state to receive NMIs.
8349 *
8350 * @param pVmcsInfo The VMCS info. object.
8351 */
8352static void hmR0VmxSetNmiWindowExitVmcs(PVMXVMCSINFO pVmcsInfo)
8353{
8354 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
8355 {
8356 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
8357 {
8358 pVmcsInfo->u32ProcCtls |= VMX_PROC_CTLS_NMI_WINDOW_EXIT;
8359 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8360 AssertRC(rc);
8361 Log4Func(("Setup NMI-window exiting\n"));
8362 }
8363 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */
8364}
8365
8366
8367/**
8368 * Clears the NMI-window exiting control in the VMCS.
8369 *
8370 * @param pVmcsInfo The VMCS info. object.
8371 */
8372DECLINLINE(void) hmR0VmxClearNmiWindowExitVmcs(PVMXVMCSINFO pVmcsInfo)
8373{
8374 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
8375 {
8376 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_NMI_WINDOW_EXIT;
8377 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
8378 AssertRC(rc);
8379 }
8380}
8381
8382
8383/**
8384 * Does the necessary state syncing before returning to ring-3 for any reason
8385 * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
8386 *
8387 * @returns VBox status code.
8388 * @param pVCpu The cross context virtual CPU structure.
8389 * @param fImportState Whether to import the guest state from the VMCS back
8390 * to the guest-CPU context.
8391 *
8392 * @remarks No-long-jmp zone!!!
8393 */
8394static int hmR0VmxLeave(PVMCPUCC pVCpu, bool fImportState)
8395{
8396 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8397 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8398
8399 RTCPUID const idCpu = RTMpCpuId();
8400 Log4Func(("HostCpuId=%u\n", idCpu));
8401
8402 /*
8403 * !!! IMPORTANT !!!
8404 * If you modify code here, check whether VMXR0CallRing3Callback() needs to be updated too.
8405 */
8406
8407 /* Save the guest state if necessary. */
8408 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
8409 if (fImportState)
8410 {
8411 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8412 AssertRCReturn(rc, rc);
8413 }
8414
8415 /* Restore host FPU state if necessary. We will resync on next R0 reentry. */
8416 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
8417 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
8418
8419 /* Restore host debug registers if necessary. We will resync on next R0 reentry. */
8420#ifdef VBOX_STRICT
8421 if (CPUMIsHyperDebugStateActive(pVCpu))
8422 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT);
8423#endif
8424 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
8425 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
8426 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
8427
8428 /* Restore host-state bits that VT-x only restores partially. */
8429 if (pVCpu->hmr0.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
8430 {
8431 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hmr0.s.vmx.fRestoreHostFlags, idCpu));
8432 VMXRestoreHostState(pVCpu->hmr0.s.vmx.fRestoreHostFlags, &pVCpu->hmr0.s.vmx.RestoreHost);
8433 }
8434 pVCpu->hmr0.s.vmx.fRestoreHostFlags = 0;
8435
8436 /* Restore the lazy host MSRs as we're leaving VT-x context. */
8437 if (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
8438 {
8439 /* We shouldn't restore the host MSRs without saving the guest MSRs first. */
8440 if (!fImportState)
8441 {
8442 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS);
8443 AssertRCReturn(rc, rc);
8444 }
8445 hmR0VmxLazyRestoreHostMsrs(pVCpu);
8446 Assert(!pVCpu->hmr0.s.vmx.fLazyMsrs);
8447 }
8448 else
8449 pVCpu->hmr0.s.vmx.fLazyMsrs = 0;
8450
8451 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
8452 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
8453
8454 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
8455 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState);
8456 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState);
8457 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatPreExit);
8458 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitHandling);
8459 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
8460 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
8461 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
8462 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitVmentry);
8463 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
8464
8465 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
8466
8467 /** @todo This partially defeats the purpose of having preemption hooks.
8468 * The problem is, deregistering the hooks should be moved to a place that
8469 * lasts until the EMT is about to be destroyed, not every time we leave HM
8470 * context.
8471 */
8472 int rc = hmR0VmxClearVmcs(pVmcsInfo);
8473 AssertRCReturn(rc, rc);
8474
8475#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8476 /*
8477 * A valid shadow VMCS is made active as part of VM-entry. It is necessary to
8478 * clear a shadow VMCS before allowing that VMCS to become active on another
8479 * logical processor. We may or may not be importing guest state which clears
8480 * it, so cover for it here.
8481 *
8482 * See Intel spec. 24.11.1 "Software Use of Virtual-Machine Control Structures".
8483 */
8484 if ( pVmcsInfo->pvShadowVmcs
8485 && pVmcsInfo->fShadowVmcsState != VMX_V_VMCS_LAUNCH_STATE_CLEAR)
8486 {
8487 rc = hmR0VmxClearShadowVmcs(pVmcsInfo);
8488 AssertRCReturn(rc, rc);
8489 }
8490
8491 /*
8492 * Flag that we need to re-export the host state if we switch to this VMCS before
8493 * executing guest or nested-guest code.
8494 */
8495 pVmcsInfo->idHostCpuState = NIL_RTCPUID;
8496#endif
8497
8498 Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
8499 NOREF(idCpu);
8500 return VINF_SUCCESS;
8501}
8502
8503
8504/**
8505 * Leaves the VT-x session.
8506 *
8507 * @returns VBox status code.
8508 * @param pVCpu The cross context virtual CPU structure.
8509 *
8510 * @remarks No-long-jmp zone!!!
8511 */
8512static int hmR0VmxLeaveSession(PVMCPUCC pVCpu)
8513{
8514 HM_DISABLE_PREEMPT(pVCpu);
8515 HMVMX_ASSERT_CPU_SAFE(pVCpu);
8516 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
8517 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
8518
8519 /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
8520 and done this from the VMXR0ThreadCtxCallback(). */
8521 if (!pVCpu->hmr0.s.fLeaveDone)
8522 {
8523 int rc2 = hmR0VmxLeave(pVCpu, true /* fImportState */);
8524 AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2);
8525 pVCpu->hmr0.s.fLeaveDone = true;
8526 }
8527 Assert(!pVCpu->cpum.GstCtx.fExtrn);
8528
8529 /*
8530 * !!! IMPORTANT !!!
8531 * If you modify code here, make sure to check whether VMXR0CallRing3Callback() needs to be updated too.
8532 */
8533
8534 /* Deregister hook now that we've left HM context before re-enabling preemption. */
8535 /** @todo Deregistering here means we need to VMCLEAR always
8536 * (longjmp/exit-to-r3) in VT-x which is not efficient, eliminate need
8537 * for calling VMMR0ThreadCtxHookDisable here! */
8538 VMMR0ThreadCtxHookDisable(pVCpu);
8539
8540 /* Leave HM context. This takes care of local init (term) and deregistering the longjmp-to-ring-3 callback. */
8541 int rc = HMR0LeaveCpu(pVCpu);
8542 HM_RESTORE_PREEMPT();
8543 return rc;
8544}
8545
8546
8547/**
8548 * Take necessary actions before going back to ring-3.
8549 *
8550 * An action requires us to go back to ring-3. This function does the necessary
8551 * steps before we can safely return to ring-3. This is not the same as longjmps
8552 * to ring-3; this is voluntary and prepares the guest so it may continue
8553 * executing outside HM (recompiler/IEM).
8554 *
8555 * @returns VBox status code.
8556 * @param pVCpu The cross context virtual CPU structure.
8557 * @param rcExit The reason for exiting to ring-3. Can be
8558 * VINF_VMM_UNKNOWN_RING3_CALL.
8559 */
8560static int hmR0VmxExitToRing3(PVMCPUCC pVCpu, VBOXSTRICTRC rcExit)
8561{
8562 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
8563
8564 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
8565 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
8566 {
8567 VMXGetCurrentVmcs(&pVCpu->hm.s.vmx.LastError.HCPhysCurrentVmcs);
8568 pVCpu->hm.s.vmx.LastError.u32VmcsRev = *(uint32_t *)pVmcsInfo->pvVmcs;
8569 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hmr0.s.idEnteredCpu;
8570 /* LastError.idCurrentCpu was updated in hmR0VmxPreRunGuestCommitted(). */
8571 }
8572
8573 /* Please, no longjumps here (any log flush could jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
8574 VMMRZCallRing3Disable(pVCpu);
8575 Log4Func(("rcExit=%d\n", VBOXSTRICTRC_VAL(rcExit)));
8576
8577 /*
8578 * Convert any pending HM events back to TRPM due to premature exits to ring-3.
8579 * We need to do this only on returns to ring-3 and not for longjmps to ring-3.
8580 *
8581 * This is because execution may continue from ring-3 and we would need to inject
8582 * the event from there (hence place it back in TRPM).
8583 */
8584 if (pVCpu->hm.s.Event.fPending)
8585 {
8586 hmR0VmxPendingEventToTrpmTrap(pVCpu);
8587 Assert(!pVCpu->hm.s.Event.fPending);
8588
8589 /* Clear the events from the VMCS. */
8590 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0); AssertRC(rc);
8591 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, 0); AssertRC(rc);
8592 }
8593#ifdef VBOX_STRICT
8594 /*
8595 * We check for rcExit here since for errors like VERR_VMX_UNABLE_TO_START_VM (which are
8596 * fatal), we don't care about verifying duplicate injection of events. Errors like
8597 * VERR_EM_INTERPRET are converted to their VINF_* counterparts -prior- to calling this
8598 * function so those should and will be checked below.
8599 */
8600 else if (RT_SUCCESS(rcExit))
8601 {
8602 /*
8603 * Ensure we don't accidentally clear a pending HM event without clearing the VMCS.
8604 * This can be pretty hard to debug otherwise, interrupts might get injected twice
8605 * occasionally, see @bugref{9180#c42}.
8606 *
8607 * However, if the VM-entry failed, any VM entry-interruption info. field would
8608 * be left unmodified as the event would not have been injected to the guest. In
8609 * such cases, don't assert, we're not going to continue guest execution anyway.
8610 */
8611 uint32_t uExitReason;
8612 uint32_t uEntryIntInfo;
8613 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
8614 rc |= VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &uEntryIntInfo);
8615 AssertRC(rc);
8616 AssertMsg(VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason) || !VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo),
8617 ("uExitReason=%#RX32 uEntryIntInfo=%#RX32 rcExit=%d\n", uExitReason, uEntryIntInfo, VBOXSTRICTRC_VAL(rcExit)));
8618 }
8619#endif
8620
8621 /*
8622 * Clear the interrupt-window and NMI-window VMCS controls as we could have got
8623 * a VM-exit with higher priority than interrupt-window or NMI-window VM-exits
8624 * (e.g. TPR below threshold).
8625 */
8626 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
8627 {
8628 hmR0VmxClearIntWindowExitVmcs(pVmcsInfo);
8629 hmR0VmxClearNmiWindowExitVmcs(pVmcsInfo);
8630 }
8631
8632 /* If we're emulating an instruction, we shouldn't have any TRPM traps pending
8633 and if we're injecting an event we should have a TRPM trap pending. */
8634 AssertMsg(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
8635#ifndef DEBUG_bird /* Triggered after firing an NMI against NT4SP1, possibly a triple fault in progress. */
8636 AssertMsg(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit)));
8637#endif
8638
8639 /* Save guest state and restore host state bits. */
8640 int rc = hmR0VmxLeaveSession(pVCpu);
8641 AssertRCReturn(rc, rc);
8642 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
8643
8644 /* Thread-context hooks are unregistered at this point!!! */
8645 /* Ring-3 callback notifications are unregistered at this point!!! */
8646
8647 /* Sync recompiler state. */
8648 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
8649 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
8650 | CPUM_CHANGED_LDTR
8651 | CPUM_CHANGED_GDTR
8652 | CPUM_CHANGED_IDTR
8653 | CPUM_CHANGED_TR
8654 | CPUM_CHANGED_HIDDEN_SEL_REGS);
8655 if ( pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging
8656 && CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx))
8657 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
8658
8659 Assert(!pVCpu->hmr0.s.fClearTrapFlag);
8660
8661 /* Update the exit-to-ring-3 reason. */
8662 pVCpu->hm.s.rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit);
8663
8664 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */
8665 if ( rcExit != VINF_EM_RAW_INTERRUPT
8666 || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
8667 {
8668 Assert(!(pVCpu->cpum.GstCtx.fExtrn & HMVMX_CPUMCTX_EXTRN_ALL));
8669 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
8670 }
8671
8672 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
8673 VMMRZCallRing3Enable(pVCpu);
8674 return rc;
8675}
8676
8677
8678/**
8679 * VMMRZCallRing3() callback wrapper which saves the guest state before we
8680 * longjump due to a ring-0 assertion.
8681 *
8682 * @returns VBox status code.
8683 * @param pVCpu The cross context virtual CPU structure.
8684 */
8685VMMR0DECL(int) VMXR0AssertionCallback(PVMCPUCC pVCpu)
8686{
8687 /*
8688 * !!! IMPORTANT !!!
8689 * If you modify code here, check whether hmR0VmxLeave() and hmR0VmxLeaveSession() needs to be updated too.
8690 * This is a stripped down version which gets out ASAP, trying to not trigger any further assertions.
8691 */
8692 VMMR0AssertionRemoveNotification(pVCpu);
8693 VMMRZCallRing3Disable(pVCpu);
8694 HM_DISABLE_PREEMPT(pVCpu);
8695
8696 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
8697 hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
8698 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu);
8699 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */);
8700
8701 /* Restore host-state bits that VT-x only restores partially. */
8702 if (pVCpu->hmr0.s.vmx.fRestoreHostFlags > VMX_RESTORE_HOST_REQUIRED)
8703 VMXRestoreHostState(pVCpu->hmr0.s.vmx.fRestoreHostFlags, &pVCpu->hmr0.s.vmx.RestoreHost);
8704 pVCpu->hmr0.s.vmx.fRestoreHostFlags = 0;
8705
8706 /* Restore the lazy host MSRs as we're leaving VT-x context. */
8707 if (pVCpu->hmr0.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)
8708 hmR0VmxLazyRestoreHostMsrs(pVCpu);
8709
8710 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
8711 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = false;
8712 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
8713
8714 /* Clear the current VMCS data back to memory (the shadow VMCS, if any, would have been
8715 cleared as part of importing the guest state above). */
8716 hmR0VmxClearVmcs(pVmcsInfo);
8717
8718 /** @todo eliminate the need for calling VMMR0ThreadCtxHookDisable here! */
8719 VMMR0ThreadCtxHookDisable(pVCpu);
8720
8721 /* Leave HM context. This takes care of local init (term). */
8722 HMR0LeaveCpu(pVCpu);
8723 HM_RESTORE_PREEMPT();
8724 return VINF_SUCCESS;
8725}
8726
8727
8728/**
8729 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
8730 * stack.
8731 *
8732 * @returns Strict VBox status code (i.e. informational status codes too).
8733 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
8734 * @param pVCpu The cross context virtual CPU structure.
8735 * @param uValue The value to push to the guest stack.
8736 */
8737static VBOXSTRICTRC hmR0VmxRealModeGuestStackPush(PVMCPUCC pVCpu, uint16_t uValue)
8738{
8739 /*
8740 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
8741 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
8742 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
8743 */
8744 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
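    /* A 16-bit push with SP=1 would wrap the stack segment in the middle of the value being written;
       treat it as a triple-fault condition (VINF_EM_RESET) as described above. */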
8745 if (pCtx->sp == 1)
8746 return VINF_EM_RESET;
8747 pCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
8748 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), pCtx->ss.u64Base + pCtx->sp, &uValue, sizeof(uint16_t));
8749 AssertRC(rc);
8750 return rc;
8751}
8752
8753
8754/**
8755 * Injects an event into the guest upon VM-entry by updating the relevant fields
8756 * in the VM-entry area in the VMCS.
8757 *
8758 * @returns Strict VBox status code (i.e. informational status codes too).
8759 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
8760 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
8761 *
8762 * @param pVCpu The cross context virtual CPU structure.
8763 * @param pVmxTransient The VMX-transient structure.
8764 * @param pEvent The event being injected.
8765 * @param pfIntrState Pointer to the VT-x guest-interruptibility-state. This
8766 * will be updated if necessary. This cannot not be NULL.
8767 * will be updated if necessary. This must not be NULL.
8768 * return VINF_EM_DBG_STEPPED if the event is injected
8769 * directly (registers modified by us, not by hardware on
8770 * VM-entry).
8771 */
8772static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PCHMEVENT pEvent, bool fStepping,
8773 uint32_t *pfIntrState)
8774{
8775 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
8776 AssertMsg(!RT_HI_U32(pEvent->u64IntInfo), ("%#RX64\n", pEvent->u64IntInfo));
8777 Assert(pfIntrState);
8778
8779 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
8780 uint32_t u32IntInfo = pEvent->u64IntInfo;
8781 uint32_t const u32ErrCode = pEvent->u32ErrCode;
8782 uint32_t const cbInstr = pEvent->cbInstr;
8783 RTGCUINTPTR const GCPtrFault = pEvent->GCPtrFaultAddress;
8784 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(u32IntInfo);
8785 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(u32IntInfo);
8786
8787#ifdef VBOX_STRICT
8788 /*
8789 * Validate the error-code-valid bit for hardware exceptions.
8790 * No error codes for exceptions in real-mode.
8791 *
8792 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
8793 */
8794 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
8795 && !CPUMIsGuestInRealModeEx(pCtx))
8796 {
8797 switch (uVector)
8798 {
8799 case X86_XCPT_PF:
8800 case X86_XCPT_DF:
8801 case X86_XCPT_TS:
8802 case X86_XCPT_NP:
8803 case X86_XCPT_SS:
8804 case X86_XCPT_GP:
8805 case X86_XCPT_AC:
8806 AssertMsg(VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo),
8807 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
8808 RT_FALL_THRU();
8809 default:
8810 break;
8811 }
8812 }
8813
8814 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
8815 Assert( uIntType != VMX_EXIT_INT_INFO_TYPE_NMI
8816 || !(*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
8817#endif
8818
8819 if ( uIntType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT
8820 || uIntType == VMX_EXIT_INT_INFO_TYPE_NMI
8821 || uIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
8822 || uIntType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT)
8823 {
8824 Assert(uVector <= X86_XCPT_LAST);
8825 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_NMI || uVector == X86_XCPT_NMI);
8826 Assert(uIntType != VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT || uVector == X86_XCPT_DB);
8827 STAM_COUNTER_INC(&pVCpu->hm.s.aStatInjectedXcpts[uVector]);
8828 }
8829 else
8830 STAM_COUNTER_INC(&pVCpu->hm.s.aStatInjectedIrqs[uVector & MASK_INJECT_IRQ_STAT]);
8831
8832 /*
8833 * Hardware interrupts & exceptions cannot be delivered through the software interrupt
8834 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the
8835 * interrupt handler in the (real-mode) guest.
8836 *
8837 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode".
8838 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
8839 */
8840 if (CPUMIsGuestInRealModeEx(pCtx)) /* CR0.PE bit changes are always intercepted, so it's up to date. */
8841 {
8842 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest)
8843 {
8844 /*
8845 * For CPUs with unrestricted guest execution enabled and with the guest
8846 * in real-mode, we must not set the deliver-error-code bit.
8847 *
8848 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
8849 */
8850 u32IntInfo &= ~VMX_ENTRY_INT_INFO_ERROR_CODE_VALID;
8851 }
8852 else
8853 {
8854 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8855 Assert(PDMVmmDevHeapIsEnabled(pVM));
8856 Assert(pVM->hm.s.vmx.pRealModeTSS);
8857 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
8858
 8859            /* We require RIP, RSP, RFLAGS, CS and IDTR; import them. */
8860 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
8861 int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_TABLE_MASK
8862 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RFLAGS);
8863 AssertRCReturn(rc2, rc2);
8864
8865 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
8866 size_t const cbIdtEntry = sizeof(X86IDTR16);
8867 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pCtx->idtr.cbIdt)
8868 {
8869 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
8870 if (uVector == X86_XCPT_DF)
8871 return VINF_EM_RESET;
8872
8873 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault.
8874 No error codes for exceptions in real-mode. */
8875 if (uVector == X86_XCPT_GP)
8876 {
8877 uint32_t const uXcptDfInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DF)
8878 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
8879 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
8880 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
8881 HMEVENT EventXcptDf;
8882 RT_ZERO(EventXcptDf);
8883 EventXcptDf.u64IntInfo = uXcptDfInfo;
8884 return hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &EventXcptDf, fStepping, pfIntrState);
8885 }
8886
8887 /*
8888 * If we're injecting an event with no valid IDT entry, inject a #GP.
8889 * No error codes for exceptions in real-mode.
8890 *
8891 * See Intel spec. 20.1.4 "Interrupt and Exception Handling"
8892 */
8893 uint32_t const uXcptGpInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_GP)
8894 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
8895 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 0)
8896 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
8897 HMEVENT EventXcptGp;
8898 RT_ZERO(EventXcptGp);
8899 EventXcptGp.u64IntInfo = uXcptGpInfo;
8900 return hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &EventXcptGp, fStepping, pfIntrState);
8901 }
8902
8903 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
8904 uint16_t uGuestIp = pCtx->ip;
8905 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT)
8906 {
8907 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
8908 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
8909 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
8910 }
8911 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_SW_INT)
8912 uGuestIp = pCtx->ip + (uint16_t)cbInstr;
8913
8914 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
8915 X86IDTR16 IdtEntry;
8916 RTGCPHYS const GCPhysIdtEntry = (RTGCPHYS)pCtx->idtr.pIdt + uVector * cbIdtEntry;
8917 rc2 = PGMPhysSimpleReadGCPhys(pVM, &IdtEntry, GCPhysIdtEntry, cbIdtEntry);
8918 AssertRCReturn(rc2, rc2);
8919
8920 /* Construct the stack frame for the interrupt/exception handler. */
8921 VBOXSTRICTRC rcStrict;
8922 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->eflags.u32);
8923 if (rcStrict == VINF_SUCCESS)
8924 {
8925 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, pCtx->cs.Sel);
8926 if (rcStrict == VINF_SUCCESS)
8927 rcStrict = hmR0VmxRealModeGuestStackPush(pVCpu, uGuestIp);
8928 }
8929
8930 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
8931 if (rcStrict == VINF_SUCCESS)
8932 {
8933 pCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
8934 pCtx->rip = IdtEntry.offSel;
8935 pCtx->cs.Sel = IdtEntry.uSel;
8936 pCtx->cs.ValidSel = IdtEntry.uSel;
 8937                pCtx->cs.u64Base     = IdtEntry.uSel << cbIdtEntry; /* Real-mode CS base = selector << 4; cbIdtEntry happens to equal the shift count. */
8938 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
8939 && uVector == X86_XCPT_PF)
8940 pCtx->cr2 = GCPtrFault;
8941
8942 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CS | HM_CHANGED_GUEST_CR2
8943 | HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
8944 | HM_CHANGED_GUEST_RSP);
8945
8946 /*
8947 * If we delivered a hardware exception (other than an NMI) and if there was
8948 * block-by-STI in effect, we should clear it.
8949 */
8950 if (*pfIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
8951 {
8952 Assert( uIntType != VMX_ENTRY_INT_INFO_TYPE_NMI
8953 && uIntType != VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
8954 Log4Func(("Clearing inhibition due to STI\n"));
8955 *pfIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
8956 }
8957
8958 Log4(("Injected real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n",
8959 u32IntInfo, u32ErrCode, cbInstr, pCtx->eflags.u, pCtx->cs.Sel, pCtx->eip));
8960
8961 /*
8962 * The event has been truly dispatched to the guest. Mark it as no longer pending so
8963 * we don't attempt to undo it if we are returning to ring-3 before executing guest code.
8964 */
8965 pVCpu->hm.s.Event.fPending = false;
8966
8967 /*
8968 * If we eventually support nested-guest execution without unrestricted guest execution,
8969 * we should set fInterceptEvents here.
8970 */
8971 Assert(!pVmxTransient->fIsNestedGuest);
8972
8973 /* If we're stepping and we've changed cs:rip above, bail out of the VMX R0 execution loop. */
8974 if (fStepping)
8975 rcStrict = VINF_EM_DBG_STEPPED;
8976 }
8977 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
8978 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8979 return rcStrict;
8980 }
8981 }
8982
8983 /*
8984 * Validate.
8985 */
8986 Assert(VMX_ENTRY_INT_INFO_IS_VALID(u32IntInfo)); /* Bit 31 (Valid bit) must be set by caller. */
8987 Assert(!(u32IntInfo & VMX_BF_ENTRY_INT_INFO_RSVD_12_30_MASK)); /* Bits 30:12 MBZ. */
8988
8989 /*
8990 * Inject the event into the VMCS.
8991 */
8992 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);
8993 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(u32IntInfo))
8994 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
8995 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
8996 AssertRC(rc);
8997
8998 /*
8999 * Update guest CR2 if this is a page-fault.
9000 */
9001 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(u32IntInfo))
9002 pCtx->cr2 = GCPtrFault;
9003
9004 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x CR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, pCtx->cr2));
9005 return VINF_SUCCESS;
9006}
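
/*
 * Illustrative, standalone sketch (not used by this file) of the real-mode IVT
 * arithmetic performed by hmR0VmxInjectEventVmcs() above: locating the 4-byte
 * IVT entry for a vector, applying the (4N - 1) limit check, and deriving the
 * real-mode CS base from a selector.  Plain C99; all names below are made up
 * for the example.
 *
 * @code
 *     #include <stdint.h>
 *     #include <stdbool.h>
 *
 *     // A 16-bit IVT entry: IP offset then CS selector, 4 bytes in total.
 *     typedef struct IVTENTRY16 { uint16_t offSel; uint16_t uSel; } IVTENTRY16;
 *
 *     // Computes the physical address of the IVT entry for uVector, or returns
 *     // false when the entry does not lie fully within the IDT limit.
 *     static bool ivtLookup(uint64_t GCPhysIdtBase, uint16_t cbIdtLimit, uint8_t uVector, uint64_t *pGCPhysEntry)
 *     {
 *         uint32_t const cbEntry = sizeof(IVTENTRY16); // 4
 *         if (uVector * cbEntry + (cbEntry - 1) > cbIdtLimit)
 *             return false;
 *         *pGCPhysEntry = GCPhysIdtBase + (uint64_t)uVector * cbEntry;
 *         return true;
 *     }
 *
 *     // Real-mode segment base is simply the selector shifted left by 4.
 *     static uint32_t realModeSegBase(uint16_t uSel)
 *     {
 *         return (uint32_t)uSel << 4;
 *     }
 * @endcode
 *
 * The interrupt/exception frame built above is pushed in the order FLAGS, CS, IP,
 * which is what a real-mode IRET expects to pop in reverse.
 */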
9007
9008
9009/**
9010 * Evaluates the event to be delivered to the guest and sets it as the pending
9011 * event.
9012 *
9013 * Toggling of interrupt force-flags here is safe since we update TRPM on premature
9014 * exits to ring-3 before executing guest code, see hmR0VmxExitToRing3(). We must
9015 * NOT restore these force-flags.
9016 *
9017 * @returns Strict VBox status code (i.e. informational status codes too).
9018 * @param pVCpu The cross context virtual CPU structure.
9019 * @param pVmxTransient The VMX-transient structure.
9020 * @param pfIntrState Where to store the VT-x guest-interruptibility state.
9021 */
9022static VBOXSTRICTRC hmR0VmxEvaluatePendingEvent(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t *pfIntrState)
9023{
9024 Assert(pfIntrState);
9025 Assert(!TRPMHasTrap(pVCpu));
9026
9027 /*
9028 * Compute/update guest-interruptibility state related FFs.
9029 * The FFs will be used below while evaluating events to be injected.
9030 */
9031 *pfIntrState = hmR0VmxGetGuestIntrStateAndUpdateFFs(pVCpu);
9032
9033 /*
9034 * Evaluate if a new event needs to be injected.
 9035 * An event that is already pending has already been subjected to all the necessary checks.
9036 */
9037 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
9038 bool const fIsNestedGuest = pVmxTransient->fIsNestedGuest;
9039 if ( !pVCpu->hm.s.Event.fPending
9040 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
9041 {
9042 /** @todo SMI. SMIs take priority over NMIs. */
9043
9044 /*
9045 * NMIs.
9046 * NMIs take priority over external interrupts.
9047 */
9048 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9049 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
9050 {
9051 /*
9052 * For a guest, the FF always indicates the guest's ability to receive an NMI.
9053 *
9054 * For a nested-guest, the FF always indicates the outer guest's ability to
9055 * receive an NMI while the guest-interruptibility state bit depends on whether
9056 * the nested-hypervisor is using virtual-NMIs.
9057 */
9058 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
9059 {
9060#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9061 if ( fIsNestedGuest
9062 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT))
9063 return IEMExecVmxVmexitXcptNmi(pVCpu);
9064#endif
9065 hmR0VmxSetPendingXcptNmi(pVCpu);
9066 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
9067 Log4Func(("NMI pending injection\n"));
9068
9069 /* We've injected the NMI, bail. */
9070 return VINF_SUCCESS;
9071 }
9072 else if (!fIsNestedGuest)
9073 hmR0VmxSetNmiWindowExitVmcs(pVmcsInfo);
9074 }
9075
9076 /*
9077 * External interrupts (PIC/APIC).
9078 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it.
 9079 * We cannot request the interrupt from the controller a second time.
9080 */
9081 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
9082 && !pVCpu->hm.s.fSingleInstruction)
9083 {
9084 Assert(!DBGFIsStepping(pVCpu));
9085 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
9086 AssertRC(rc);
9087
9088 /*
 9089 * We must not check EFLAGS directly when executing a nested-guest; use
 9090 * CPUMIsGuestPhysIntrEnabled() instead, as EFLAGS.IF does not control the blocking of
 9091 * external interrupts when "External interrupt exiting" is set. This fixes a nasty
 9092 * SMP hang where nested-guest VCPUs spinning on spinlocks aren't rescued by
 9093 * other VM-exits (like the preemption timer), see @bugref{9562#c18}.
9094 *
9095 * See Intel spec. 25.4.1 "Event Blocking".
9096 */
9097 if (CPUMIsGuestPhysIntrEnabled(pVCpu))
9098 {
9099#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9100 if ( fIsNestedGuest
9101 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
9102 {
9103 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
9104 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
9105 return rcStrict;
9106 }
9107#endif
9108 uint8_t u8Interrupt;
9109 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
9110 if (RT_SUCCESS(rc))
9111 {
9112#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9113 if ( fIsNestedGuest
9114 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
9115 {
9116 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);
9117 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE);
9118 return rcStrict;
9119 }
9120#endif
9121 hmR0VmxSetPendingExtInt(pVCpu, u8Interrupt);
9122 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt));
9123 }
9124 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
9125 {
9126 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
9127
9128 if ( !fIsNestedGuest
9129 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
9130 hmR0VmxApicSetTprThreshold(pVmcsInfo, u8Interrupt >> 4);
9131 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */
9132
9133 /*
9134 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and
9135 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no
9136 * need to re-set this force-flag here.
9137 */
9138 }
9139 else
9140 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
9141
9142 /* We've injected the interrupt or taken necessary action, bail. */
9143 return VINF_SUCCESS;
9144 }
9145 if (!fIsNestedGuest)
9146 hmR0VmxSetIntWindowExitVmcs(pVmcsInfo);
9147 }
9148 }
9149 else if (!fIsNestedGuest)
9150 {
9151 /*
9152 * An event is being injected or we are in an interrupt shadow. Check if another event is
9153 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept
9154 * the pending event.
9155 */
9156 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
9157 hmR0VmxSetNmiWindowExitVmcs(pVmcsInfo);
9158 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
9159 && !pVCpu->hm.s.fSingleInstruction)
9160 hmR0VmxSetIntWindowExitVmcs(pVmcsInfo);
9161 }
9162 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */
9163
9164 return VINF_SUCCESS;
9165}
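
/*
 * Illustrative, standalone sketch (not used by this file) of the event priority
 * ordering implemented by hmR0VmxEvaluatePendingEvent() above: SMIs would take
 * priority over NMIs (not implemented), NMIs take priority over external
 * interrupts, and when delivery is blocked the corresponding window exit is
 * requested instead.  For brevity the sketch stops at the first decision,
 * whereas the real code still goes on to evaluate external interrupts after
 * arming NMI-window exiting.  All names are made up for the example.
 *
 * @code
 *     #include <stdbool.h>
 *
 *     typedef enum
 *     {
 *         EVT_NONE,               // nothing to do
 *         EVT_INJECT_NMI,         // set the NMI as the pending event
 *         EVT_INJECT_EXT_INT,     // fetch an interrupt from the PIC/APIC and set it pending
 *         EVT_ARM_NMI_WINDOW,     // request NMI-window exiting
 *         EVT_ARM_INT_WINDOW      // request interrupt-window exiting
 *     } EVTACTION;
 *
 *     static EVTACTION evtPickNext(bool fNmiPending, bool fNmiBlocked,
 *                                  bool fIntPending, bool fIntEnabled)
 *     {
 *         if (fNmiPending)
 *             return !fNmiBlocked ? EVT_INJECT_NMI : EVT_ARM_NMI_WINDOW;
 *         if (fIntPending)
 *             return fIntEnabled ? EVT_INJECT_EXT_INT : EVT_ARM_INT_WINDOW;
 *         return EVT_NONE;
 *     }
 * @endcode
 */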
9166
9167
9168/**
9169 * Injects any pending events into the guest if the guest is in a state to
9170 * receive them.
9171 *
9172 * @returns Strict VBox status code (i.e. informational status codes too).
9173 * @param pVCpu The cross context virtual CPU structure.
9174 * @param pVmxTransient The VMX-transient structure.
9175 * @param fIntrState The VT-x guest-interruptibility state.
9176 * @param fStepping Whether we are single-stepping the guest using the
9177 * hypervisor debugger and should return
9178 * VINF_EM_DBG_STEPPED if the event was dispatched
9179 * directly.
9180 */
9181static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, uint32_t fIntrState, bool fStepping)
9182{
9183 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
9184 Assert(VMMRZCallRing3IsEnabled(pVCpu));
9185
9186#ifdef VBOX_STRICT
9187 /*
9188 * Verify guest-interruptibility state.
9189 *
9190 * We put this in a scoped block so we do not accidentally use fBlockSti or fBlockMovSS,
9191 * since injecting an event may modify the interruptibility state and we must thus always
9192 * use fIntrState.
9193 */
9194 {
9195 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
9196 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
9197 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
9198 Assert(!fBlockSti || pVCpu->cpum.GstCtx.eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
 9199        Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI));    /* We don't support block-by-SMI yet. */
9200 Assert(!TRPMHasTrap(pVCpu));
9201 NOREF(fBlockMovSS); NOREF(fBlockSti);
9202 }
9203#endif
9204
9205 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
9206 if (pVCpu->hm.s.Event.fPending)
9207 {
9208 /*
9209 * Do -not- clear any interrupt-window exiting control here. We might have an interrupt
9210 * pending even while injecting an event and in this case, we want a VM-exit as soon as
9211 * the guest is ready for the next interrupt, see @bugref{6208#c45}.
9212 *
9213 * See Intel spec. 26.6.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9214 */
9215 uint32_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVCpu->hm.s.Event.u64IntInfo);
9216#ifdef VBOX_STRICT
9217 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
9218 {
9219 Assert(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_IF);
9220 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
9221 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
9222 }
9223 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
9224 {
9225 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
9226 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI));
9227 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
9228 }
9229#endif
9230 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
9231 uIntType));
9232
9233 /*
9234 * Inject the event and get any changes to the guest-interruptibility state.
9235 *
9236 * The guest-interruptibility state may need to be updated if we inject the event
9237 * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
9238 */
9239 rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVmxTransient, &pVCpu->hm.s.Event, fStepping, &fIntrState);
9240 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
9241
9242 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
9243 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
9244 else
9245 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
9246 }
9247
9248 /*
9249 * Deliver any pending debug exceptions if the guest is single-stepping using EFLAGS.TF and
 9250 * is in an interrupt shadow (block-by-STI or block-by-MOV SS).
9251 */
9252 if ( (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
9253 && !pVmxTransient->fIsNestedGuest)
9254 {
9255 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS);
9256
9257 if (!pVCpu->hm.s.fSingleInstruction)
9258 {
9259 /*
9260 * Set or clear the BS bit depending on whether the trap flag is active or not. We need
9261 * to do both since we clear the BS bit from the VMCS while exiting to ring-3.
9262 */
9263 Assert(!DBGFIsStepping(pVCpu));
9264 uint8_t const fTrapFlag = !!(pVCpu->cpum.GstCtx.eflags.u32 & X86_EFL_TF);
9265 int rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, fTrapFlag << VMX_BF_VMCS_PENDING_DBG_XCPT_BS_SHIFT);
9266 AssertRC(rc);
9267 }
9268 else
9269 {
9270 /*
 9271 * When single-stepping over STI/MOV-SS in the hypervisor debugger using EFLAGS.TF,
 9272 * we must not deliver a debug exception but rather clear the interrupt inhibition.
 9273 * However, hmR0VmxExportSharedDebugState takes care of this case (as well as the
 9274 * case where we use MTF), so just make sure it is called before executing guest code.
9275 */
9276 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR_MASK);
9277 }
9278 }
 9279    /* else: for nested-guests, this is currently handled while merging controls. */
9280
9281 /*
9282 * Finally, update the guest-interruptibility state.
9283 *
9284 * This is required for the real-on-v86 software interrupt injection, for
9285 * pending debug exceptions as well as updates to the guest state from ring-3 (IEM).
9286 */
9287 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
9288 AssertRC(rc);
9289
9290 /*
9291 * There's no need to clear the VM-entry interruption-information field here if we're not
9292 * injecting anything. VT-x clears the valid bit on every VM-exit.
9293 *
9294 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
9295 */
9296
9297 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
9298 return rcStrict;
9299}
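
/*
 * Illustrative, standalone sketch (not used by this file) of how the pending
 * debug-exceptions value written by hmR0VmxInjectPendingEvent() above is derived
 * from EFLAGS.TF.  The bit positions are the architectural EFLAGS.TF bit (8) and
 * the BS bit (14) of the pending-debug-exceptions field, the latter also being
 * referenced by the checks in hmR0VmxCheckGuestState() further down.
 *
 * @code
 *     #include <stdint.h>
 *
 *     #define EFL_TF_BIT           8   // EFLAGS.TF (trap flag)
 *     #define PENDING_DBG_BS_BIT  14   // pending-debug-exceptions BS (single step)
 *
 *     // Value for the guest pending-debug-exceptions VMCS field when the guest
 *     // single-steps itself with EFLAGS.TF while in an interrupt shadow.
 *     static uint64_t pendingDbgXcptFromEflags(uint32_t fEFlags)
 *     {
 *         uint64_t const fTrapFlag = (fEFlags >> EFL_TF_BIT) & 1;
 *         return fTrapFlag << PENDING_DBG_BS_BIT;
 *     }
 * @endcode
 */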
9300
9301
9302/**
9303 * Enters the VT-x session.
9304 *
9305 * @returns VBox status code.
9306 * @param pVCpu The cross context virtual CPU structure.
9307 */
9308VMMR0DECL(int) VMXR0Enter(PVMCPUCC pVCpu)
9309{
9310 AssertPtr(pVCpu);
9311 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported);
9312 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
9313
9314 LogFlowFunc(("pVCpu=%p\n", pVCpu));
9315 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
9316 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
9317
9318#ifdef VBOX_STRICT
9319 /* At least verify VMX is enabled, since we can't check if we're in VMX root mode without #GP'ing. */
9320 RTCCUINTREG uHostCr4 = ASMGetCR4();
9321 if (!(uHostCr4 & X86_CR4_VMXE))
9322 {
9323 LogRelFunc(("X86_CR4_VMXE bit in CR4 is not set!\n"));
9324 return VERR_VMX_X86_CR4_VMXE_CLEARED;
9325 }
9326#endif
9327
9328 /*
9329 * Do the EMT scheduled L1D and MDS flush here if needed.
9330 */
9331 if (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_L1D_SCHED)
9332 ASMWrMsr(MSR_IA32_FLUSH_CMD, MSR_IA32_FLUSH_CMD_F_L1D);
9333 else if (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_MDS_SCHED)
9334 hmR0MdsClear();
9335
9336 /*
9337 * Load the appropriate VMCS as the current and active one.
9338 */
9339 PVMXVMCSINFO pVmcsInfo;
9340 bool const fInNestedGuestMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
9341 if (!fInNestedGuestMode)
9342 pVmcsInfo = &pVCpu->hmr0.s.vmx.VmcsInfo;
9343 else
9344 pVmcsInfo = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
9345 int rc = hmR0VmxLoadVmcs(pVmcsInfo);
9346 if (RT_SUCCESS(rc))
9347 {
9348 pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs = fInNestedGuestMode;
9349 pVCpu->hm.s.vmx.fSwitchedToNstGstVmcsCopyForRing3 = fInNestedGuestMode;
9350 pVCpu->hmr0.s.fLeaveDone = false;
9351 Log4Func(("Loaded Vmcs. HostCpuId=%u\n", RTMpCpuId()));
9352 }
9353 return rc;
9354}
9355
9356
9357/**
9358 * The thread-context callback.
9359 *
9360 * This is used together with RTThreadCtxHookCreate() on platforms which
 9361 * support it, and directly from VMMR0EmtPrepareForBlocking() and
9362 * VMMR0EmtResumeAfterBlocking() on platforms which don't.
9363 *
9364 * @param enmEvent The thread-context event.
9365 * @param pVCpu The cross context virtual CPU structure.
9366 * @param fGlobalInit Whether global VT-x/AMD-V init. was used.
9367 * @thread EMT(pVCpu)
9368 */
9369VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPUCC pVCpu, bool fGlobalInit)
9370{
9371 AssertPtr(pVCpu);
9372 RT_NOREF1(fGlobalInit);
9373
9374 switch (enmEvent)
9375 {
9376 case RTTHREADCTXEVENT_OUT:
9377 {
9378 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
9379 VMCPU_ASSERT_EMT(pVCpu);
9380
9381 /* No longjmps (logger flushes, locks) in this fragile context. */
9382 VMMRZCallRing3Disable(pVCpu);
9383 Log4Func(("Preempting: HostCpuId=%u\n", RTMpCpuId()));
9384
9385 /* Restore host-state (FPU, debug etc.) */
9386 if (!pVCpu->hmr0.s.fLeaveDone)
9387 {
9388 /*
9389 * Do -not- import the guest-state here as we might already be in the middle of importing
9390 * it, esp. bad if we're holding the PGM lock, see comment in hmR0VmxImportGuestState().
9391 */
9392 hmR0VmxLeave(pVCpu, false /* fImportState */);
9393 pVCpu->hmr0.s.fLeaveDone = true;
9394 }
9395
9396 /* Leave HM context, takes care of local init (term). */
9397 int rc = HMR0LeaveCpu(pVCpu);
9398 AssertRC(rc);
9399
9400 /* Restore longjmp state. */
9401 VMMRZCallRing3Enable(pVCpu);
9402 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreempt);
9403 break;
9404 }
9405
9406 case RTTHREADCTXEVENT_IN:
9407 {
9408 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
9409 VMCPU_ASSERT_EMT(pVCpu);
9410
9411 /* Do the EMT scheduled L1D and MDS flush here if needed. */
9412 if (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_L1D_SCHED)
9413 ASMWrMsr(MSR_IA32_FLUSH_CMD, MSR_IA32_FLUSH_CMD_F_L1D);
9414 else if (pVCpu->hmr0.s.fWorldSwitcher & HM_WSF_MDS_SCHED)
9415 hmR0MdsClear();
9416
9417 /* No longjmps here, as we don't want to trigger preemption (& its hook) while resuming. */
9418 VMMRZCallRing3Disable(pVCpu);
9419 Log4Func(("Resumed: HostCpuId=%u\n", RTMpCpuId()));
9420
9421 /* Initialize the bare minimum state required for HM. This takes care of
9422 initializing VT-x if necessary (onlined CPUs, local init etc.) */
9423 int rc = hmR0EnterCpu(pVCpu);
9424 AssertRC(rc);
9425 Assert( (pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
9426 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
9427
9428 /* Load the active VMCS as the current one. */
9429 PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
9430 rc = hmR0VmxLoadVmcs(pVmcsInfo);
9431 AssertRC(rc);
9432 Log4Func(("Resumed: Loaded Vmcs. HostCpuId=%u\n", RTMpCpuId()));
9433 pVCpu->hmr0.s.fLeaveDone = false;
9434
9435 /* Restore longjmp state. */
9436 VMMRZCallRing3Enable(pVCpu);
9437 break;
9438 }
9439
9440 default:
9441 break;
9442 }
9443}
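
/*
 * Illustrative, standalone sketch (not used by this file) of the thread-context
 * callback pattern used by VMXR0ThreadCtxCallback() above: restore the host
 * state at most once when being scheduled out, and re-initialize the CPU and
 * reload the VMCS when being scheduled back in.  The two function pointers are
 * placeholders for the VT-x specific work.
 *
 * @code
 *     #include <stdbool.h>
 *
 *     typedef enum { CTXEVT_OUT, CTXEVT_IN } CTXEVT;
 *
 *     typedef struct VCPUSKETCH
 *     {
 *         bool fLeaveDone;    // host state already restored since the last entry?
 *     } VCPUSKETCH;
 *
 *     static void threadCtxCallbackSketch(CTXEVT enmEvent, VCPUSKETCH *pVCpu,
 *                                         void (*pfnLeave)(void), void (*pfnEnterAndLoadVmcs)(void))
 *     {
 *         switch (enmEvent)
 *         {
 *             case CTXEVT_OUT:                    // being preempted
 *                 if (!pVCpu->fLeaveDone)
 *                 {
 *                     pfnLeave();                 // restore host FPU, debug regs, etc.
 *                     pVCpu->fLeaveDone = true;
 *                 }
 *                 break;
 *
 *             case CTXEVT_IN:                     // scheduled back in, possibly on another CPU
 *                 pfnEnterAndLoadVmcs();          // re-init VT-x and reload the active VMCS
 *                 pVCpu->fLeaveDone = false;
 *                 break;
 *         }
 *     }
 * @endcode
 */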
9444
9445
9446/**
9447 * Exports the host state into the VMCS host-state area.
9448 * Sets up the VM-exit MSR-load area.
9449 *
9450 * The CPU state will be loaded from these fields on every successful VM-exit.
9451 *
9452 * @returns VBox status code.
9453 * @param pVCpu The cross context virtual CPU structure.
9454 *
9455 * @remarks No-long-jump zone!!!
9456 */
9457static int hmR0VmxExportHostState(PVMCPUCC pVCpu)
9458{
9459 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
9460
9461 int rc = VINF_SUCCESS;
9462 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)
9463 {
9464 uint64_t uHostCr4 = hmR0VmxExportHostControlRegs();
9465
9466 rc = hmR0VmxExportHostSegmentRegs(pVCpu, uHostCr4);
9467 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9468
9469 hmR0VmxExportHostMsrs(pVCpu);
9470
9471 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_HOST_CONTEXT;
9472 }
9473 return rc;
9474}
9475
9476
9477/**
 9478 * Saves the host state in the VMCS host-state area.
9479 *
9480 * @returns VBox status code.
9481 * @param pVCpu The cross context virtual CPU structure.
9482 *
9483 * @remarks No-long-jump zone!!!
9484 */
9485VMMR0DECL(int) VMXR0ExportHostState(PVMCPUCC pVCpu)
9486{
9487 AssertPtr(pVCpu);
9488 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
9489
9490 /*
9491 * Export the host state here while entering HM context.
9492 * When thread-context hooks are used, we might get preempted and have to re-save the host
9493 * state but most of the time we won't be, so do it here before we disable interrupts.
9494 */
9495 return hmR0VmxExportHostState(pVCpu);
9496}
9497
9498
9499/**
9500 * Exports the guest state into the VMCS guest-state area.
9501 *
 9502 * This will typically be done before VM-entry when the guest-CPU state and the
9503 * VMCS state may potentially be out of sync.
9504 *
9505 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas. Sets up the
9506 * VM-entry controls.
9507 * Sets up the appropriate VMX non-root function to execute guest code based on
9508 * the guest CPU mode.
9509 *
9510 * @returns VBox strict status code.
9511 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
9512 * without unrestricted guest execution and the VMMDev is not presently
9513 * mapped (e.g. EFI32).
9514 *
9515 * @param pVCpu The cross context virtual CPU structure.
9516 * @param pVmxTransient The VMX-transient structure.
9517 *
9518 * @remarks No-long-jump zone!!!
9519 */
9520static VBOXSTRICTRC hmR0VmxExportGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9521{
9522 AssertPtr(pVCpu);
9523 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
9524 LogFlowFunc(("pVCpu=%p\n", pVCpu));
9525
9526 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);
9527
9528 /*
9529 * Determine real-on-v86 mode.
9530 * Used when the guest is in real-mode and unrestricted guest execution is not used.
9531 */
9532 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmxTransient->pVmcsInfo->pShared;
9533 if ( pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest
9534 || !CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx))
9535 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
9536 else
9537 {
9538 Assert(!pVmxTransient->fIsNestedGuest);
9539 pVmcsInfoShared->RealMode.fRealOnV86Active = true;
9540 }
9541
9542 /*
9543 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
9544 * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it.
9545 */
9546 int rc = hmR0VmxExportGuestEntryExitCtls(pVCpu, pVmxTransient);
9547 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9548
9549 rc = hmR0VmxExportGuestCR0(pVCpu, pVmxTransient);
9550 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9551
9552 VBOXSTRICTRC rcStrict = hmR0VmxExportGuestCR3AndCR4(pVCpu, pVmxTransient);
9553 if (rcStrict == VINF_SUCCESS)
9554 { /* likely */ }
9555 else
9556 {
9557 Assert(rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE_NP(rcStrict));
9558 return rcStrict;
9559 }
9560
9561 rc = hmR0VmxExportGuestSegRegsXdtr(pVCpu, pVmxTransient);
9562 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9563
9564 rc = hmR0VmxExportGuestMsrs(pVCpu, pVmxTransient);
9565 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9566
9567 hmR0VmxExportGuestApicTpr(pVCpu, pVmxTransient);
9568 hmR0VmxExportGuestXcptIntercepts(pVCpu, pVmxTransient);
9569 hmR0VmxExportGuestRip(pVCpu);
9570 hmR0VmxExportGuestRsp(pVCpu);
9571 hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
9572
9573 rc = hmR0VmxExportGuestHwvirtState(pVCpu, pVmxTransient);
9574 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
9575
 9576    /* Clear any bits that may be set but are exported unconditionally, as well as unused/reserved bits. */
9577 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~( (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP)
9578 | HM_CHANGED_GUEST_CR2
9579 | (HM_CHANGED_GUEST_DR_MASK & ~HM_CHANGED_GUEST_DR7)
9580 | HM_CHANGED_GUEST_X87
9581 | HM_CHANGED_GUEST_SSE_AVX
9582 | HM_CHANGED_GUEST_OTHER_XSAVE
9583 | HM_CHANGED_GUEST_XCRx
9584 | HM_CHANGED_GUEST_KERNEL_GS_BASE /* Part of lazy or auto load-store MSRs. */
9585 | HM_CHANGED_GUEST_SYSCALL_MSRS /* Part of lazy or auto load-store MSRs. */
9586 | HM_CHANGED_GUEST_TSC_AUX
9587 | HM_CHANGED_GUEST_OTHER_MSRS
9588 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));
9589
9590 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x);
9591 return rc;
9592}
9593
9594
9595/**
9596 * Exports the state shared between the host and guest into the VMCS.
9597 *
9598 * @param pVCpu The cross context virtual CPU structure.
9599 * @param pVmxTransient The VMX-transient structure.
9600 *
9601 * @remarks No-long-jump zone!!!
9602 */
9603static void hmR0VmxExportSharedState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9604{
9605 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
9606 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
9607
9608 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK)
9609 {
9610 int rc = hmR0VmxExportSharedDebugState(pVCpu, pVmxTransient);
9611 AssertRC(rc);
9612 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK;
9613
9614 /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
9615 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_RFLAGS)
9616 hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
9617 }
9618
9619 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_GUEST_LAZY_MSRS)
9620 {
9621 hmR0VmxLazyLoadGuestMsrs(pVCpu);
9622 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_VMX_GUEST_LAZY_MSRS;
9623 }
9624
9625 AssertMsg(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE),
9626 ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
9627}
9628
9629
9630/**
9631 * Worker for loading the guest-state bits in the inner VT-x execution loop.
9632 *
9633 * @returns Strict VBox status code (i.e. informational status codes too).
9634 * @retval VINF_EM_RESCHEDULE_REM if we try to emulate non-paged guest code
9635 * without unrestricted guest execution and the VMMDev is not presently
9636 * mapped (e.g. EFI32).
9637 *
9638 * @param pVCpu The cross context virtual CPU structure.
9639 * @param pVmxTransient The VMX-transient structure.
9640 *
9641 * @remarks No-long-jump zone!!!
9642 */
9643static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
9644{
9645 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
9646 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
9647
9648#ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
9649 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
9650#endif
9651
9652 /*
9653 * For many VM-exits only RIP/RSP/RFLAGS (and HWVIRT state when executing a nested-guest)
 9654 * change. First try to export only these without going through all other changed-flag checks.
9655 */
9656 VBOXSTRICTRC rcStrict;
9657 uint64_t const fCtxMask = HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE;
9658 uint64_t const fMinimalMask = HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT;
9659 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
9660
 9661    /* If only RIP/RSP/RFLAGS/HWVIRT changed, export only those (quicker, happens more often). */
9662 if ( (fCtxChanged & fMinimalMask)
9663 && !(fCtxChanged & (fCtxMask & ~fMinimalMask)))
9664 {
9665 hmR0VmxExportGuestRip(pVCpu);
9666 hmR0VmxExportGuestRsp(pVCpu);
9667 hmR0VmxExportGuestRflags(pVCpu, pVmxTransient);
9668 rcStrict = hmR0VmxExportGuestHwvirtState(pVCpu, pVmxTransient);
9669 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportMinimal);
9670 }
9671 /* If anything else also changed, go through the full export routine and export as required. */
9672 else if (fCtxChanged & fCtxMask)
9673 {
9674 rcStrict = hmR0VmxExportGuestState(pVCpu, pVmxTransient);
9675 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
9676 { /* likely */}
9677 else
9678 {
9679 AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM, ("Failed to export guest state! rc=%Rrc\n",
9680 VBOXSTRICTRC_VAL(rcStrict)));
9681 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
9682 return rcStrict;
9683 }
9684 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);
9685 }
9686 /* Nothing changed, nothing to load here. */
9687 else
9688 rcStrict = VINF_SUCCESS;
9689
9690#ifdef VBOX_STRICT
9691 /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */
9692 uint64_t const fCtxChangedCur = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
9693 AssertMsg(!(fCtxChangedCur & fCtxMask), ("fCtxChangedCur=%#RX64\n", fCtxChangedCur));
9694#endif
9695 return rcStrict;
9696}
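
/*
 * Illustrative, standalone sketch (not used by this file) of the minimal-vs-full
 * export decision made by hmR0VmxExportGuestStateOptimal() above.  The flag
 * values are made up for the example; the real HM_CHANGED_* masks are defined
 * elsewhere in HM.
 *
 * @code
 *     #include <stdint.h>
 *
 *     #define F_RIP     UINT64_C(0x0001)   // illustrative values only
 *     #define F_RSP     UINT64_C(0x0002)
 *     #define F_RFLAGS  UINT64_C(0x0004)
 *     #define F_HWVIRT  UINT64_C(0x0008)
 *     #define F_SHARED  UINT64_C(0x8000)   // host/guest shared state, exported separately
 *     #define F_ALL     UINT64_C(0xffff)
 *
 *     typedef enum { EXPORT_NOTHING, EXPORT_MINIMAL, EXPORT_FULL } EXPORTKIND;
 *
 *     // Minimal export iff something in the minimal set changed and nothing
 *     // outside it (ignoring shared state) did; otherwise full or nothing.
 *     static EXPORTKIND pickExport(uint64_t fCtxChanged)
 *     {
 *         uint64_t const fCtxMask     = F_ALL & ~F_SHARED;
 *         uint64_t const fMinimalMask = F_RIP | F_RSP | F_RFLAGS | F_HWVIRT;
 *         if (   (fCtxChanged & fMinimalMask)
 *             && !(fCtxChanged & (fCtxMask & ~fMinimalMask)))
 *             return EXPORT_MINIMAL;
 *         if (fCtxChanged & fCtxMask)
 *             return EXPORT_FULL;
 *         return EXPORT_NOTHING;
 *     }
 * @endcode
 */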
9697
9698
9699/**
 9700 * Tries to determine what part of the guest-state VT-x has deemed invalid
 9701 * and updates the error record fields accordingly.
9702 *
9703 * @returns VMX_IGS_* error codes.
9704 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
9705 * wrong with the guest state.
9706 *
9707 * @param pVCpu The cross context virtual CPU structure.
9708 * @param pVmcsInfo The VMCS info. object.
9709 *
9710 * @remarks This function assumes our cache of the VMCS controls
 9711 * is valid, i.e. hmR0VmxCheckCachedVmcsCtls() succeeded.
9712 */
9713static uint32_t hmR0VmxCheckGuestState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
9714{
9715#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
9716#define HMVMX_CHECK_BREAK(expr, err) do { \
9717 if (!(expr)) { uError = (err); break; } \
9718 } while (0)
9719
9720 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9721 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
9722 uint32_t uError = VMX_IGS_ERROR;
9723 uint32_t u32IntrState = 0;
9724 bool const fUnrestrictedGuest = pVM->hmr0.s.vmx.fUnrestrictedGuest;
9725 do
9726 {
9727 int rc;
9728
9729 /*
9730 * Guest-interruptibility state.
9731 *
 9732 * Read this first so that even when a check that fails does not actually
 9733 * require the guest-interruptibility state, the recorded value still reflects
 9734 * the correct VMCS contents and avoids causing further confusion.
9735 */
9736 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &u32IntrState);
9737 AssertRC(rc);
9738
9739 uint32_t u32Val;
9740 uint64_t u64Val;
9741
9742 /*
9743 * CR0.
9744 */
9745 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
9746 uint64_t fSetCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 & g_HmMsrs.u.vmx.u64Cr0Fixed1);
9747 uint64_t const fZapCr0 = (g_HmMsrs.u.vmx.u64Cr0Fixed0 | g_HmMsrs.u.vmx.u64Cr0Fixed1);
9748 /* Exceptions for unrestricted guest execution for CR0 fixed bits (PE, PG).
9749 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
9750 if (fUnrestrictedGuest)
9751 fSetCr0 &= ~(uint64_t)(X86_CR0_PE | X86_CR0_PG);
9752
9753 uint64_t u64GuestCr0;
9754 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_CR0, &u64GuestCr0);
9755 AssertRC(rc);
9756 HMVMX_CHECK_BREAK((u64GuestCr0 & fSetCr0) == fSetCr0, VMX_IGS_CR0_FIXED1);
9757 HMVMX_CHECK_BREAK(!(u64GuestCr0 & ~fZapCr0), VMX_IGS_CR0_FIXED0);
9758 if ( !fUnrestrictedGuest
9759 && (u64GuestCr0 & X86_CR0_PG)
9760 && !(u64GuestCr0 & X86_CR0_PE))
9761 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
9762
9763 /*
9764 * CR4.
9765 */
9766 /** @todo Why do we need to OR and AND the fixed-0 and fixed-1 bits below? */
9767 uint64_t const fSetCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 & g_HmMsrs.u.vmx.u64Cr4Fixed1);
9768 uint64_t const fZapCr4 = (g_HmMsrs.u.vmx.u64Cr4Fixed0 | g_HmMsrs.u.vmx.u64Cr4Fixed1);
9769
9770 uint64_t u64GuestCr4;
9771 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_CR4, &u64GuestCr4);
9772 AssertRC(rc);
9773 HMVMX_CHECK_BREAK((u64GuestCr4 & fSetCr4) == fSetCr4, VMX_IGS_CR4_FIXED1);
9774 HMVMX_CHECK_BREAK(!(u64GuestCr4 & ~fZapCr4), VMX_IGS_CR4_FIXED0);
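
        /*
         * The CR0/CR4 checks above boil down to the following standalone helper
         * (illustrative only, not used by this code): every bit in fSet must be 1
         * and no bit outside fZap may be 1.
         *
         * @code
         *     #include <stdint.h>
         *     #include <stdbool.h>
         *
         *     static bool crFixedBitsOk(uint64_t uCr, uint64_t uFixed0, uint64_t uFixed1)
         *     {
         *         uint64_t const fSet = uFixed0 & uFixed1;   // bits required to be 1
         *         uint64_t const fZap = uFixed0 | uFixed1;   // bits allowed to be 1
         *         return (uCr & fSet) == fSet
         *             && !(uCr & ~fZap);
         *     }
         * @endcode
         */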
9775
9776 /*
9777 * IA32_DEBUGCTL MSR.
9778 */
9779 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
9780 AssertRC(rc);
9781 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
9782 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
9783 {
9784 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
9785 }
9786 uint64_t u64DebugCtlMsr = u64Val;
9787
9788#ifdef VBOX_STRICT
9789 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
9790 AssertRC(rc);
9791 Assert(u32Val == pVmcsInfo->u32EntryCtls);
9792#endif
9793 bool const fLongModeGuest = RT_BOOL(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
9794
9795 /*
9796 * RIP and RFLAGS.
9797 */
9798 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_RIP, &u64Val);
9799 AssertRC(rc);
9800 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
9801 if ( !fLongModeGuest
9802 || !pCtx->cs.Attr.n.u1Long)
9803 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
9804 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
9805 * must be identical if the "IA-32e mode guest" VM-entry
9806 * control is 1 and CS.L is 1. No check applies if the
9807 * CPU supports 64 linear-address bits. */
9808
9809 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
9810 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_RFLAGS, &u64Val);
9811 AssertRC(rc);
9812 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
9813 VMX_IGS_RFLAGS_RESERVED);
9814 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
9815 uint32_t const u32Eflags = u64Val;
9816
9817 if ( fLongModeGuest
9818 || ( fUnrestrictedGuest
9819 && !(u64GuestCr0 & X86_CR0_PE)))
9820 {
9821 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
9822 }
9823
9824 uint32_t u32EntryInfo;
9825 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
9826 AssertRC(rc);
9827 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
9828 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
9829
9830 /*
9831 * 64-bit checks.
9832 */
9833 if (fLongModeGuest)
9834 {
9835 HMVMX_CHECK_BREAK(u64GuestCr0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
9836 HMVMX_CHECK_BREAK(u64GuestCr4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
9837 }
9838
9839 if ( !fLongModeGuest
9840 && (u64GuestCr4 & X86_CR4_PCIDE))
9841 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
9842
9843 /** @todo CR3 field must be such that bits 63:52 and bits in the range
9844 * 51:32 beyond the processor's physical-address width are 0. */
9845
9846 if ( (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
9847 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
9848 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
9849
9850 rc = VMXReadVmcsNw(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
9851 AssertRC(rc);
9852 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
9853
9854 rc = VMXReadVmcsNw(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
9855 AssertRC(rc);
9856 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
9857
9858 /*
9859 * PERF_GLOBAL MSR.
9860 */
9861 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)
9862 {
9863 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
9864 AssertRC(rc);
9865 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
9866 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
9867 }
9868
9869 /*
9870 * PAT MSR.
9871 */
9872 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
9873 {
9874 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
9875 AssertRC(rc);
9876 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0x707070707070707)), VMX_IGS_PAT_MSR_RESERVED);
9877 for (unsigned i = 0; i < 8; i++)
9878 {
9879 uint8_t u8Val = (u64Val & 0xff);
9880 if ( u8Val != 0 /* UC */
9881 && u8Val != 1 /* WC */
9882 && u8Val != 4 /* WT */
9883 && u8Val != 5 /* WP */
9884 && u8Val != 6 /* WB */
9885 && u8Val != 7 /* UC- */)
9886 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
9887 u64Val >>= 8;
9888 }
9889 }
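
        /*
         * Standalone sketch (illustrative only, not used by this code) of the PAT
         * memory-type validation performed by the loop above: each of the eight
         * PAT entries must be UC (0), WC (1), WT (4), WP (5), WB (6) or UC- (7).
         *
         * @code
         *     #include <stdint.h>
         *     #include <stdbool.h>
         *
         *     static bool patMsrTypesOk(uint64_t uPatMsr)
         *     {
         *         for (unsigned i = 0; i < 8; i++, uPatMsr >>= 8)
         *         {
         *             uint8_t const u8Type = uPatMsr & 0xff;
         *             if (   u8Type != 0 && u8Type != 1 && u8Type != 4
         *                 && u8Type != 5 && u8Type != 6 && u8Type != 7)
         *                 return false;   // 2, 3 and anything above 7 are reserved
         *         }
         *         return true;
         *     }
         * @endcode
         */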
9890
9891 /*
9892 * EFER MSR.
9893 */
9894 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
9895 {
9896 Assert(g_fHmVmxSupportsVmcsEfer);
9897 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
9898 AssertRC(rc);
9899 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
9900 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
9901 HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL( pVmcsInfo->u32EntryCtls
9902 & VMX_ENTRY_CTLS_IA32E_MODE_GUEST),
9903 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
9904 /** @todo r=ramshankar: Unrestricted check here is probably wrong, see
9905 * iemVmxVmentryCheckGuestState(). */
9906 HMVMX_CHECK_BREAK( fUnrestrictedGuest
9907 || !(u64GuestCr0 & X86_CR0_PG)
9908 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME),
9909 VMX_IGS_EFER_LMA_LME_MISMATCH);
9910 }
9911
9912 /*
9913 * Segment registers.
9914 */
9915 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
9916 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
9917 if (!(u32Eflags & X86_EFL_VM))
9918 {
9919 /* CS */
9920 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
9921 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
9922 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
9923 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
9924 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
9925 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
9926 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
9927 /* CS cannot be loaded with NULL in protected mode. */
9928 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
9929 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
9930 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
9931 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
9932 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
9933 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
9934 else if (fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
9935 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
9936 else
9937 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
9938
9939 /* SS */
9940 HMVMX_CHECK_BREAK( fUnrestrictedGuest
9941 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
9942 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
9943 if ( !(pCtx->cr0 & X86_CR0_PE)
9944 || pCtx->cs.Attr.n.u4Type == 3)
9945 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
9946
9947 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
9948 {
9949 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
9950 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
9951 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
9952 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
9953 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
9954 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
9955 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
9956 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
9957 }
9958
9959 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxExportGuestSReg(). */
9960 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
9961 {
9962 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
9963 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
9964 HMVMX_CHECK_BREAK( fUnrestrictedGuest
9965 || pCtx->ds.Attr.n.u4Type > 11
9966 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
9967 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
9968 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
9969 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
9970 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
9971 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
9972 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
9973 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9974 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
9975 }
9976 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
9977 {
9978 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
9979 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
9980 HMVMX_CHECK_BREAK( fUnrestrictedGuest
9981 || pCtx->es.Attr.n.u4Type > 11
9982 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
9983 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
9984 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
9985 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
9986 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
9987 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
9988 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
9989 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
9990 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
9991 }
9992 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
9993 {
9994 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
9995 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
9996 HMVMX_CHECK_BREAK( fUnrestrictedGuest
9997 || pCtx->fs.Attr.n.u4Type > 11
9998 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
9999 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
10000 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
10001 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
10002 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
10003 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
10004 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
10005 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
10006 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
10007 }
10008 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
10009 {
10010 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
10011 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
10012 HMVMX_CHECK_BREAK( fUnrestrictedGuest
10013 || pCtx->gs.Attr.n.u4Type > 11
10014 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
10015 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
10016 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
10017 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
10018 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
10019 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
10020 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
10021 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
10022 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
10023 }
10024 /* 64-bit capable CPUs. */
10025 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
10026 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
10027 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
10028 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
10029 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
10030 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
10031 VMX_IGS_LONGMODE_SS_BASE_INVALID);
10032 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
10033 VMX_IGS_LONGMODE_DS_BASE_INVALID);
10034 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
10035 VMX_IGS_LONGMODE_ES_BASE_INVALID);
10036 }
10037 else
10038 {
10039 /* V86 mode checks. */
10040 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
10041 if (pVmcsInfo->pShared->RealMode.fRealOnV86Active)
10042 {
10043 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
10044 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
10045 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
10046 }
10047 else
10048 {
10049 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
10050 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
10051 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
10052 }
10053
10054 /* CS */
10055 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
10056 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
10057 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
10058 /* SS */
10059 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
10060 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
10061 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
10062 /* DS */
10063 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
10064 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
10065 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
10066 /* ES */
10067 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
10068 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
10069 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
10070 /* FS */
10071 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
10072 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
10073 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
10074 /* GS */
10075 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
10076 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
10077 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
10078 /* 64-bit capable CPUs. */
10079 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
10080 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
10081 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
10082 || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
10083 HMVMX_CHECK_BREAK(!RT_HI_U32(pCtx->cs.u64Base), VMX_IGS_LONGMODE_CS_BASE_INVALID);
10084 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ss.u64Base),
10085 VMX_IGS_LONGMODE_SS_BASE_INVALID);
10086 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->ds.u64Base),
10087 VMX_IGS_LONGMODE_DS_BASE_INVALID);
10088 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !RT_HI_U32(pCtx->es.u64Base),
10089 VMX_IGS_LONGMODE_ES_BASE_INVALID);
10090 }
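
        /*
         * The limit/granularity pairs checked repeatedly above follow one rule,
         * shown here as a standalone helper (illustrative only, not used by this
         * code): if any of limit[11:0] is clear the G bit must be 0, and if any of
         * limit[31:20] is set the G bit must be 1.
         *
         * @code
         *     #include <stdint.h>
         *     #include <stdbool.h>
         *
         *     static bool segLimitGranularityOk(uint32_t u32Limit, bool fGranularity)
         *     {
         *         if ((u32Limit & 0xfff) != 0xfff && fGranularity)
         *             return false;
         *         if ((u32Limit & 0xfff00000) && !fGranularity)
         *             return false;
         *         return true;
         *     }
         * @endcode
         */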
10091
10092 /*
10093 * TR.
10094 */
10095 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
10096 /* 64-bit capable CPUs. */
10097 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
10098 if (fLongModeGuest)
10099 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
10100 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
10101 else
10102 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
10103 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
10104 VMX_IGS_TR_ATTR_TYPE_INVALID);
10105 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
10106 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
10107 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
10108 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
10109 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
10110 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
10111 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
10112 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
10113
10114 /*
10115 * GDTR and IDTR (64-bit capable checks).
10116 */
10117 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
10118 AssertRC(rc);
10119 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
10120
10121 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
10122 AssertRC(rc);
10123 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
10124
10125 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
10126 AssertRC(rc);
10127 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
10128
10129 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
10130 AssertRC(rc);
10131 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
10132
10133 /*
10134 * Guest Non-Register State.
10135 */
10136 /* Activity State. */
10137 uint32_t u32ActivityState;
10138 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
10139 AssertRC(rc);
10140 HMVMX_CHECK_BREAK( !u32ActivityState
10141 || (u32ActivityState & RT_BF_GET(g_HmMsrs.u.vmx.u64Misc, VMX_BF_MISC_ACTIVITY_STATES)),
10142 VMX_IGS_ACTIVITY_STATE_INVALID);
10143 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
10144 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
10145
10146 if ( u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
10147 || u32IntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
10148 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
10149
10150 /** @todo Activity state and injecting interrupts. Left as a todo since we
 10151 * currently don't use any activity state other than ACTIVE. */
10152
10153 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
10154 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
10155
10156 /* Guest interruptibility-state. */
10157 HMVMX_CHECK_BREAK(!(u32IntrState & 0xffffffe0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
10158 HMVMX_CHECK_BREAK((u32IntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
10159 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
10160 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
10161 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
10162 || !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
10163 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
10164 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(u32EntryInfo))
10165 {
10166 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
10167 && !(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
10168 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
10169 }
10170 else if (VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
10171 {
10172 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS),
10173 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
10174 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI),
10175 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
10176 }
10177 /** @todo Assumes the processor is not in SMM. */
10178 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
10179 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
10180 HMVMX_CHECK_BREAK( !(pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)
10181 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI),
10182 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
10183 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
10184 && VMX_ENTRY_INT_INFO_IS_XCPT_NMI(u32EntryInfo))
10185 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI), VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
10186
10187 /* Pending debug exceptions. */
10188 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, &u64Val);
10189 AssertRC(rc);
10190 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
10191 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
10192 u32Val = u64Val; /* For pending debug exceptions checks below. */
10193
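 /* Summary of the consistency rule checked below (Intel SDM, pending debug exceptions):
  * when interrupts are inhibited by STI/MOV-SS or the guest is in the HLT activity state,
  * the BS bit (bit 14) must be set if single-stepping is armed (EFLAGS.TF=1 and
  * IA32_DEBUGCTL.BTF=0) and must be clear otherwise (TF=0 or BTF=1). */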
10194 if ( (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
10195 || (u32IntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
10196 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
10197 {
10198 if ( (u32Eflags & X86_EFL_TF)
10199 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
10200 {
10201 /* Bit 14 is PendingDebug.BS. */
10202 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
10203 }
10204 if ( !(u32Eflags & X86_EFL_TF)
10205 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
10206 {
10207 /* Bit 14 is PendingDebug.BS. */
10208 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
10209 }
10210 }
10211
10212 /* VMCS link pointer. */
10213 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
10214 AssertRC(rc);
10215 if (u64Val != UINT64_C(0xffffffffffffffff))
10216 {
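 /* An all-ones link pointer means the field is unused. Otherwise it must be 4K-aligned,
  * lie within the physical-address width, and the VMCS it references (our shadow VMCS
  * here) must carry the revision ID from IA32_VMX_BASIC and a shadow-VMCS indicator
  * consistent with the VMCS-shadowing control. */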
10217 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
10218 /** @todo Bits beyond the processor's physical-address width MBZ. */
10219 /** @todo SMM checks. */
10220 Assert(pVmcsInfo->HCPhysShadowVmcs == u64Val);
10221 Assert(pVmcsInfo->pvShadowVmcs);
10222 VMXVMCSREVID VmcsRevId;
10223 VmcsRevId.u = *(uint32_t *)pVmcsInfo->pvShadowVmcs;
10224 HMVMX_CHECK_BREAK(VmcsRevId.n.u31RevisionId == RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_ID),
10225 VMX_IGS_VMCS_LINK_PTR_SHADOW_VMCS_ID_INVALID);
10226 HMVMX_CHECK_BREAK(VmcsRevId.n.fIsShadowVmcs == (uint32_t)!!(pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING),
10227 VMX_IGS_VMCS_LINK_PTR_NOT_SHADOW);
10228 }
10229
10230 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries when guest is
10231 * not using nested paging? */
10232 if ( pVM->hmr0.s.fNestedPaging
10233 && !fLongModeGuest
10234 && CPUMIsGuestInPAEModeEx(pCtx))
10235 {
10236 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &u64Val);
10237 AssertRC(rc);
10238 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10239
10240 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &u64Val);
10241 AssertRC(rc);
10242 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10243
10244 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &u64Val);
10245 AssertRC(rc);
10246 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10247
10248 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &u64Val);
10249 AssertRC(rc);
10250 HMVMX_CHECK_BREAK(!(u64Val & X86_PDPE_PAE_MBZ_MASK), VMX_IGS_PAE_PDPTE_RESERVED);
10251 }
10252
10253 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
10254 if (uError == VMX_IGS_ERROR)
10255 uError = VMX_IGS_REASON_NOT_FOUND;
10256 } while (0);
10257
10258 pVCpu->hm.s.u32HMError = uError;
10259 pVCpu->hm.s.vmx.LastError.u32GuestIntrState = u32IntrState;
10260 return uError;
10261
10262#undef HMVMX_ERROR_BREAK
10263#undef HMVMX_CHECK_BREAK
10264}
10265
10266
10267/**
10268 * Map the APIC-access page for virtualizing APIC accesses.
10269 *
10270 * This can cause longjmps to ring-3 due to the acquisition of the PGM lock. Hence,
10271 * this is not done as part of exporting guest state; see @bugref{8721}.
10272 *
10273 * @returns VBox status code.
10274 * @param pVCpu The cross context virtual CPU structure.
10275 */
10276static int hmR0VmxMapHCApicAccessPage(PVMCPUCC pVCpu)
10277{
10278 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10279 uint64_t const u64MsrApicBase = APICGetBaseMsrNoCheck(pVCpu);
10280
10281 Assert(PDMHasApic(pVM));
10282 Assert(u64MsrApicBase);
10283
10284 RTGCPHYS const GCPhysApicBase = u64MsrApicBase & PAGE_BASE_GC_MASK;
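 /* The APIC base MSR holds the 4K-aligned physical base of the APIC MMIO range in bits
  * 12 and up; masking with PAGE_BASE_GC_MASK extracts that page address. */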
10285 Log4Func(("Mapping HC APIC-access page at %#RGp\n", GCPhysApicBase));
10286
10287 /* Unalias the existing mapping. */
10288 int rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
10289 AssertRCReturn(rc, rc);
10290
10291 /* Map the HC APIC-access page in place of the MMIO page, also updates the shadow page tables if necessary. */
10292 Assert(pVM->hmr0.s.vmx.HCPhysApicAccess != NIL_RTHCPHYS);
10293 rc = IOMR0MmioMapMmioHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hmr0.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
10294 AssertRCReturn(rc, rc);
10295
10296 /* Update the per-VCPU cache of the APIC base MSR. */
10297 pVCpu->hm.s.vmx.u64GstMsrApicBase = u64MsrApicBase;
10298 return VINF_SUCCESS;
10299}
10300
10301
10302/**
10303 * Worker function passed to RTMpOnSpecific() that is to be called on the target
10304 * CPU.
10305 *
10306 * @param idCpu The ID for the CPU the function is called on.
10307 * @param pvUser1 Null, not used.
10308 * @param pvUser2 Null, not used.
10309 */
10310static DECLCALLBACK(void) hmR0DispatchHostNmi(RTCPUID idCpu, void *pvUser1, void *pvUser2)
10311{
10312 RT_NOREF3(idCpu, pvUser1, pvUser2);
10313 VMXDispatchHostNmi();
10314}
10315
10316
10317/**
10318 * Dispatches an NMI on the host CPU that received it.
10319 *
10320 * @returns VBox status code.
10321 * @param pVCpu The cross context virtual CPU structure.
10322 * @param pVmcsInfo The VMCS info. object corresponding to the VMCS that was
10323 * executing when receiving the host NMI in VMX non-root
10324 * operation.
10325 */
10326static int hmR0VmxExitHostNmi(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo)
10327{
10328 RTCPUID const idCpu = pVmcsInfo->idHostCpuExec;
10329 Assert(idCpu != NIL_RTCPUID);
10330
10331 /*
10332 * We don't want to delay dispatching the NMI any more than we have to. However,
10333 * we have already chosen -not- to dispatch NMIs when interrupts were still disabled
10334 * after executing guest or nested-guest code for the following reasons:
10335 *
10336 * - We would need to perform VMREADs with interrupts disabled, which is orders of
10337 * magnitude worse when we run as a nested hypervisor without VMCS shadowing
10338 * support from the host hypervisor.
10339 *
10340 * - It affects the common VM-exit scenario and keeps interrupts disabled for a
10341 * longer period of time just for handling an edge case like host NMIs which do
10342 * not occur nearly as frequently as other VM-exits.
10343 *
10344 * Let's cover the most likely scenario first. Check if we are on the target CPU
10345 * and dispatch the NMI right away. This should be much faster than calling into
10346 * RTMpOnSpecific() machinery.
10347 */
10348 bool fDispatched = false;
10349 RTCCUINTREG const fEFlags = ASMIntDisableFlags();
10350 if (idCpu == RTMpCpuId())
10351 {
10352 VMXDispatchHostNmi();
10353 fDispatched = true;
10354 }
10355 ASMSetFlags(fEFlags);
10356 if (fDispatched)
10357 {
10358 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGC);
10359 return VINF_SUCCESS;
10360 }
10361
10362 /*
10363 * RTMpOnSpecific() waits until the worker function has run on the target CPU. So
10364 * there should be no race or recursion even if we are unlucky enough to be preempted
10365 * (to the target CPU) without dispatching the host NMI above.
10366 */
10367 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmiInGCIpi);
10368 return RTMpOnSpecific(idCpu, &hmR0DispatchHostNmi, NULL /* pvUser1 */, NULL /* pvUser2 */);
10369}
10370
10371
10372#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10373/**
10374 * Merges the guest with the nested-guest MSR bitmap in preparation of executing the
10375 * nested-guest using hardware-assisted VMX.
10376 *
10377 * @param pVCpu The cross context virtual CPU structure.
10378 * @param pVmcsInfoNstGst The nested-guest VMCS info. object.
10379 * @param pVmcsInfoGst The guest VMCS info. object.
10380 */
10381static void hmR0VmxMergeMsrBitmapNested(PCVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfoNstGst, PCVMXVMCSINFO pVmcsInfoGst)
10382{
10383 uint32_t const cbMsrBitmap = X86_PAGE_4K_SIZE;
10384 uint64_t *pu64MsrBitmap = (uint64_t *)pVmcsInfoNstGst->pvMsrBitmap;
10385 Assert(pu64MsrBitmap);
10386
10387 /*
10388 * We merge the guest MSR bitmap with the nested-guest MSR bitmap such that any
10389 * MSR that is intercepted by the guest is also intercepted while executing the
10390 * nested-guest using hardware-assisted VMX.
10391 *
10392 * Note! If the nested-guest is not using an MSR bitmap, every MSR must cause a
10393 * nested-guest VM-exit even if the outer guest is not intercepting some
10394 * MSRs. We cannot assume the caller has initialized the nested-guest
10395 * MSR bitmap in this case.
10396 *
10397 * The nested hypervisor may also switch whether it uses MSR bitmaps for
10398 * each of its VM-entries, hence initializing it once per-VM while setting
10399 * up the nested-guest VMCS is not sufficient.
10400 */
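 /* Effectively: merged[i] = nested-guest-bitmap[i] | guest-bitmap[i] for each 64-bit
  * fragment. A set bit means "cause a VM-exit", so an MSR access is only passed through
  * when both the outer guest (HM) and the nested hypervisor permit it. */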
10401 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10402 if (pVmcsNstGst->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
10403 {
10404 uint64_t const *pu64MsrBitmapNstGst = (uint64_t const *)&pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap[0];
10405 uint64_t const *pu64MsrBitmapGst = (uint64_t const *)pVmcsInfoGst->pvMsrBitmap;
10406 Assert(pu64MsrBitmapNstGst);
10407 Assert(pu64MsrBitmapGst);
10408
10409 /** @todo Detect and use EVEX.POR? */
10410 uint32_t const cFrags = cbMsrBitmap / sizeof(uint64_t);
10411 for (uint32_t i = 0; i < cFrags; i++)
10412 pu64MsrBitmap[i] = pu64MsrBitmapNstGst[i] | pu64MsrBitmapGst[i];
10413 }
10414 else
10415 ASMMemFill32(pu64MsrBitmap, cbMsrBitmap, UINT32_C(0xffffffff));
10416}
10417
10418
10419/**
10420 * Merges the guest VMCS in to the nested-guest VMCS controls in preparation of
10421 * hardware-assisted VMX execution of the nested-guest.
10422 *
10423 * For a guest, we don't modify these controls once we set up the VMCS and hence
10424 * this function is never called.
10425 *
10426 * For nested-guests, since the nested hypervisor provides these controls on every
10427 * nested-guest VM-entry and could potentially change them every time, we need to
10428 * merge them before every nested-guest VM-entry.
10429 *
10430 * @returns VBox status code.
10431 * @param pVCpu The cross context virtual CPU structure.
10432 */
10433static int hmR0VmxMergeVmcsNested(PVMCPUCC pVCpu)
10434{
10435 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
10436 PCVMXVMCSINFO const pVmcsInfoGst = &pVCpu->hmr0.s.vmx.VmcsInfo;
10437 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
10438
10439 /*
10440 * Merge the controls with the requirements of the guest VMCS.
10441 *
10442 * We do not need to validate the nested-guest VMX features specified in the nested-guest
10443 * VMCS with the features supported by the physical CPU as it's already done by the
10444 * VMLAUNCH/VMRESUME instruction emulation.
10445 *
10446 * This is because the VMX features exposed by CPUM (through CPUID/MSRs) to the guest are
10447 * derived from the VMX features supported by the physical CPU.
10448 */
10449
10450 /* Pin-based VM-execution controls. */
10451 uint32_t const u32PinCtls = pVmcsNstGst->u32PinCtls | pVmcsInfoGst->u32PinCtls;
10452
10453 /* Processor-based VM-execution controls. */
10454 uint32_t u32ProcCtls = (pVmcsNstGst->u32ProcCtls & ~VMX_PROC_CTLS_USE_IO_BITMAPS)
10455 | (pVmcsInfoGst->u32ProcCtls & ~( VMX_PROC_CTLS_INT_WINDOW_EXIT
10456 | VMX_PROC_CTLS_NMI_WINDOW_EXIT
10457 | VMX_PROC_CTLS_MOV_DR_EXIT
10458 | VMX_PROC_CTLS_USE_TPR_SHADOW
10459 | VMX_PROC_CTLS_MONITOR_TRAP_FLAG));
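 /* Note that interrupt-window/NMI-window exiting, MOV-DR exiting, the TPR shadow and the
  * monitor trap flag are deliberately not inherited from the guest VMCS: HM manages these
  * dynamically for the outer guest, and TPR shadowing for the nested-guest is set up
  * separately below. */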
10460
10461 /* Secondary processor-based VM-execution controls. */
10462 uint32_t const u32ProcCtls2 = (pVmcsNstGst->u32ProcCtls2 & ~VMX_PROC_CTLS2_VPID)
10463 | (pVmcsInfoGst->u32ProcCtls2 & ~( VMX_PROC_CTLS2_VIRT_APIC_ACCESS
10464 | VMX_PROC_CTLS2_INVPCID
10465 | VMX_PROC_CTLS2_VMCS_SHADOWING
10466 | VMX_PROC_CTLS2_RDTSCP
10467 | VMX_PROC_CTLS2_XSAVES_XRSTORS
10468 | VMX_PROC_CTLS2_APIC_REG_VIRT
10469 | VMX_PROC_CTLS2_VIRT_INT_DELIVERY
10470 | VMX_PROC_CTLS2_VMFUNC));
10471
10472 /*
10473 * VM-entry controls:
10474 * These controls contain state that depends on the nested-guest state (primarily
10475 * the EFER MSR) and are thus not constant between VMLAUNCH/VMRESUME and the nested-guest
10476 * VM-exit. Although the nested hypervisor cannot change them, we need to in order to
10477 * properly continue executing the nested-guest if the EFER MSR changes but does not
10478 * cause a nested-guest VM-exit.
10479 *
10480 * VM-exit controls:
10481 * These controls specify the host state on return. We cannot use the controls from
10482 * the nested hypervisor state as-is, since it would contain the guest state rather than
10483 * the host state. Since the host state is subject to change (e.g. preemption, trips
10484 * to ring-3, longjmp and rescheduling to a different host CPU) they are not constant
10485 * through VMLAUNCH/VMRESUME and the nested-guest VM-exit.
10486 *
10487 * VM-entry MSR-load:
10488 * The guest MSRs from the VM-entry MSR-load area are already loaded into the guest-CPU
10489 * context by the VMLAUNCH/VMRESUME instruction emulation.
10490 *
10491 * VM-exit MSR-store:
10492 * The VM-exit emulation will take care of populating the MSRs from the guest-CPU context
10493 * back into the VM-exit MSR-store area.
10494 *
10495 * VM-exit MSR-load areas:
10496 * This must contain the real host MSRs with hardware-assisted VMX execution. Hence, we
10497 * can entirely ignore what the nested hypervisor wants to load here.
10498 */
10499
10500 /*
10501 * Exception bitmap.
10502 *
10503 * We could remove #UD from the guest bitmap and merge it with the nested-guest bitmap
10504 * here (and avoid doing anything while exporting nested-guest state), but to keep the
10505 * code more flexible if intercepting exceptions becomes more dynamic in the future, we do
10506 * it as part of exporting the nested-guest state.
10507 */
10508 uint32_t const u32XcptBitmap = pVmcsNstGst->u32XcptBitmap | pVmcsInfoGst->u32XcptBitmap;
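 /* An exception intercepted by either the outer guest (HM) or the nested hypervisor thus
  * causes a VM-exit while executing the nested-guest. */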
10509
10510 /*
10511 * CR0/CR4 guest/host mask.
10512 *
10513 * Modifications by the nested-guest to CR0/CR4 bits owned by the host and the guest must
10514 * cause VM-exits, so we need to merge them here.
10515 */
10516 uint64_t const u64Cr0Mask = pVmcsNstGst->u64Cr0Mask.u | pVmcsInfoGst->u64Cr0Mask;
10517 uint64_t const u64Cr4Mask = pVmcsNstGst->u64Cr4Mask.u | pVmcsInfoGst->u64Cr4Mask;
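 /* Bits set in the guest/host masks are host-owned: the guest reads the shadow value for
  * them and writes that would change them relative to the read shadow cause VM-exits, so
  * a bit owned by either party stays intercepted. */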
10518
10519 /*
10520 * Page-fault error-code mask and match.
10521 *
10522 * Although we require unrestricted guest execution (and thereby nested-paging) for
10523 * hardware-assisted VMX execution of nested-guests and thus the outer guest doesn't
10524 * normally intercept #PFs, it might intercept them for debugging purposes.
10525 *
10526 * If the outer guest is not intercepting #PFs, we can use the nested-guest #PF filters.
10527 * If the outer guest is intercepting #PFs, we must intercept all #PFs.
10528 */
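 /* Reminder: with #PF set in the exception bitmap, a #PF causes a VM-exit iff
  * (error-code & mask) == match; mask = match = 0 therefore makes every #PF exit,
  * which is what we need when the outer guest intercepts #PFs itself. */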
10529 uint32_t u32XcptPFMask;
10530 uint32_t u32XcptPFMatch;
10531 if (!(pVmcsInfoGst->u32XcptBitmap & RT_BIT(X86_XCPT_PF)))
10532 {
10533 u32XcptPFMask = pVmcsNstGst->u32XcptPFMask;
10534 u32XcptPFMatch = pVmcsNstGst->u32XcptPFMatch;
10535 }
10536 else
10537 {
10538 u32XcptPFMask = 0;
10539 u32XcptPFMatch = 0;
10540 }
10541
10542 /*
10543 * Pause-Loop exiting.
10544 */
10545 /** @todo r=bird: given that both pVM->hm.s.vmx.cPleGapTicks and
10546 * pVM->hm.s.vmx.cPleWindowTicks defaults to zero, I cannot see how
10547 * this will work... */
10548 uint32_t const cPleGapTicks = RT_MIN(pVM->hm.s.vmx.cPleGapTicks, pVmcsNstGst->u32PleGap);
10549 uint32_t const cPleWindowTicks = RT_MIN(pVM->hm.s.vmx.cPleWindowTicks, pVmcsNstGst->u32PleWindow);
10550
10551 /*
10552 * Pending debug exceptions.
10553 * Currently just copy whatever the nested-guest provides us.
10554 */
10555 uint64_t const uPendingDbgXcpts = pVmcsNstGst->u64GuestPendingDbgXcpts.u;
10556
10557 /*
10558 * I/O Bitmap.
10559 *
10560 * We do not use the I/O bitmap that may be provided by the nested hypervisor as we always
10561 * intercept all I/O port accesses.
10562 */
10563 Assert(u32ProcCtls & VMX_PROC_CTLS_UNCOND_IO_EXIT);
10564 Assert(!(u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS));
10565
10566 /*
10567 * VMCS shadowing.
10568 *
10569 * We do not yet expose VMCS shadowing to the guest and thus VMCS shadowing should not be
10570 * enabled while executing the nested-guest.
10571 */
10572 Assert(!(u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING));
10573
10574 /*
10575 * APIC-access page.
10576 */
10577 RTHCPHYS HCPhysApicAccess;
10578 if (u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
10579 {
10580 Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
10581 RTGCPHYS const GCPhysApicAccess = pVmcsNstGst->u64AddrApicAccess.u;
10582
10583 /** @todo NSTVMX: This is not really correct but currently is required to make
10584 * things work. We need to re-enable the page handler when we fallback to
10585 * IEM execution of the nested-guest! */
10586 PGMHandlerPhysicalPageTempOff(pVM, GCPhysApicAccess, GCPhysApicAccess);
10587
10588 void *pvPage;
10589 PGMPAGEMAPLOCK PgLockApicAccess;
10590 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysApicAccess, &pvPage, &PgLockApicAccess);
10591 if (RT_SUCCESS(rc))
10592 {
10593 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysApicAccess, &HCPhysApicAccess);
10594 AssertMsgRCReturn(rc, ("Failed to get host-physical address for APIC-access page at %#RGp\n", GCPhysApicAccess), rc);
10595
10596 /** @todo Handle proper releasing of page-mapping lock later. */
10597 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &PgLockApicAccess);
10598 }
10599 else
10600 return rc;
10601 }
10602 else
10603 HCPhysApicAccess = 0;
10604
10605 /*
10606 * Virtual-APIC page and TPR threshold.
10607 */
10608 RTHCPHYS HCPhysVirtApic;
10609 uint32_t u32TprThreshold;
10610 if (u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
10611 {
10612 Assert(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW);
10613 RTGCPHYS const GCPhysVirtApic = pVmcsNstGst->u64AddrVirtApic.u;
10614
10615 void *pvPage;
10616 PGMPAGEMAPLOCK PgLockVirtApic;
10617 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysVirtApic, &pvPage, &PgLockVirtApic);
10618 if (RT_SUCCESS(rc))
10619 {
10620 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysVirtApic, &HCPhysVirtApic);
10621 AssertMsgRCReturn(rc, ("Failed to get host-physical address for virtual-APIC page at %#RGp\n", GCPhysVirtApic), rc);
10622
10623 /** @todo Handle proper releasing of page-mapping lock later. */
10624 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &PgLockVirtApic);
10625 }
10626 else
10627 return rc;
10628
10629 u32TprThreshold = pVmcsNstGst->u32TprThreshold;
10630 }
10631 else
10632 {
10633 HCPhysVirtApic = 0;
10634 u32TprThreshold = 0;
10635
10636 /*
10637 * We must make sure CR8 reads/writes cause VM-exits when TPR shadowing is not
10638 * used by the nested hypervisor. Preventing MMIO accesses to the physical APIC will
10639 * be taken care of by EPT/shadow paging.
10640 */
10641 if (pVM->hmr0.s.fAllow64BitGuests)
10642 u32ProcCtls |= VMX_PROC_CTLS_CR8_STORE_EXIT
10643 | VMX_PROC_CTLS_CR8_LOAD_EXIT;
10644 }
10645
10646 /*
10647 * Validate basic assumptions.
10648 */
10649 PVMXVMCSINFO pVmcsInfoNstGst = &pVCpu->hmr0.s.vmx.VmcsInfoNstGst;
10650 Assert(pVM->hmr0.s.vmx.fUnrestrictedGuest);
10651 Assert(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS);
10652 Assert(hmGetVmxActiveVmcsInfo(pVCpu) == pVmcsInfoNstGst);
10653
10654 /*
10655 * Commit it to the nested-guest VMCS.
10656 */
10657 int rc = VINF_SUCCESS;
10658 if (pVmcsInfoNstGst->u32PinCtls != u32PinCtls)
10659 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, u32PinCtls);
10660 if (pVmcsInfoNstGst->u32ProcCtls != u32ProcCtls)
10661 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, u32ProcCtls);
10662 if (pVmcsInfoNstGst->u32ProcCtls2 != u32ProcCtls2)
10663 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, u32ProcCtls2);
10664 if (pVmcsInfoNstGst->u32XcptBitmap != u32XcptBitmap)
10665 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
10666 if (pVmcsInfoNstGst->u64Cr0Mask != u64Cr0Mask)
10667 rc |= VMXWriteVmcsNw(VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask);
10668 if (pVmcsInfoNstGst->u64Cr4Mask != u64Cr4Mask)
10669 rc |= VMXWriteVmcsNw(VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask);
10670 if (pVmcsInfoNstGst->u32XcptPFMask != u32XcptPFMask)
10671 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, u32XcptPFMask);
10672 if (pVmcsInfoNstGst->u32XcptPFMatch != u32XcptPFMatch)
10673 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, u32XcptPFMatch);
10674 if ( !(u32ProcCtls & VMX_PROC_CTLS_PAUSE_EXIT)
10675 && (u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
10676 {
10677 Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT);
10678 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, cPleGapTicks);
10679 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, cPleWindowTicks);
10680 }
10681 if (u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
10682 {
10683 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
10684 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL, HCPhysVirtApic);
10685 }
10686 if (u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
10687 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, HCPhysApicAccess);
10688 rc |= VMXWriteVmcsNw(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, uPendingDbgXcpts);
10689 AssertRC(rc);
10690
10691 /*
10692 * Update the nested-guest VMCS cache.
10693 */
10694 pVmcsInfoNstGst->u32PinCtls = u32PinCtls;
10695 pVmcsInfoNstGst->u32ProcCtls = u32ProcCtls;
10696 pVmcsInfoNstGst->u32ProcCtls2 = u32ProcCtls2;
10697 pVmcsInfoNstGst->u32XcptBitmap = u32XcptBitmap;
10698 pVmcsInfoNstGst->u64Cr0Mask = u64Cr0Mask;
10699 pVmcsInfoNstGst->u64Cr4Mask = u64Cr4Mask;
10700 pVmcsInfoNstGst->u32XcptPFMask = u32XcptPFMask;
10701 pVmcsInfoNstGst->u32XcptPFMatch = u32XcptPFMatch;
10702 pVmcsInfoNstGst->HCPhysVirtApic = HCPhysVirtApic;
10703
10704 /*
10705 * We need to flush the TLB if we are switching the APIC-access page address.
10706 * See Intel spec. 28.3.3.4 "Guidelines for Use of the INVEPT Instruction".
10707 */
10708 if (u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
10709 pVCpu->hm.s.vmx.fSwitchedNstGstFlushTlb = true;
10710
10711 /*
10712 * MSR bitmap.
10713 *
10714 * The MSR bitmap address has already been initialized while setting up the nested-guest
10715 * VMCS; here we only need to merge the MSR bitmaps.
10716 */
10717 if (u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
10718 hmR0VmxMergeMsrBitmapNested(pVCpu, pVmcsInfoNstGst, pVmcsInfoGst);
10719
10720 return VINF_SUCCESS;
10721}
10722#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
10723
10724
10725/**
10726 * Does the preparations before executing guest code in VT-x.
10727 *
10728 * This may cause longjmps to ring-3 and may even result in rescheduling to the
10729 * recompiler/IEM. We must be cautious what we do here regarding committing
10730 * guest-state information into the VMCS assuming we assuredly execute the
10731 * guest in VT-x mode.
10732 *
10733 * If we fall back to the recompiler/IEM after updating the VMCS and clearing
10734 * the common-state (TRPM/forceflags), we must undo those changes so that the
10735 * recompiler/IEM can (and should) use them when it resumes guest execution.
10736 * Otherwise such operations must be done when we can no longer exit to ring-3.
10737 *
10738 * @returns Strict VBox status code (i.e. informational status codes too).
10739 * @retval VINF_SUCCESS if we can proceed with running the guest, interrupts
10740 * have been disabled.
10741 * @retval VINF_VMX_VMEXIT if a nested-guest VM-exit occurs (e.g., while evaluating
10742 * pending events).
10743 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a
10744 * double-fault into the guest.
10745 * @retval VINF_EM_DBG_STEPPED if @a fStepping is true and an event was
10746 * dispatched directly.
10747 * @retval VINF_* scheduling changes, we have to go back to ring-3.
10748 *
10749 * @param pVCpu The cross context virtual CPU structure.
10750 * @param pVmxTransient The VMX-transient structure.
10751 * @param fStepping Whether we are single-stepping the guest in the
10752 * hypervisor debugger. Makes us ignore some of the reasons
10753 * for returning to ring-3, and return VINF_EM_DBG_STEPPED
10754 * if event dispatching took place.
10755 */
10756static VBOXSTRICTRC hmR0VmxPreRunGuest(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, bool fStepping)
10757{
10758 Assert(VMMRZCallRing3IsEnabled(pVCpu));
10759
10760 Log4Func(("fIsNested=%RTbool fStepping=%RTbool\n", pVmxTransient->fIsNestedGuest, fStepping));
10761
10762#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
10763 if (pVmxTransient->fIsNestedGuest)
10764 {
10765 RT_NOREF2(pVCpu, fStepping);
10766 Log2Func(("Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
10767 return VINF_EM_RESCHEDULE_REM;
10768 }
10769#endif
10770
10771 /*
10772 * Check and process force flag actions, some of which might require us to go back to ring-3.
10773 */
10774 VBOXSTRICTRC rcStrict = hmR0VmxCheckForceFlags(pVCpu, pVmxTransient, fStepping);
10775 if (rcStrict == VINF_SUCCESS)
10776 {
10777 /* FFs don't get set all the time. */
10778#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10779 if ( pVmxTransient->fIsNestedGuest
10780 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
10781 {
10782 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
10783 return VINF_VMX_VMEXIT;
10784 }
10785#endif
10786 }
10787 else
10788 return rcStrict;
10789
10790 /*
10791 * Virtualize memory-mapped accesses to the physical APIC (may take locks).
10792 */
10793 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10794 if ( !pVCpu->hm.s.vmx.u64GstMsrApicBase
10795 && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
10796 && PDMHasApic(pVM))
10797 {
10798 int rc = hmR0VmxMapHCApicAccessPage(pVCpu);
10799 AssertRCReturn(rc, rc);
10800 }
10801
10802#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10803 /*
10804 * Merge guest VMCS controls with the nested-guest VMCS controls.
10805 *
10806 * Even if we have not executed the guest prior to this (e.g. when resuming from a
10807 * saved state), we should be okay with merging controls as we initialize the
10808 * guest VMCS controls as part of VM setup phase.
10809 */
10810 if ( pVmxTransient->fIsNestedGuest
10811 && !pVCpu->hm.s.vmx.fMergedNstGstCtls)
10812 {
10813 int rc = hmR0VmxMergeVmcsNested(pVCpu);
10814 AssertRCReturn(rc, rc);
10815 pVCpu->hm.s.vmx.fMergedNstGstCtls = true;
10816 }
10817#endif
10818
10819 /*
10820 * Evaluate events to be injected into the guest.
10821 *
10822 * Events in TRPM can be injected without inspecting the guest state.
10823 * If any new events (interrupts/NMI) are pending currently, we try to set up the
10824 * guest to cause a VM-exit the next time it is ready to receive the event.
10825 */
10826 if (TRPMHasTrap(pVCpu))
10827 hmR0VmxTrpmTrapToPendingEvent(pVCpu);
10828
10829 uint32_t fIntrState;
10830 rcStrict = hmR0VmxEvaluatePendingEvent(pVCpu, pVmxTransient, &fIntrState);
10831
10832#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10833 /*
10834 * While evaluating pending events, if something failed (unlikely) or if we were
10835 * preparing to run a nested-guest but performed a nested-guest VM-exit, we should bail.
10836 */
10837 if (rcStrict != VINF_SUCCESS)
10838 return rcStrict;
10839 if ( pVmxTransient->fIsNestedGuest
10840 && !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
10841 {
10842 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
10843 return VINF_VMX_VMEXIT;
10844 }
10845#else
10846 Assert(rcStrict == VINF_SUCCESS);
10847#endif
10848
10849 /*
10850 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus
10851 * needs to be done with longjmps or interrupts + preemption enabled. Event injection might
10852 * also result in triple-faulting the VM.
10853 *
10854 * With nested-guests, the above does not apply since unrestricted guest execution is a
10855 * requirement. Regardless, we do this here to avoid duplicating code elsewhere.
10856 */
10857 rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pVmxTransient, fIntrState, fStepping);
10858 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10859 { /* likely */ }
10860 else
10861 {
10862 AssertMsg(rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping),
10863 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
10864 return rcStrict;
10865 }
10866
10867 /*
10868 * A longjump might result in importing CR3 even for VM-exits that don't necessarily
10869 * import CR3 themselves. We will need to update them here, as even as late as the above
10870 * hmR0VmxInjectPendingEvent() call may lazily import guest-CPU state on demand causing
10871 * the below force flags to be set.
10872 */
10873 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
10874 {
10875 Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_CR3));
10876 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
10877 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
10878 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
10879 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
10880 }
10881
10882#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10883 /* Paranoia. */
10884 Assert(!pVmxTransient->fIsNestedGuest || CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
10885#endif
10886
10887 /*
10888 * No longjmps to ring-3 from this point on!!!
10889 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
10890 * This also disables flushing of the R0-logger instance (if any).
10891 */
10892 VMMRZCallRing3Disable(pVCpu);
10893
10894 /*
10895 * Export the guest state bits.
10896 *
10897 * We cannot perform longjmps while loading the guest state because we do not preserve the
10898 * host/guest state (although the VMCS will be preserved) across longjmps which can cause
10899 * CPU migration.
10900 *
10901 * If we are injecting events to a real-on-v86 mode guest, we would have updated RIP and some segment
10902 * registers. Hence, exporting of the guest state needs to be done -after- injection of events.
10903 */
10904 rcStrict = hmR0VmxExportGuestStateOptimal(pVCpu, pVmxTransient);
10905 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10906 { /* likely */ }
10907 else
10908 {
10909 VMMRZCallRing3Enable(pVCpu);
10910 return rcStrict;
10911 }
10912
10913 /*
10914 * We disable interrupts so that we don't miss any interrupts that would flag preemption
10915 * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with
10916 * preemption disabled for a while. Since this is purely to aid the
10917 * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily reenable and
10918 * disable interrupts on NT.
10919 *
10920 * We need to check for force-flags that could've possibly been altered since we last
10921 * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section,
10922 * see @bugref{6398}).
10923 *
10924 * We also check a couple of other force-flags as a last opportunity to get the EMT back
10925 * to ring-3 before executing guest code.
10926 */
10927 pVmxTransient->fEFlags = ASMIntDisableFlags();
10928
10929 if ( ( !VM_FF_IS_ANY_SET(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
10930 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
10931 || ( fStepping /* Optimized for the non-stepping case, so a bit of unnecessary work when stepping. */
10932 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HM_TO_R3_MASK & ~(VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT))) )
10933 {
10934 if (!RTThreadPreemptIsPending(NIL_RTTHREAD))
10935 {
10936#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10937 /*
10938 * If we are executing a nested-guest make sure that we should intercept subsequent
10939 * events. The one we are injecting might be part of VM-entry. This is mainly to keep
10940 * the VM-exit instruction emulation happy.
10941 */
10942 if (pVmxTransient->fIsNestedGuest)
10943 CPUMSetGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx, true);
10944#endif
10945
10946 /*
10947 * We've injected any pending events. This is really the point of no return (to ring-3).
10948 *
10949 * Note! The caller expects to continue with interrupts & longjmps disabled on successful
10950 * returns from this function, so do -not- enable them here.
10951 */
10952 pVCpu->hm.s.Event.fPending = false;
10953 return VINF_SUCCESS;
10954 }
10955
10956 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPendingHostIrq);
10957 rcStrict = VINF_EM_RAW_INTERRUPT;
10958 }
10959 else
10960 {
10961 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
10962 rcStrict = VINF_EM_RAW_TO_R3;
10963 }
10964
10965 ASMSetFlags(pVmxTransient->fEFlags);
10966 VMMRZCallRing3Enable(pVCpu);
10967
10968 return rcStrict;
10969}
10970
10971
10972/**
10973 * Final preparations before executing guest code using hardware-assisted VMX.
10974 *
10975 * We can no longer get preempted to a different host CPU and there are no returns
10976 * to ring-3. We ignore any errors that may happen from this point (e.g. VMWRITE
10977 * failures), this function is not intended to fail sans unrecoverable hardware
10978 * errors.
10979 *
10980 * @param pVCpu The cross context virtual CPU structure.
10981 * @param pVmxTransient The VMX-transient structure.
10982 *
10983 * @remarks Called with preemption disabled.
10984 * @remarks No-long-jump zone!!!
10985 */
10986static void hmR0VmxPreRunGuestCommitted(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
10987{
10988 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
10989 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
10990 Assert(!pVCpu->hm.s.Event.fPending);
10991
10992 /*
10993 * Indicate start of guest execution and where poking EMT out of guest-context is recognized.
10994 */
10995 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
10996 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
10997
10998 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
10999 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11000 PHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
11001 RTCPUID const idCurrentCpu = pHostCpu->idCpu;
11002
11003 if (!CPUMIsGuestFPUStateActive(pVCpu))
11004 {
11005 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x);
11006 if (CPUMR0LoadGuestFPU(pVM, pVCpu) == VINF_CPUM_HOST_CR0_MODIFIED)
11007 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT;
11008 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x);
11009 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu);
11010 }
11011
11012 /*
11013 * Re-export the host state bits as we may've been preempted (only happens when
11014 * thread-context hooks are used or when the VM start function changes) or if
11015 * the host CR0 is modified while loading the guest FPU state above.
11016 *
11017 * The 64-on-32 switcher saves the (64-bit) host state into the VMCS and if we
11018 * changed the switcher back to 32-bit, we *must* save the 32-bit host state here,
11019 * see @bugref{8432}.
11020 *
11021 * This may also happen when switching to/from a nested-guest VMCS without leaving
11022 * ring-0.
11023 */
11024 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)
11025 {
11026 hmR0VmxExportHostState(pVCpu);
11027 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportHostState);
11028 }
11029 Assert(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT));
11030
11031 /*
11032 * Export the state shared between host and guest (FPU, debug, lazy MSRs).
11033 */
11034 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)
11035 hmR0VmxExportSharedState(pVCpu, pVmxTransient);
11036 AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));
11037
11038 /*
11039 * Store status of the shared guest/host debug state at the time of VM-entry.
11040 */
11041 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
11042 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
11043
11044 /*
11045 * Always cache the TPR-shadow if the virtual-APIC page exists, thereby skipping
11046 * more than one conditional check. The post-run side of our code shall determine
11047 * if it needs to sync. the virtual APIC TPR with the TPR-shadow.
11048 */
11049 if (pVmcsInfo->pbVirtApic)
11050 pVmxTransient->u8GuestTpr = pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR];
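 /* XAPIC_OFF_TPR is the task-priority register at offset 0x80 of the virtual-APIC page. */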
11051
11052 /*
11053 * Update the host MSRs values in the VM-exit MSR-load area.
11054 */
11055 if (!pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs)
11056 {
11057 if (pVmcsInfo->cExitMsrLoad > 0)
11058 hmR0VmxUpdateAutoLoadHostMsrs(pVCpu, pVmcsInfo);
11059 pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs = true;
11060 }
11061
11062 /*
11063 * Evaluate if we need to intercept guest RDTSC/P accesses. Set up the
11064 * VMX-preemption timer based on the next virtual sync clock deadline.
11065 */
11066 if ( !pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer
11067 || idCurrentCpu != pVCpu->hmr0.s.idLastCpu)
11068 {
11069 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pVmxTransient, idCurrentCpu);
11070 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = true;
11071 }
11072
11073 /* Record statistics of how often we use TSC offsetting as opposed to intercepting RDTSC/P. */
11074 bool const fIsRdtscIntercepted = RT_BOOL(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT);
11075 if (!fIsRdtscIntercepted)
11076 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
11077 else
11078 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
11079
11080 ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */
11081 hmR0VmxFlushTaggedTlb(pHostCpu, pVCpu, pVmcsInfo); /* Invalidate the appropriate guest entries from the TLB. */
11082 Assert(idCurrentCpu == pVCpu->hmr0.s.idLastCpu);
11083 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Record the error reporting info. with the current host CPU. */
11084 pVmcsInfo->idHostCpuState = idCurrentCpu; /* Record the CPU for which the host-state has been exported. */
11085 pVmcsInfo->idHostCpuExec = idCurrentCpu; /* Record the CPU on which we shall execute. */
11086
11087 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
11088
11089 TMNotifyStartOfExecution(pVM, pVCpu); /* Notify TM to resume its clocks when TSC is tied to execution,
11090 as we're about to start executing the guest. */
11091
11092 /*
11093 * Load the guest TSC_AUX MSR when we are not intercepting RDTSCP.
11094 *
11095 * This is done this late as updating the TSC offsetting/preemption timer above
11096 * figures out if we can skip intercepting RDTSCP by calculating the number of
11097 * host CPU ticks till the next virtual sync deadline (for the dynamic case).
11098 */
11099 if ( (pVmcsInfo->u32ProcCtls2 & VMX_PROC_CTLS2_RDTSCP)
11100 && !fIsRdtscIntercepted)
11101 {
11102 hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_TSC_AUX);
11103
11104 /* NB: Because we call hmR0VmxAddAutoLoadStoreMsr with fUpdateHostMsr=true,
11105 it's safe even after hmR0VmxUpdateAutoLoadHostMsrs has already been done. */
11106 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu),
11107 true /* fSetReadWrite */, true /* fUpdateHostMsr */);
11108 AssertRC(rc);
11109 Assert(!pVmxTransient->fRemoveTscAuxMsr);
11110 pVmxTransient->fRemoveTscAuxMsr = true;
11111 }
11112
11113#ifdef VBOX_STRICT
11114 Assert(pVCpu->hmr0.s.vmx.fUpdatedHostAutoMsrs);
11115 hmR0VmxCheckAutoLoadStoreMsrs(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
11116 hmR0VmxCheckHostEferMsr(pVmcsInfo);
11117 AssertRC(hmR0VmxCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest));
11118#endif
11119
11120#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
11121 /** @todo r=ramshankar: We can now probably use iemVmxVmentryCheckGuestState here.
11122 * Add a PVMXMSRS parameter to it, so that IEM can look at the host MSRs,
11123 * see @bugref{9180#c54}. */
11124 uint32_t const uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pVmcsInfo);
11125 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
11126 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
11127#endif
11128}
11129
11130
11131/**
11132 * First C routine invoked after running guest code using hardware-assisted VMX.
11133 *
11134 * @param pVCpu The cross context virtual CPU structure.
11135 * @param pVmxTransient The VMX-transient structure.
11136 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
11137 *
11138 * @remarks Called with interrupts disabled, and returns with interrupts enabled!
11139 *
11140 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
11141 * unconditionally when it is safe to do so.
11142 */
11143static void hmR0VmxPostRunGuest(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, int rcVMRun)
11144{
11145 ASMAtomicUoWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
11146 ASMAtomicIncU32(&pVCpu->hmr0.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for EMT poking. */
11147 pVCpu->hm.s.fCtxChanged = 0; /* Exits/longjmps to ring-3 requires saving the guest state. */
11148 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
11149 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
11150 pVmxTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */
11151
11152 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11153 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_RDTSC_EXIT))
11154 {
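 /* Guest TSC = host TSC at VM-exit + the VMCS TSC offset. For a nested-guest this yields
  * the nested-guest's view, from which CPUM removes the nested offset to recover the
  * outer guest TSC that TM tracks. */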
11155 uint64_t uGstTsc;
11156 if (!pVmxTransient->fIsNestedGuest)
11157 uGstTsc = pVCpu->hmr0.s.uTscExit + pVmcsInfo->u64TscOffset;
11158 else
11159 {
11160 uint64_t const uNstGstTsc = pVCpu->hmr0.s.uTscExit + pVmcsInfo->u64TscOffset;
11161 uGstTsc = CPUMRemoveNestedGuestTscOffset(pVCpu, uNstGstTsc);
11162 }
11163 TMCpuTickSetLastSeen(pVCpu, uGstTsc); /* Update TM with the guest TSC. */
11164 }
11165
11166 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatPreExit, x);
11167 TMNotifyEndOfExecution(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->hmr0.s.uTscExit); /* Notify TM that the guest is no longer running. */
11168 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
11169
11170 pVCpu->hmr0.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Some host state messed up by VMX needs restoring. */
11171 pVmcsInfo->fVmcsState |= VMX_V_VMCS_LAUNCH_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
11172#ifdef VBOX_STRICT
11173 hmR0VmxCheckHostEferMsr(pVmcsInfo); /* Verify that the host EFER MSR wasn't modified. */
11174#endif
11175 Assert(!ASMIntAreEnabled());
11176 ASMSetFlags(pVmxTransient->fEFlags); /* Enable interrupts. */
11177 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
11178
11179#ifdef HMVMX_ALWAYS_CLEAN_TRANSIENT
11180 /*
11181 * Clean all the VMCS fields in the transient structure before reading
11182 * anything from the VMCS.
11183 */
11184 pVmxTransient->uExitReason = 0;
11185 pVmxTransient->uExitIntErrorCode = 0;
11186 pVmxTransient->uExitQual = 0;
11187 pVmxTransient->uGuestLinearAddr = 0;
11188 pVmxTransient->uExitIntInfo = 0;
11189 pVmxTransient->cbExitInstr = 0;
11190 pVmxTransient->ExitInstrInfo.u = 0;
11191 pVmxTransient->uEntryIntInfo = 0;
11192 pVmxTransient->uEntryXcptErrorCode = 0;
11193 pVmxTransient->cbEntryInstr = 0;
11194 pVmxTransient->uIdtVectoringInfo = 0;
11195 pVmxTransient->uIdtVectoringErrorCode = 0;
11196#endif
11197
11198 /*
11199 * Save the basic VM-exit reason and check if the VM-entry failed.
11200 * See Intel spec. 24.9.1 "Basic VM-exit Information".
11201 */
11202 uint32_t uExitReason;
11203 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
11204 AssertRC(rc);
11205 pVmxTransient->uExitReason = VMX_EXIT_REASON_BASIC(uExitReason);
11206 pVmxTransient->fVMEntryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
11207
11208 /*
11209 * Log the VM-exit before logging anything else as otherwise it might be a
11210 * tad confusing what happens before and after the world-switch.
11211 */
11212 HMVMX_LOG_EXIT(pVCpu, uExitReason);
11213
11214 /*
11215 * Remove the TSC_AUX MSR from the auto-load/store MSR area and reset any MSR
11216 * bitmap permissions, if it was added before VM-entry.
11217 */
11218 if (pVmxTransient->fRemoveTscAuxMsr)
11219 {
11220 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K8_TSC_AUX);
11221 pVmxTransient->fRemoveTscAuxMsr = false;
11222 }
11223
11224 /*
11225 * Check if VMLAUNCH/VMRESUME succeeded.
11226 * If this failed, we cause a guru meditation and cease further execution.
11227 *
11228 * However, if we are executing a nested-guest we might fail if we use the
11229 * fast path rather than fully emulating the VMLAUNCH/VMRESUME instruction in IEM.
11230 */
11231 if (RT_LIKELY(rcVMRun == VINF_SUCCESS))
11232 {
11233 /*
11234 * Update the VM-exit history array here even if the VM-entry failed due to:
11235 * - Invalid guest state.
11236 * - MSR loading.
11237 * - Machine-check event.
11238 *
11239 * In any of the above cases we will still have a "valid" VM-exit reason
11240 * despite @a fVMEntryFailed being true.
11241 *
11242 * See Intel spec. 26.7 "VM-Entry failures during or after loading guest state".
11243 *
11244 * Note! We don't have CS or RIP at this point. Will probably address that later
11245 * by amending the history entry added here.
11246 */
11247 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_VMX, pVmxTransient->uExitReason & EMEXIT_F_TYPE_MASK),
11248 UINT64_MAX, pVCpu->hmr0.s.uTscExit);
11249
11250 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
11251 {
11252 VMMRZCallRing3Enable(pVCpu);
11253 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
11254
11255#ifdef HMVMX_ALWAYS_SAVE_RO_GUEST_STATE
11256 hmR0VmxReadAllRoFieldsVmcs(pVmxTransient);
11257#endif
11258
11259 /*
11260 * Always import the guest-interruptibility state as we need it while evaluating
11261 * injecting events on re-entry.
11262 *
11263 * We don't import CR0 (when unrestricted guest execution is unavailable) despite
11264 * checking for real-mode while exporting the state because all bits that cause
11265 * mode changes wrt CR0 are intercepted.
11266 */
11267 uint64_t const fImportMask = CPUMCTX_EXTRN_INHIBIT_INT
11268 | CPUMCTX_EXTRN_INHIBIT_NMI
11269#if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE)
11270 | HMVMX_CPUMCTX_EXTRN_ALL
11271#elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS)
11272 | CPUMCTX_EXTRN_RFLAGS
11273#endif
11274 ;
11275 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fImportMask);
11276 AssertRC(rc);
11277
11278 /*
11279 * Sync the TPR shadow with our APIC state.
11280 */
11281 if ( !pVmxTransient->fIsNestedGuest
11282 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
11283 {
11284 Assert(pVmcsInfo->pbVirtApic);
11285 if (pVmxTransient->u8GuestTpr != pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR])
11286 {
11287 rc = APICSetTpr(pVCpu, pVmcsInfo->pbVirtApic[XAPIC_OFF_TPR]);
11288 AssertRC(rc);
11289 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
11290 }
11291 }
11292
11293 Assert(VMMRZCallRing3IsEnabled(pVCpu));
11294 Assert( pVmxTransient->fWasGuestDebugStateActive == false
11295 || pVmxTransient->fWasHyperDebugStateActive == false);
11296 return;
11297 }
11298 }
11299#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
11300 else if (pVmxTransient->fIsNestedGuest)
11301 AssertMsgFailed(("VMLAUNCH/VMRESUME failed but shouldn't happen when VMLAUNCH/VMRESUME was emulated in IEM!\n"));
11302#endif
11303 else
11304 Log4Func(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed));
11305
11306 VMMRZCallRing3Enable(pVCpu);
11307}
11308
11309
11310/**
11311 * Runs the guest code using hardware-assisted VMX the normal way.
11312 *
11313 * @returns VBox status code.
11314 * @param pVCpu The cross context virtual CPU structure.
11315 * @param pcLoops Pointer to the number of executed loops.
11316 */
11317static VBOXSTRICTRC hmR0VmxRunGuestCodeNormal(PVMCPUCC pVCpu, uint32_t *pcLoops)
11318{
11319 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
11320 Assert(pcLoops);
11321 Assert(*pcLoops <= cMaxResumeLoops);
11322 Assert(!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
11323
11324#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
11325 /*
11326 * Switch to the guest VMCS as we may have transitioned from executing the nested-guest
11327 * without leaving ring-0. Otherwise, if we came from ring-3 we would have loaded the
11328 * guest VMCS while entering the VMX ring-0 session.
11329 */
11330 if (pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs)
11331 {
11332 int rc = hmR0VmxSwitchToGstOrNstGstVmcs(pVCpu, false /* fSwitchToNstGstVmcs */);
11333 if (RT_SUCCESS(rc))
11334 { /* likely */ }
11335 else
11336 {
11337 LogRelFunc(("Failed to switch to the guest VMCS. rc=%Rrc\n", rc));
11338 return rc;
11339 }
11340 }
11341#endif
11342
11343 VMXTRANSIENT VmxTransient;
11344 RT_ZERO(VmxTransient);
11345 VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
11346
11347 /* Paranoia. */
11348 Assert(VmxTransient.pVmcsInfo == &pVCpu->hmr0.s.vmx.VmcsInfo);
11349
11350 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
11351 for (;;)
11352 {
11353 Assert(!HMR0SuspendPending());
11354 HMVMX_ASSERT_CPU_SAFE(pVCpu);
11355 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
11356
11357 /*
11358 * Preparatory work for running guest code; this may force us to
11359 * return to ring-3.
11360 *
11361 * Warning! This bugger disables interrupts on VINF_SUCCESS!
11362 */
11363 rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */);
11364 if (rcStrict != VINF_SUCCESS)
11365 break;
11366
11367 /* Interrupts are disabled at this point! */
11368 hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient);
11369 int rcRun = hmR0VmxRunGuest(pVCpu, &VmxTransient);
11370 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
11371 /* Interrupts are re-enabled at this point! */
11372
11373 /*
11374 * Check for errors with running the VM (VMLAUNCH/VMRESUME).
11375 */
11376 if (RT_SUCCESS(rcRun))
11377 { /* very likely */ }
11378 else
11379 {
11380 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
11381 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
11382 return rcRun;
11383 }
11384
11385 /*
11386 * Profile the VM-exit.
11387 */
11388 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
11389 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
11390 STAM_COUNTER_INC(&pVCpu->hm.s.aStatExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
11391 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
11392 HMVMX_START_EXIT_DISPATCH_PROF();
11393
11394 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
11395
11396 /*
11397 * Handle the VM-exit.
11398 */
11399#ifdef HMVMX_USE_FUNCTION_TABLE
11400 rcStrict = g_aVMExitHandlers[VmxTransient.uExitReason].pfn(pVCpu, &VmxTransient);
11401#else
11402 rcStrict = hmR0VmxHandleExit(pVCpu, &VmxTransient);
11403#endif
11404 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
11405 if (rcStrict == VINF_SUCCESS)
11406 {
11407 if (++(*pcLoops) <= cMaxResumeLoops)
11408 continue;
11409 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
11410 rcStrict = VINF_EM_RAW_INTERRUPT;
11411 }
11412 break;
11413 }
11414
11415 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
11416 return rcStrict;
11417}
11418
11419
11420#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
11421/**
11422 * Runs the nested-guest code using hardware-assisted VMX.
11423 *
11424 * @returns VBox status code.
11425 * @param pVCpu The cross context virtual CPU structure.
11426 * @param pcLoops Pointer to the number of executed loops.
11427 *
11428 * @sa hmR0VmxRunGuestCodeNormal.
11429 */
11430static VBOXSTRICTRC hmR0VmxRunGuestCodeNested(PVMCPUCC pVCpu, uint32_t *pcLoops)
11431{
11432 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
11433 Assert(pcLoops);
11434 Assert(*pcLoops <= cMaxResumeLoops);
11435 Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
11436
11437 /*
11438 * Switch to the nested-guest VMCS as we may have transitioned from executing the
11439 * guest without leaving ring-0. Otherwise, if we came from ring-3 we would have
11440 * loaded the nested-guest VMCS while entering the VMX ring-0 session.
11441 */
11442 if (!pVCpu->hmr0.s.vmx.fSwitchedToNstGstVmcs)
11443 {
11444 int rc = hmR0VmxSwitchToGstOrNstGstVmcs(pVCpu, true /* fSwitchToNstGstVmcs */);
11445 if (RT_SUCCESS(rc))
11446 { /* likely */ }
11447 else
11448 {
11449 LogRelFunc(("Failed to switch to the nested-guest VMCS. rc=%Rrc\n", rc));
11450 return rc;
11451 }
11452 }
11453
11454 VMXTRANSIENT VmxTransient;
11455 RT_ZERO(VmxTransient);
11456 VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
11457 VmxTransient.fIsNestedGuest = true;
11458
11459 /* Paranoia. */
11460 Assert(VmxTransient.pVmcsInfo == &pVCpu->hmr0.s.vmx.VmcsInfoNstGst);
11461
11462 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
11463 for (;;)
11464 {
11465 Assert(!HMR0SuspendPending());
11466 HMVMX_ASSERT_CPU_SAFE(pVCpu);
11467 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
11468
11469 /*
11470 * Preparatory work for running nested-guest code; this may force us to
11471 * return to ring-3.
11472 *
11473 * Warning! This bugger disables interrupts on VINF_SUCCESS!
11474 */
11475 rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, false /* fStepping */);
11476 if (rcStrict != VINF_SUCCESS)
11477 break;
11478
11479 /* Interrupts are disabled at this point! */
11480 hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient);
11481 int rcRun = hmR0VmxRunGuest(pVCpu, &VmxTransient);
11482 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
11483 /* Interrupts are re-enabled at this point! */
11484
11485 /*
11486 * Check for errors with running the VM (VMLAUNCH/VMRESUME).
11487 */
11488 if (RT_SUCCESS(rcRun))
11489 { /* very likely */ }
11490 else
11491 {
11492 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
11493 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
11494 return rcRun;
11495 }
11496
11497 /*
11498 * Profile the VM-exit.
11499 */
11500 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
11501 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
11502 STAM_COUNTER_INC(&pVCpu->hm.s.StatNestedExitAll);
11503 STAM_COUNTER_INC(&pVCpu->hm.s.aStatNestedExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
11504 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
11505 HMVMX_START_EXIT_DISPATCH_PROF();
11506
11507 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
11508
11509 /*
11510 * Handle the VM-exit.
11511 */
11512 rcStrict = hmR0VmxHandleExitNested(pVCpu, &VmxTransient);
11513 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
11514 if (rcStrict == VINF_SUCCESS)
11515 {
11516 if (!CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
11517 {
11518 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
11519 rcStrict = VINF_VMX_VMEXIT;
11520 }
11521 else
11522 {
11523 if (++(*pcLoops) <= cMaxResumeLoops)
11524 continue;
11525 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
11526 rcStrict = VINF_EM_RAW_INTERRUPT;
11527 }
11528 }
11529 else
11530 Assert(rcStrict != VINF_VMX_VMEXIT);
11531 break;
11532 }
11533
11534 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
11535 return rcStrict;
11536}
11537#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
11538
11539
11540/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
11541 * probes.
11542 *
11543 * The following few functions and associated structure contain the bloat
11544 * necessary for providing detailed debug events and dtrace probes as well as
11545 * reliable host side single stepping. This works on the principle of
11546 * "subclassing" the normal execution loop and workers. We replace the loop
11547 * method completely and override selected helpers to add necessary adjustments
11548 * to their core operation.
11549 *
11550 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
11551 * any performance for debug and analysis features.
11552 *
11553 * @{
11554 */
11555
11556/**
11557 * Transient per-VCPU debug state of the VMCS and related info that we save/restore
11558 * in the debug run loop.
11559 */
11560typedef struct VMXRUNDBGSTATE
11561{
11562 /** The RIP we started executing at. This is for detecting that we stepped. */
11563 uint64_t uRipStart;
11564 /** The CS we started executing with. */
11565 uint16_t uCsStart;
11566
11567 /** Whether we've actually modified the 1st execution control field. */
11568 bool fModifiedProcCtls : 1;
11569 /** Whether we've actually modified the 2nd execution control field. */
11570 bool fModifiedProcCtls2 : 1;
11571 /** Whether we've actually modified the exception bitmap. */
11572 bool fModifiedXcptBitmap : 1;
11573
11574 /** We desire the modified CR0 mask to be cleared. */
11575 bool fClearCr0Mask : 1;
11576 /** We desire the modified CR4 mask to be cleared. */
11577 bool fClearCr4Mask : 1;
11578 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
11579 uint32_t fCpe1Extra;
11580 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
11581 uint32_t fCpe1Unwanted;
11582 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
11583 uint32_t fCpe2Extra;
11584 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
11585 uint32_t bmXcptExtra;
11586 /** The sequence number of the Dtrace provider settings the state was
11587 * configured against. */
11588 uint32_t uDtraceSettingsSeqNo;
11589 /** VM-exits to check (one bit per VM-exit). */
11590 uint32_t bmExitsToCheck[3];
11591
11592 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
11593 uint32_t fProcCtlsInitial;
11594 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
11595 uint32_t fProcCtls2Initial;
11596 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
11597 uint32_t bmXcptInitial;
11598} VMXRUNDBGSTATE;
11599AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
11600typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;
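/*
 * Condensed, illustrative sketch of how the debug state is used (see
 * hmR0VmxRunGuestCodeDebug below for the real thing):
 *
 *     VMXRUNDBGSTATE DbgState;
 *     hmR0VmxRunDebugStateInit(pVCpu, &VmxTransient, &DbgState);           // snapshot initial ctls + RIP/CS
 *     hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState); // derive wanted exits from DBGF/dtrace
 *     for (;;)
 *     {
 *         hmR0VmxPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState); // push extra ctls into the VMCS
 *         // ... run the guest and handle the VM-exit ...
 *         rcStrict = hmR0VmxRunDebugStateRevert(pVCpu, &VmxTransient, &DbgState, rcStrict); // restore the ctls
 *     }
 */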
11601
11602
11603/**
11604 * Initializes the VMXRUNDBGSTATE structure.
11605 *
11606 * @param pVCpu The cross context virtual CPU structure of the
11607 * calling EMT.
11608 * @param pVmxTransient The VMX-transient structure.
11609 * @param pDbgState The debug state to initialize.
11610 */
11611static void hmR0VmxRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11612{
11613 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;
11614 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;
11615
11616 pDbgState->fModifiedProcCtls = false;
11617 pDbgState->fModifiedProcCtls2 = false;
11618 pDbgState->fModifiedXcptBitmap = false;
11619 pDbgState->fClearCr0Mask = false;
11620 pDbgState->fClearCr4Mask = false;
11621 pDbgState->fCpe1Extra = 0;
11622 pDbgState->fCpe1Unwanted = 0;
11623 pDbgState->fCpe2Extra = 0;
11624 pDbgState->bmXcptExtra = 0;
11625 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;
11626 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;
11627 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;
11628}
11629
11630
11631/**
11632 * Updates the VMCS fields with changes requested by @a pDbgState.
11633 *
11634 * This is performed after hmR0VmxPreRunGuestDebugStateUpdate as well as
11635 * immediately before executing guest code, i.e. when interrupts are disabled.
11636 * We don't check status codes here as we cannot easily assert or return in the
11637 * latter case.
11638 *
11639 * @param pVCpu The cross context virtual CPU structure.
11640 * @param pVmxTransient The VMX-transient structure.
11641 * @param pDbgState The debug state.
11642 */
11643static void hmR0VmxPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11644{
11645 /*
11646 * Ensure desired flags in VMCS control fields are set.
11647 * (Ignoring write failure here, as we're committed and it's just debug extras.)
11648 *
11649 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so
11650 * there should be no stale data in pCtx at this point.
11651 */
11652 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11653 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra
11654 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))
11655 {
11656 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;
11657 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;
11658 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
11659 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));
11660 pDbgState->fModifiedProcCtls = true;
11661 }
11662
11663 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)
11664 {
11665 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;
11666 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);
11667 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));
11668 pDbgState->fModifiedProcCtls2 = true;
11669 }
11670
11671 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)
11672 {
11673 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;
11674 VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);
11675 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));
11676 pDbgState->fModifiedXcptBitmap = true;
11677 }
11678
11679 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)
11680 {
11681 pVmcsInfo->u64Cr0Mask = 0;
11682 VMXWriteVmcsNw(VMX_VMCS_CTRL_CR0_MASK, 0);
11683 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));
11684 }
11685
11686 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)
11687 {
11688 pVmcsInfo->u64Cr4Mask = 0;
11689 VMXWriteVmcsNw(VMX_VMCS_CTRL_CR4_MASK, 0);
11690 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));
11691 }
11692
11693 NOREF(pVCpu);
11694}
11695
11696
11697/**
11698 * Restores VMCS fields that were changed by hmR0VmxPreRunGuestDebugStateApply for
11699 * re-entry next time around.
11700 *
11701 * @returns Strict VBox status code (i.e. informational status codes too).
11702 * @param pVCpu The cross context virtual CPU structure.
11703 * @param pVmxTransient The VMX-transient structure.
11704 * @param pDbgState The debug state.
11705 * @param rcStrict The return code from executing the guest using single
11706 * stepping.
11707 */
11708static VBOXSTRICTRC hmR0VmxRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,
11709 VBOXSTRICTRC rcStrict)
11710{
11711 /*
11712 * Restore VM-exit control settings as we may not reenter this function the
11713 * next time around.
11714 */
11715 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
11716
11717 /* We reload the initial value and trigger what recalculations we can the
11718 next time around. From the looks of things, that's all that's required atm. */
11719 if (pDbgState->fModifiedProcCtls)
11720 {
11721 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))
11722 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */
11723 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);
11724 AssertRC(rc2);
11725 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;
11726 }
11727
11728 /* We're currently the only ones messing with this one, so just restore the
11729 cached value and reload the field. */
11730 if ( pDbgState->fModifiedProcCtls2
11731 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)
11732 {
11733 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);
11734 AssertRC(rc2);
11735 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;
11736 }
11737
11738 /* If we've modified the exception bitmap, we restore it and trigger
11739 reloading and partial recalculation the next time around. */
11740 if (pDbgState->fModifiedXcptBitmap)
11741 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;
11742
11743 return rcStrict;
11744}
11745
11746
11747/**
11748 * Configures VM-exit controls for current DBGF and DTrace settings.
11749 *
11750 * This updates @a pDbgState and the VMCS execution control fields to reflect
11751 * the necessary VM-exits demanded by DBGF and DTrace.
11752 *
11753 * @param pVCpu The cross context virtual CPU structure.
11754 * @param pVmxTransient The VMX-transient structure. May update
11755 * fUpdatedTscOffsettingAndPreemptTimer.
11756 * @param pDbgState The debug state.
11757 */
11758static void hmR0VmxPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
11759{
11760 /*
11761 * Take down the dtrace serial number so we can spot changes.
11762 */
11763 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();
11764 ASMCompilerBarrier();
11765
11766 /*
11767 * We'll rebuild most of the middle block of data members (holding the
11768 * current settings) as we go along here, so start by clearing it all.
11769 */
11770 pDbgState->bmXcptExtra = 0;
11771 pDbgState->fCpe1Extra = 0;
11772 pDbgState->fCpe1Unwanted = 0;
11773 pDbgState->fCpe2Extra = 0;
11774 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)
11775 pDbgState->bmExitsToCheck[i] = 0;
11776
11777 /*
11778 * Software interrupts (INT XXh) - no idea how to trigger these...
11779 */
11780 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
11781 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)
11782 || VBOXVMM_INT_SOFTWARE_ENABLED())
11783 {
11784 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11785 }
11786
11787 /*
11788 * INT3 breakpoints - triggered by #BP exceptions.
11789 */
11790 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)
11791 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11792
11793 /*
11794 * Exception bitmap and XCPT events+probes.
11795 */
11796 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)
11797 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))
11798 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);
11799
11800 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);
11801 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);
11802 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);
11803 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);
11804 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);
11805 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD);
11806 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);
11807 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);
11808 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);
11809 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);
11810 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);
11811 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);
11812 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);
11813 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);
11814 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);
11815 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);
11816 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);
11817 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);
11818
11819 if (pDbgState->bmXcptExtra)
11820 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);
11821
11822 /*
11823 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.
11824 *
11825 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.
11826 * So, when adding/changing/removing please don't forget to update it.
11827 *
11828 * Some of the macros are picking up local variables to save horizontal space
11829 * (being able to see it in a table is the lesser evil here).
11830 */
11831#define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \
11832 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \
11833 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )
11834#define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \
11835 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11836 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11837 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11838 } else do { } while (0)
11839#define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \
11840 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11841 { \
11842 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \
11843 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11844 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11845 } else do { } while (0)
11846#define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \
11847 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11848 { \
11849 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \
11850 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11851 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11852 } else do { } while (0)
11853#define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \
11854 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \
11855 { \
11856 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \
11857 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \
11858 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \
11859 } else do { } while (0)
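/*
 * For clarity, an example of what one entry in the table below expands to
 * (illustrative, modulo whitespace):
 *
 *     SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT);
 *
 * becomes
 *
 *     if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
 *         || VBOXVMM_INSTR_HALT_ENABLED())
 *     {
 *         pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;
 *         AssertCompile((unsigned)VMX_EXIT_HLT < sizeof(pDbgState->bmExitsToCheck) * 8);
 *         ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);
 *     } else do { } while (0);
 *
 * i.e. it both requests the HLT-exiting control and marks VMX_EXIT_HLT for
 * the debug-event check done in hmR0VmxRunDebugHandleExit.
 */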
11860
11861 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */
11862 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */
11863 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */
11864 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */
11865 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */
11866
11867 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */
11868 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);
11869 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */
11870 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);
11871 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */
11872 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);
11873 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */
11874 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);
11875 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);
11876 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);
11877 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);
11878 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);
11879 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);
11880 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);
11881 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */
11882 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);
11883 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */
11884 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);
11885 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */
11886 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);
11887 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */
11888 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);
11889 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */
11890 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);
11891 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */
11892 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);
11893 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */
11894 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);
11895 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */
11896 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);
11897 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */
11898 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);
11899 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */
11900 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);
11901 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */
11902 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);
11903
11904 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)
11905 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11906 {
11907 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4
11908 | CPUMCTX_EXTRN_APIC_TPR);
11909 AssertRC(rc);
11910
11911#if 0 /** @todo fix me */
11912 pDbgState->fClearCr0Mask = true;
11913 pDbgState->fClearCr4Mask = true;
11914#endif
11915 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))
11916 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;
11917 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))
11918 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;
11919 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */
11920 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would
11921 require clearing here and in the loop if we start using it. */
11922 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);
11923 }
11924 else
11925 {
11926 if (pDbgState->fClearCr0Mask)
11927 {
11928 pDbgState->fClearCr0Mask = false;
11929 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);
11930 }
11931 if (pDbgState->fClearCr4Mask)
11932 {
11933 pDbgState->fClearCr4Mask = false;
11934 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4);
11935 }
11936 }
11937 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);
11938 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);
11939
11940 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)
11941 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))
11942 {
11943 /** @todo later, need to fix handler as it assumes this won't usually happen. */
11944 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);
11945 }
11946 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);
11947 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);
11948
11949 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */
11950 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);
11951 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);
11952 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);
11953 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */
11954 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);
11955 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */
11956 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);
11957#if 0 /** @todo too slow, fix handler. */
11958 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);
11959#endif
11960 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);
11961
11962 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)
11963 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)
11964 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)
11965 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))
11966 {
11967 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11968 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);
11969 }
11970 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11971 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11972 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11973 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);
11974
11975 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)
11976 || IS_EITHER_ENABLED(pVM, INSTR_STR)
11977 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)
11978 || IS_EITHER_ENABLED(pVM, INSTR_LTR))
11979 {
11980 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;
11981 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);
11982 }
11983 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);
11984 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);
11985 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);
11986 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);
11987
11988 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */
11989 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);
11990 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);
11991 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);
11992 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */
11993 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);
11994 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);
11995 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);
11996 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */
11997 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);
11998 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);
11999 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);
12000 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);
12001 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);
12002 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */
12003 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);
12004 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);
12005 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);
12006 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */
12007 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSAVES, VMX_EXIT_XSAVES);
12008 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */
12009 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);
12010
12011#undef IS_EITHER_ENABLED
12012#undef SET_ONLY_XBM_IF_EITHER_EN
12013#undef SET_CPE1_XBM_IF_EITHER_EN
12014#undef SET_CPEU_XBM_IF_EITHER_EN
12015#undef SET_CPE2_XBM_IF_EITHER_EN
12016
12017 /*
12018 * Sanitize the control stuff.
12019 */
12020 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;
12021 if (pDbgState->fCpe2Extra)
12022 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
12023 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;
12024 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;
12025 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))
12026 {
12027 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;
12028 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
12029 }
12030
12031 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",
12032 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,
12033 pDbgState->fClearCr0Mask ? " clr-cr0" : "",
12034 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));
12035}
12036
12037
12038/**
12039 * Fires off DBGF events and dtrace probes for a VM-exit, when it's
12040 * appropriate.
12041 *
12042 * The caller has checked the VM-exit against the
12043 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs
12044 * already, so we don't have to do that either.
12045 *
12046 * @returns Strict VBox status code (i.e. informational status codes too).
12047 * @param pVCpu The cross context virtual CPU structure.
12048 * @param pVmxTransient The VMX-transient structure.
12049 * @param uExitReason The VM-exit reason.
12050 *
12051 * @remarks The name of this function is displayed by dtrace, so keep it short
12052 * and to the point. No longer than 33 chars long, please.
12053 */
12054static VBOXSTRICTRC hmR0VmxHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)
12055{
12056 /*
12057 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the
12058 * same time check whether any corresponding Dtrace event is enabled (fDtrace).
12059 *
12060 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate
12061 * does. Must add/change/remove both places. Same ordering, please.
12062 *
12063 * Added/removed events must also be reflected in the next section
12064 * where we dispatch dtrace events.
12065 */
12066 bool fDtrace1 = false;
12067 bool fDtrace2 = false;
12068 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;
12069 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;
12070 uint32_t uEventArg = 0;
12071#define SET_EXIT(a_EventSubName) \
12072 do { \
12073 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
12074 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
12075 } while (0)
12076#define SET_BOTH(a_EventSubName) \
12077 do { \
12078 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \
12079 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \
12080 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \
12081 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \
12082 } while (0)
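    /*
     * Example (illustrative): for a CPUID VM-exit below, SET_BOTH(CPUID) selects
     * enmEvent1 = DBGFEVENT_INSTR_CPUID and enmEvent2 = DBGFEVENT_EXIT_CPUID, and
     * latches whether the VBOXVMM_INSTR_CPUID / VBOXVMM_EXIT_CPUID dtrace probes
     * are armed into fDtrace1 / fDtrace2.
     */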
12083 switch (uExitReason)
12084 {
12085 case VMX_EXIT_MTF:
12086 return hmR0VmxExitMtf(pVCpu, pVmxTransient);
12087
12088 case VMX_EXIT_XCPT_OR_NMI:
12089 {
12090 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
12091 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))
12092 {
12093 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
12094 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
12095 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
12096 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))
12097 {
12098 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))
12099 {
12100 hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
12101 uEventArg = pVmxTransient->uExitIntErrorCode;
12102 }
12103 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);
12104 switch (enmEvent1)
12105 {
12106 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;
12107 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;
12108 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;
12109 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;
12110 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;
12111 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;
12112 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;
12113 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;
12114 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;
12115 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;
12116 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;
12117 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;
12118 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;
12119 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;
12120 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;
12121 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;
12122 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;
12123 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;
12124 default: break;
12125 }
12126 }
12127 else
12128 AssertFailed();
12129 break;
12130
12131 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
12132 uEventArg = idxVector;
12133 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;
12134 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();
12135 break;
12136 }
12137 break;
12138 }
12139
12140 case VMX_EXIT_TRIPLE_FAULT:
12141 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;
12142 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();
12143 break;
12144 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;
12145 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;
12146 case VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break;
12147 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;
12148 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;
12149
12150 /* Instruction specific VM-exits: */
12151 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;
12152 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;
12153 case VMX_EXIT_HLT: SET_BOTH(HALT); break;
12154 case VMX_EXIT_INVD: SET_BOTH(INVD); break;
12155 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;
12156 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;
12157 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;
12158 case VMX_EXIT_RSM: SET_BOTH(RSM); break;
12159 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;
12160 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;
12161 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;
12162 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;
12163 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;
12164 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;
12165 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;
12166 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;
12167 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;
12168 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;
12169 case VMX_EXIT_MOV_CRX:
12170 hmR0VmxReadExitQualVmcs(pVmxTransient);
12171 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)
12172 SET_BOTH(CRX_READ);
12173 else
12174 SET_BOTH(CRX_WRITE);
12175 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
12176 break;
12177 case VMX_EXIT_MOV_DRX:
12178 hmR0VmxReadExitQualVmcs(pVmxTransient);
12179 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)
12180 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)
12181 SET_BOTH(DRX_READ);
12182 else
12183 SET_BOTH(DRX_WRITE);
12184 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);
12185 break;
12186 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;
12187 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;
12188 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;
12189 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;
12190 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;
12191 case VMX_EXIT_GDTR_IDTR_ACCESS:
12192 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
12193 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))
12194 {
12195 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;
12196 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;
12197 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;
12198 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;
12199 }
12200 break;
12201
12202 case VMX_EXIT_LDTR_TR_ACCESS:
12203 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
12204 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))
12205 {
12206 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;
12207 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;
12208 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;
12209 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;
12210 }
12211 break;
12212
12213 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;
12214 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;
12215 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;
12216 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;
12217 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;
12218 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;
12219 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;
12220 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;
12221 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;
12222 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;
12223 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;
12224
12225 /* Events that aren't relevant at this point. */
12226 case VMX_EXIT_EXT_INT:
12227 case VMX_EXIT_INT_WINDOW:
12228 case VMX_EXIT_NMI_WINDOW:
12229 case VMX_EXIT_TPR_BELOW_THRESHOLD:
12230 case VMX_EXIT_PREEMPT_TIMER:
12231 case VMX_EXIT_IO_INSTR:
12232 break;
12233
12234 /* Errors and unexpected events. */
12235 case VMX_EXIT_INIT_SIGNAL:
12236 case VMX_EXIT_SIPI:
12237 case VMX_EXIT_IO_SMI:
12238 case VMX_EXIT_SMI:
12239 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
12240 case VMX_EXIT_ERR_MSR_LOAD:
12241 case VMX_EXIT_ERR_MACHINE_CHECK:
12242 case VMX_EXIT_PML_FULL:
12243 case VMX_EXIT_VIRTUALIZED_EOI:
12244 break;
12245
12246 default:
12247 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
12248 break;
12249 }
12250#undef SET_BOTH
12251#undef SET_EXIT
12252
12253 /*
12254 * Dtrace tracepoints go first. We do them here all at once so we don't
12255 * have to duplicate the guest state saving and such a few dozen times.
12256 * The downside is that we've got to repeat the switch, though this time
12257 * we use enmEvent since the probes are a subset of what DBGF does.
12258 */
12259 if (fDtrace1 || fDtrace2)
12260 {
12261 hmR0VmxReadExitQualVmcs(pVmxTransient);
12262 hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
12263 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
12264 switch (enmEvent1)
12265 {
12266 /** @todo consider which extra parameters would be helpful for each probe. */
12267 case DBGFEVENT_END: break;
12268 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;
12269 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;
12270 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;
12271 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;
12272 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;
12273 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;
12274 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;
12275 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;
12276 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;
12277 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;
12278 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;
12279 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;
12280 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;
12281 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;
12282 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;
12283 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;
12284 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;
12285 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;
12286 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;
12287 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
12288 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;
12289 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;
12290 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;
12291 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;
12292 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;
12293 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;
12294 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;
12295 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
12296 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
12297 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
12298 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
12299 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
12300 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,
12301 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
12302 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;
12303 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;
12304 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;
12305 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;
12306 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;
12307 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;
12308 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;
12309 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;
12310 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;
12311 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;
12312 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;
12313 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;
12314 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;
12315 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;
12316 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;
12317 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;
12318 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;
12319 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;
12320 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;
12321 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;
12322 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;
12323 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;
12324 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;
12325 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;
12326 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;
12327 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;
12328 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;
12329 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;
12330 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;
12331 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;
12332 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;
12333 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;
12334 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;
12335 }
12336 switch (enmEvent2)
12337 {
12338 /** @todo consider which extra parameters would be helpful for each probe. */
12339 case DBGFEVENT_END: break;
12340 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;
12341 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;
12342 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;
12343 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;
12344 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;
12345 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;
12346 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;
12347 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;
12348 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;
12349 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
12350 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
12351 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;
12352 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;
12353 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;
12354 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,
12355 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;
12356 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;
12357 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;
12358 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;
12359 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;
12360 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;
12361 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;
12362 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;
12363 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;
12364 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;
12365 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;
12366 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;
12367 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;
12368 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;
12369 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;
12370 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;
12371 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;
12372 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;
12373 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;
12374 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;
12375 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;
12376 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;
12377 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;
12378 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;
12379 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;
12380 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;
12381 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;
12382 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;
12383 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;
12384 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;
12385 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break;
12386 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;
12387 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;
12388 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;
12389 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;
12390 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;
12391 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;
12392 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;
12393 }
12394 }
12395
12396 /*
12397 * Fire off the DBGF event, if enabled (our check here is just a quick one,
12398 * the DBGF call will do a full check).
12399 *
12400 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.
12401 * Note! If we have two events, we prioritize the first, i.e. the instruction
12402 * one, in order to avoid event nesting.
12403 */
12404 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
12405 if ( enmEvent1 != DBGFEVENT_END
12406 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))
12407 {
12408 hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
12409 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);
12410 if (rcStrict != VINF_SUCCESS)
12411 return rcStrict;
12412 }
12413 else if ( enmEvent2 != DBGFEVENT_END
12414 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))
12415 {
12416 hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
12417 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);
12418 if (rcStrict != VINF_SUCCESS)
12419 return rcStrict;
12420 }
12421
12422 return VINF_SUCCESS;
12423}
12424
12425
12426/**
12427 * Single-stepping VM-exit filtering.
12428 *
12429 * This is preprocessing the VM-exits and deciding whether we've gotten far
12430 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit
12431 * handling is performed.
12432 *
12433 * @returns Strict VBox status code (i.e. informational status codes too).
12434 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
12435 * @param pVmxTransient The VMX-transient structure.
12436 * @param pDbgState The debug state.
12437 */
12438DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
12439{
12440 /*
12441 * Expensive (saves context) generic dtrace VM-exit probe.
12442 */
12443 uint32_t const uExitReason = pVmxTransient->uExitReason;
12444 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())
12445 { /* more likely */ }
12446 else
12447 {
12448 hmR0VmxReadExitQualVmcs(pVmxTransient);
12449 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
12450 AssertRC(rc);
12451 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
12452 }
12453
12454 /*
12455 * Check for host NMI, just to get that out of the way.
12456 */
12457 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)
12458 { /* normally likely */ }
12459 else
12460 {
12461 hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
12462 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
12463 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)
12464 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
12465 }
12466
12467 /*
12468 * Check for single stepping event if we're stepping.
12469 */
12470 if (pVCpu->hm.s.fSingleInstruction)
12471 {
12472 switch (uExitReason)
12473 {
12474 case VMX_EXIT_MTF:
12475 return hmR0VmxExitMtf(pVCpu, pVmxTransient);
12476
12477 /* Various events: */
12478 case VMX_EXIT_XCPT_OR_NMI:
12479 case VMX_EXIT_EXT_INT:
12480 case VMX_EXIT_TRIPLE_FAULT:
12481 case VMX_EXIT_INT_WINDOW:
12482 case VMX_EXIT_NMI_WINDOW:
12483 case VMX_EXIT_TASK_SWITCH:
12484 case VMX_EXIT_TPR_BELOW_THRESHOLD:
12485 case VMX_EXIT_APIC_ACCESS:
12486 case VMX_EXIT_EPT_VIOLATION:
12487 case VMX_EXIT_EPT_MISCONFIG:
12488 case VMX_EXIT_PREEMPT_TIMER:
12489
12490 /* Instruction specific VM-exits: */
12491 case VMX_EXIT_CPUID:
12492 case VMX_EXIT_GETSEC:
12493 case VMX_EXIT_HLT:
12494 case VMX_EXIT_INVD:
12495 case VMX_EXIT_INVLPG:
12496 case VMX_EXIT_RDPMC:
12497 case VMX_EXIT_RDTSC:
12498 case VMX_EXIT_RSM:
12499 case VMX_EXIT_VMCALL:
12500 case VMX_EXIT_VMCLEAR:
12501 case VMX_EXIT_VMLAUNCH:
12502 case VMX_EXIT_VMPTRLD:
12503 case VMX_EXIT_VMPTRST:
12504 case VMX_EXIT_VMREAD:
12505 case VMX_EXIT_VMRESUME:
12506 case VMX_EXIT_VMWRITE:
12507 case VMX_EXIT_VMXOFF:
12508 case VMX_EXIT_VMXON:
12509 case VMX_EXIT_MOV_CRX:
12510 case VMX_EXIT_MOV_DRX:
12511 case VMX_EXIT_IO_INSTR:
12512 case VMX_EXIT_RDMSR:
12513 case VMX_EXIT_WRMSR:
12514 case VMX_EXIT_MWAIT:
12515 case VMX_EXIT_MONITOR:
12516 case VMX_EXIT_PAUSE:
12517 case VMX_EXIT_GDTR_IDTR_ACCESS:
12518 case VMX_EXIT_LDTR_TR_ACCESS:
12519 case VMX_EXIT_INVEPT:
12520 case VMX_EXIT_RDTSCP:
12521 case VMX_EXIT_INVVPID:
12522 case VMX_EXIT_WBINVD:
12523 case VMX_EXIT_XSETBV:
12524 case VMX_EXIT_RDRAND:
12525 case VMX_EXIT_INVPCID:
12526 case VMX_EXIT_VMFUNC:
12527 case VMX_EXIT_RDSEED:
12528 case VMX_EXIT_XSAVES:
12529 case VMX_EXIT_XRSTORS:
12530 {
12531 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
12532 AssertRCReturn(rc, rc);
12533 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart
12534 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)
12535 return VINF_EM_DBG_STEPPED;
12536 break;
12537 }
12538
12539 /* Errors and unexpected events: */
12540 case VMX_EXIT_INIT_SIGNAL:
12541 case VMX_EXIT_SIPI:
12542 case VMX_EXIT_IO_SMI:
12543 case VMX_EXIT_SMI:
12544 case VMX_EXIT_ERR_INVALID_GUEST_STATE:
12545 case VMX_EXIT_ERR_MSR_LOAD:
12546 case VMX_EXIT_ERR_MACHINE_CHECK:
12547 case VMX_EXIT_PML_FULL:
12548 case VMX_EXIT_VIRTUALIZED_EOI:
12549 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault like, so I guess we must process it? */
12550 break;
12551
12552 default:
12553 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));
12554 break;
12555 }
12556 }
12557
12558 /*
12559 * Check for debugger event breakpoints and dtrace probes.
12560 */
12561 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U
12562 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )
12563 {
12564 VBOXSTRICTRC rcStrict = hmR0VmxHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);
12565 if (rcStrict != VINF_SUCCESS)
12566 return rcStrict;
12567 }
12568
12569 /*
12570 * Normal processing.
12571 */
12572#ifdef HMVMX_USE_FUNCTION_TABLE
12573 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);
12574#else
12575 return hmR0VmxHandleExit(pVCpu, pVmxTransient, uExitReason);
12576#endif
12577}
12578
12579
12580/**
12581 * Single steps guest code using hardware-assisted VMX.
12582 *
12583 * This is -not- the same as the guest single-stepping itself (say using EFLAGS.TF)
12584 * but rather single-stepping through the hypervisor debugger.
12585 *
12586 * @returns Strict VBox status code (i.e. informational status codes too).
12587 * @param pVCpu The cross context virtual CPU structure.
12588 * @param pcLoops Pointer to the number of executed loops.
12589 *
12590 * @note Mostly the same as hmR0VmxRunGuestCodeNormal().
12591 */
12592static VBOXSTRICTRC hmR0VmxRunGuestCodeDebug(PVMCPUCC pVCpu, uint32_t *pcLoops)
12593{
12594 uint32_t const cMaxResumeLoops = pVCpu->CTX_SUFF(pVM)->hmr0.s.cMaxResumeLoops;
12595 Assert(pcLoops);
12596 Assert(*pcLoops <= cMaxResumeLoops);
12597
12598 VMXTRANSIENT VmxTransient;
12599 RT_ZERO(VmxTransient);
12600 VmxTransient.pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
12601
12602 /* Set HMCPU indicators. */
12603 bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction;
12604 pVCpu->hm.s.fSingleInstruction = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
12605 pVCpu->hmr0.s.fDebugWantRdTscExit = false;
12606 pVCpu->hmr0.s.fUsingDebugLoop = true;
12607
12608 /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. */
12609 VMXRUNDBGSTATE DbgState;
12610 hmR0VmxRunDebugStateInit(pVCpu, &VmxTransient, &DbgState);
12611 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState);
12612
12613 /*
12614 * The loop.
12615 */
12616 VBOXSTRICTRC rcStrict = VERR_INTERNAL_ERROR_5;
12617 for (;;)
12618 {
12619 Assert(!HMR0SuspendPending());
12620 HMVMX_ASSERT_CPU_SAFE(pVCpu);
12621 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
12622 bool fStepping = pVCpu->hm.s.fSingleInstruction;
12623
12624 /* Set up VM-execution controls the next two can respond to. */
12625 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);
12626
12627 /*
12628 * Preparatory work for running guest code, this may force us to
12629 * return to ring-3.
12630 *
12631 * Warning! This bugger disables interrupts on VINF_SUCCESS!
12632 */
12633 rcStrict = hmR0VmxPreRunGuest(pVCpu, &VmxTransient, fStepping);
12634 if (rcStrict != VINF_SUCCESS)
12635 break;
12636
12637 /* Interrupts are disabled at this point! */
12638 hmR0VmxPreRunGuestCommitted(pVCpu, &VmxTransient);
12639
12640 /* Override any obnoxious code in the above two calls. */
12641 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);
12642
12643 /*
12644 * Finally execute the guest.
12645 */
12646 int rcRun = hmR0VmxRunGuest(pVCpu, &VmxTransient);
12647
12648 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun);
12649 /* Interrupts are re-enabled at this point! */
12650
12651 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
12652 if (RT_SUCCESS(rcRun))
12653 { /* very likely */ }
12654 else
12655 {
12656 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPreExit, x);
12657 hmR0VmxReportWorldSwitchError(pVCpu, rcRun, &VmxTransient);
12658 return rcRun;
12659 }
12660
12661 /* Profile the VM-exit. */
12662 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
12663 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll);
12664 STAM_COUNTER_INC(&pVCpu->hm.s.aStatExitReason[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
12665 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatPreExit, &pVCpu->hm.s.StatExitHandling, x);
12666 HMVMX_START_EXIT_DISPATCH_PROF();
12667
12668 VBOXVMM_R0_HMVMX_VMEXIT_NOCTX(pVCpu, &pVCpu->cpum.GstCtx, VmxTransient.uExitReason);
12669
12670 /*
12671 * Handle the VM-exit - we quit earlier on certain VM-exits, see hmR0VmxRunDebugHandleExit().
12672 */
12673 rcStrict = hmR0VmxRunDebugHandleExit(pVCpu, &VmxTransient, &DbgState);
12674 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x);
12675 if (rcStrict != VINF_SUCCESS)
12676 break;
12677 if (++(*pcLoops) > cMaxResumeLoops)
12678 {
12679 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchMaxResumeLoops);
12680 rcStrict = VINF_EM_RAW_INTERRUPT;
12681 break;
12682 }
12683
12684 /*
12685 * Stepping: If the RIP changed, consider it a single step.
12686 * Otherwise, make sure one of the TFs gets set.
12687 */
12688 if (fStepping)
12689 {
12690 int rc = hmR0VmxImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
12691 AssertRC(rc);
12692 if ( pVCpu->cpum.GstCtx.rip != DbgState.uRipStart
12693 || pVCpu->cpum.GstCtx.cs.Sel != DbgState.uCsStart)
12694 {
12695 rcStrict = VINF_EM_DBG_STEPPED;
12696 break;
12697 }
12698 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
12699 }
12700
12701 /*
12702 * Update when dtrace settings change (DBGF kicks us, so no need to check).
12703 */
12704 if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo)
12705 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState);
12706
12707 /* Restore all controls applied by hmR0VmxPreRunGuestDebugStateApply above. */
12708 rcStrict = hmR0VmxRunDebugStateRevert(pVCpu, &VmxTransient, &DbgState, rcStrict);
12709 Assert(rcStrict == VINF_SUCCESS);
12710 }
12711
12712 /*
12713 * Clear the X86_EFL_TF if necessary.
12714 */
12715 if (pVCpu->hmr0.s.fClearTrapFlag)
12716 {
12717 int rc = hmR0VmxImportGuestState(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
12718 AssertRC(rc);
12719 pVCpu->hmr0.s.fClearTrapFlag = false;
12720 pVCpu->cpum.GstCtx.eflags.Bits.u1TF = 0;
12721 }
12722 /** @todo there seem to be issues with the resume flag when the monitor trap
12723 * flag is pending without being used. Seen early in bios init when
12724 * accessing APIC page in protected mode. */
12725
12726 /* Restore HMCPU indicators. */
12727 pVCpu->hmr0.s.fUsingDebugLoop = false;
12728 pVCpu->hmr0.s.fDebugWantRdTscExit = false;
12729 pVCpu->hm.s.fSingleInstruction = fSavedSingleInstruction;
12730
12731 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
12732 return rcStrict;
12733}
12734
12735
12736/** @} */
12737
12738
12739/**
12740 * Checks if any expensive dtrace probes are enabled and we should go to the
12741 * debug loop.
12742 *
12743 * @returns true if we should use debug loop, false if not.
12744 */
12745static bool hmR0VmxAnyExpensiveProbesEnabled(void)
12746{
12747 /* It's probably faster to OR the raw 32-bit counter variables together.
12748 Since the variables are in an array and the probes are next to one
12749 another (more or less), we have good locality. So, better to read
12750 eight or nine cache lines every time and have just one conditional than
12751 128+ conditionals, right? */
12752 return ( VBOXVMM_R0_HMVMX_VMEXIT_ENABLED_RAW() /* expensive too due to context */
12753 | VBOXVMM_XCPT_DE_ENABLED_RAW()
12754 | VBOXVMM_XCPT_DB_ENABLED_RAW()
12755 | VBOXVMM_XCPT_BP_ENABLED_RAW()
12756 | VBOXVMM_XCPT_OF_ENABLED_RAW()
12757 | VBOXVMM_XCPT_BR_ENABLED_RAW()
12758 | VBOXVMM_XCPT_UD_ENABLED_RAW()
12759 | VBOXVMM_XCPT_NM_ENABLED_RAW()
12760 | VBOXVMM_XCPT_DF_ENABLED_RAW()
12761 | VBOXVMM_XCPT_TS_ENABLED_RAW()
12762 | VBOXVMM_XCPT_NP_ENABLED_RAW()
12763 | VBOXVMM_XCPT_SS_ENABLED_RAW()
12764 | VBOXVMM_XCPT_GP_ENABLED_RAW()
12765 | VBOXVMM_XCPT_PF_ENABLED_RAW()
12766 | VBOXVMM_XCPT_MF_ENABLED_RAW()
12767 | VBOXVMM_XCPT_AC_ENABLED_RAW()
12768 | VBOXVMM_XCPT_XF_ENABLED_RAW()
12769 | VBOXVMM_XCPT_VE_ENABLED_RAW()
12770 | VBOXVMM_XCPT_SX_ENABLED_RAW()
12771 | VBOXVMM_INT_SOFTWARE_ENABLED_RAW()
12772 | VBOXVMM_INT_HARDWARE_ENABLED_RAW()
12773 ) != 0
12774 || ( VBOXVMM_INSTR_HALT_ENABLED_RAW()
12775 | VBOXVMM_INSTR_MWAIT_ENABLED_RAW()
12776 | VBOXVMM_INSTR_MONITOR_ENABLED_RAW()
12777 | VBOXVMM_INSTR_CPUID_ENABLED_RAW()
12778 | VBOXVMM_INSTR_INVD_ENABLED_RAW()
12779 | VBOXVMM_INSTR_WBINVD_ENABLED_RAW()
12780 | VBOXVMM_INSTR_INVLPG_ENABLED_RAW()
12781 | VBOXVMM_INSTR_RDTSC_ENABLED_RAW()
12782 | VBOXVMM_INSTR_RDTSCP_ENABLED_RAW()
12783 | VBOXVMM_INSTR_RDPMC_ENABLED_RAW()
12784 | VBOXVMM_INSTR_RDMSR_ENABLED_RAW()
12785 | VBOXVMM_INSTR_WRMSR_ENABLED_RAW()
12786 | VBOXVMM_INSTR_CRX_READ_ENABLED_RAW()
12787 | VBOXVMM_INSTR_CRX_WRITE_ENABLED_RAW()
12788 | VBOXVMM_INSTR_DRX_READ_ENABLED_RAW()
12789 | VBOXVMM_INSTR_DRX_WRITE_ENABLED_RAW()
12790 | VBOXVMM_INSTR_PAUSE_ENABLED_RAW()
12791 | VBOXVMM_INSTR_XSETBV_ENABLED_RAW()
12792 | VBOXVMM_INSTR_SIDT_ENABLED_RAW()
12793 | VBOXVMM_INSTR_LIDT_ENABLED_RAW()
12794 | VBOXVMM_INSTR_SGDT_ENABLED_RAW()
12795 | VBOXVMM_INSTR_LGDT_ENABLED_RAW()
12796 | VBOXVMM_INSTR_SLDT_ENABLED_RAW()
12797 | VBOXVMM_INSTR_LLDT_ENABLED_RAW()
12798 | VBOXVMM_INSTR_STR_ENABLED_RAW()
12799 | VBOXVMM_INSTR_LTR_ENABLED_RAW()
12800 | VBOXVMM_INSTR_GETSEC_ENABLED_RAW()
12801 | VBOXVMM_INSTR_RSM_ENABLED_RAW()
12802 | VBOXVMM_INSTR_RDRAND_ENABLED_RAW()
12803 | VBOXVMM_INSTR_RDSEED_ENABLED_RAW()
12804 | VBOXVMM_INSTR_XSAVES_ENABLED_RAW()
12805 | VBOXVMM_INSTR_XRSTORS_ENABLED_RAW()
12806 | VBOXVMM_INSTR_VMM_CALL_ENABLED_RAW()
12807 | VBOXVMM_INSTR_VMX_VMCLEAR_ENABLED_RAW()
12808 | VBOXVMM_INSTR_VMX_VMLAUNCH_ENABLED_RAW()
12809 | VBOXVMM_INSTR_VMX_VMPTRLD_ENABLED_RAW()
12810 | VBOXVMM_INSTR_VMX_VMPTRST_ENABLED_RAW()
12811 | VBOXVMM_INSTR_VMX_VMREAD_ENABLED_RAW()
12812 | VBOXVMM_INSTR_VMX_VMRESUME_ENABLED_RAW()
12813 | VBOXVMM_INSTR_VMX_VMWRITE_ENABLED_RAW()
12814 | VBOXVMM_INSTR_VMX_VMXOFF_ENABLED_RAW()
12815 | VBOXVMM_INSTR_VMX_VMXON_ENABLED_RAW()
12816 | VBOXVMM_INSTR_VMX_VMFUNC_ENABLED_RAW()
12817 | VBOXVMM_INSTR_VMX_INVEPT_ENABLED_RAW()
12818 | VBOXVMM_INSTR_VMX_INVVPID_ENABLED_RAW()
12819 | VBOXVMM_INSTR_VMX_INVPCID_ENABLED_RAW()
12820 ) != 0
12821 || ( VBOXVMM_EXIT_TASK_SWITCH_ENABLED_RAW()
12822 | VBOXVMM_EXIT_HALT_ENABLED_RAW()
12823 | VBOXVMM_EXIT_MWAIT_ENABLED_RAW()
12824 | VBOXVMM_EXIT_MONITOR_ENABLED_RAW()
12825 | VBOXVMM_EXIT_CPUID_ENABLED_RAW()
12826 | VBOXVMM_EXIT_INVD_ENABLED_RAW()
12827 | VBOXVMM_EXIT_WBINVD_ENABLED_RAW()
12828 | VBOXVMM_EXIT_INVLPG_ENABLED_RAW()
12829 | VBOXVMM_EXIT_RDTSC_ENABLED_RAW()
12830 | VBOXVMM_EXIT_RDTSCP_ENABLED_RAW()
12831 | VBOXVMM_EXIT_RDPMC_ENABLED_RAW()
12832 | VBOXVMM_EXIT_RDMSR_ENABLED_RAW()
12833 | VBOXVMM_EXIT_WRMSR_ENABLED_RAW()
12834 | VBOXVMM_EXIT_CRX_READ_ENABLED_RAW()
12835 | VBOXVMM_EXIT_CRX_WRITE_ENABLED_RAW()
12836 | VBOXVMM_EXIT_DRX_READ_ENABLED_RAW()
12837 | VBOXVMM_EXIT_DRX_WRITE_ENABLED_RAW()
12838 | VBOXVMM_EXIT_PAUSE_ENABLED_RAW()
12839 | VBOXVMM_EXIT_XSETBV_ENABLED_RAW()
12840 | VBOXVMM_EXIT_SIDT_ENABLED_RAW()
12841 | VBOXVMM_EXIT_LIDT_ENABLED_RAW()
12842 | VBOXVMM_EXIT_SGDT_ENABLED_RAW()
12843 | VBOXVMM_EXIT_LGDT_ENABLED_RAW()
12844 | VBOXVMM_EXIT_SLDT_ENABLED_RAW()
12845 | VBOXVMM_EXIT_LLDT_ENABLED_RAW()
12846 | VBOXVMM_EXIT_STR_ENABLED_RAW()
12847 | VBOXVMM_EXIT_LTR_ENABLED_RAW()
12848 | VBOXVMM_EXIT_GETSEC_ENABLED_RAW()
12849 | VBOXVMM_EXIT_RSM_ENABLED_RAW()
12850 | VBOXVMM_EXIT_RDRAND_ENABLED_RAW()
12851 | VBOXVMM_EXIT_RDSEED_ENABLED_RAW()
12852 | VBOXVMM_EXIT_XSAVES_ENABLED_RAW()
12853 | VBOXVMM_EXIT_XRSTORS_ENABLED_RAW()
12854 | VBOXVMM_EXIT_VMM_CALL_ENABLED_RAW()
12855 | VBOXVMM_EXIT_VMX_VMCLEAR_ENABLED_RAW()
12856 | VBOXVMM_EXIT_VMX_VMLAUNCH_ENABLED_RAW()
12857 | VBOXVMM_EXIT_VMX_VMPTRLD_ENABLED_RAW()
12858 | VBOXVMM_EXIT_VMX_VMPTRST_ENABLED_RAW()
12859 | VBOXVMM_EXIT_VMX_VMREAD_ENABLED_RAW()
12860 | VBOXVMM_EXIT_VMX_VMRESUME_ENABLED_RAW()
12861 | VBOXVMM_EXIT_VMX_VMWRITE_ENABLED_RAW()
12862 | VBOXVMM_EXIT_VMX_VMXOFF_ENABLED_RAW()
12863 | VBOXVMM_EXIT_VMX_VMXON_ENABLED_RAW()
12864 | VBOXVMM_EXIT_VMX_VMFUNC_ENABLED_RAW()
12865 | VBOXVMM_EXIT_VMX_INVEPT_ENABLED_RAW()
12866 | VBOXVMM_EXIT_VMX_INVVPID_ENABLED_RAW()
12867 | VBOXVMM_EXIT_VMX_INVPCID_ENABLED_RAW()
12868 | VBOXVMM_EXIT_VMX_EPT_VIOLATION_ENABLED_RAW()
12869 | VBOXVMM_EXIT_VMX_EPT_MISCONFIG_ENABLED_RAW()
12870 | VBOXVMM_EXIT_VMX_VAPIC_ACCESS_ENABLED_RAW()
12871 | VBOXVMM_EXIT_VMX_VAPIC_WRITE_ENABLED_RAW()
12872 ) != 0;
12873}
12874
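/*
 * Illustrative sketch (not part of the build): the idea in the function above is to
 * OR the raw 32-bit probe-enable counters together and test the result once, instead
 * of evaluating one conditional per probe.  A minimal, self-contained version of that
 * pattern over a purely hypothetical counter array could look like this:
 */
#if 0 /* example only */
static bool exampleAnyProbeEnabled(uint32_t const *pacProbesEnabled, size_t cProbes)
{
    /* OR all the counters together; any non-zero bit means at least one probe is active. */
    uint32_t fAny = 0;
    for (size_t i = 0; i < cProbes; i++)
        fAny |= pacProbesEnabled[i];
    return fAny != 0;
}
#endif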
12875
12876/**
12877 * Runs the guest using hardware-assisted VMX.
12878 *
12879 * @returns Strict VBox status code (i.e. informational status codes too).
12880 * @param pVCpu The cross context virtual CPU structure.
12881 */
12882VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPUCC pVCpu)
12883{
12884 AssertPtr(pVCpu);
12885 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
12886 Assert(VMMRZCallRing3IsEnabled(pVCpu));
12887 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
12888 HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
12889
12890 VBOXSTRICTRC rcStrict;
12891 uint32_t cLoops = 0;
12892 for (;;)
12893 {
12894#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12895 bool const fInNestedGuestMode = CPUMIsGuestInVmxNonRootMode(pCtx);
12896#else
12897 NOREF(pCtx);
12898 bool const fInNestedGuestMode = false;
12899#endif
12900 if (!fInNestedGuestMode)
12901 {
12902 if ( !pVCpu->hm.s.fUseDebugLoop
12903 && (!VBOXVMM_ANY_PROBES_ENABLED() || !hmR0VmxAnyExpensiveProbesEnabled())
12904 && !DBGFIsStepping(pVCpu)
12905 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
12906 rcStrict = hmR0VmxRunGuestCodeNormal(pVCpu, &cLoops);
12907 else
12908 rcStrict = hmR0VmxRunGuestCodeDebug(pVCpu, &cLoops);
12909 }
12910#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12911 else
12912 rcStrict = hmR0VmxRunGuestCodeNested(pVCpu, &cLoops);
12913
12914 if (rcStrict == VINF_VMX_VMLAUNCH_VMRESUME)
12915 {
12916 Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
12917 continue;
12918 }
12919 if (rcStrict == VINF_VMX_VMEXIT)
12920 {
12921 Assert(!CPUMIsGuestInVmxNonRootMode(pCtx));
12922 continue;
12923 }
12924#endif
12925 break;
12926 }
12927
12928 int const rcLoop = VBOXSTRICTRC_VAL(rcStrict);
12929 switch (rcLoop)
12930 {
12931 case VERR_EM_INTERPRETER: rcStrict = VINF_EM_RAW_EMULATE_INSTR; break;
12932 case VINF_EM_RESET: rcStrict = VINF_EM_TRIPLE_FAULT; break;
12933 }
12934
12935 int rc2 = hmR0VmxExitToRing3(pVCpu, rcStrict);
12936 if (RT_FAILURE(rc2))
12937 {
12938 pVCpu->hm.s.u32HMError = (uint32_t)VBOXSTRICTRC_VAL(rcStrict);
12939 rcStrict = rc2;
12940 }
12941 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn));
12942 Assert(!VMMR0AssertionIsNotificationSet(pVCpu));
12943 return rcStrict;
12944}
12945
12946
12947#ifndef HMVMX_USE_FUNCTION_TABLE
12948/**
12949 * Handles a guest VM-exit from hardware-assisted VMX execution.
12950 *
12951 * @returns Strict VBox status code (i.e. informational status codes too).
12952 * @param pVCpu The cross context virtual CPU structure.
12953 * @param pVmxTransient The VMX-transient structure.
12954 */
12955DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
12956{
12957#ifdef DEBUG_ramshankar
12958# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \
12959 do { \
12960 if (a_fSave != 0) \
12961 hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); \
12962 VBOXSTRICTRC rcStrict = a_CallExpr; \
12963 if (a_fSave != 0) \
12964 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); \
12965 return rcStrict; \
12966 } while (0)
12967#else
12968# define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr
12969#endif
12970 uint32_t const uExitReason = pVmxTransient->uExitReason;
12971 switch (uExitReason)
12972 {
12973 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, hmR0VmxExitEptMisconfig(pVCpu, pVmxTransient));
12974 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, hmR0VmxExitEptViolation(pVCpu, pVmxTransient));
12975 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, hmR0VmxExitIoInstr(pVCpu, pVmxTransient));
12976 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, hmR0VmxExitCpuid(pVCpu, pVmxTransient));
12977 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, hmR0VmxExitRdtsc(pVCpu, pVmxTransient));
12978 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, hmR0VmxExitRdtscp(pVCpu, pVmxTransient));
12979 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitApicAccess(pVCpu, pVmxTransient));
12980 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, hmR0VmxExitXcptOrNmi(pVCpu, pVmxTransient));
12981 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovCRx(pVCpu, pVmxTransient));
12982 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, hmR0VmxExitExtInt(pVCpu, pVmxTransient));
12983 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, hmR0VmxExitIntWindow(pVCpu, pVmxTransient));
12984 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, hmR0VmxExitTprBelowThreshold(pVCpu, pVmxTransient));
12985 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, hmR0VmxExitMwait(pVCpu, pVmxTransient));
12986 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, hmR0VmxExitMonitor(pVCpu, pVmxTransient));
12987 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, hmR0VmxExitTaskSwitch(pVCpu, pVmxTransient));
12988 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, hmR0VmxExitPreemptTimer(pVCpu, pVmxTransient));
12989 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, hmR0VmxExitRdmsr(pVCpu, pVmxTransient));
12990 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, hmR0VmxExitWrmsr(pVCpu, pVmxTransient));
12991 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, hmR0VmxExitVmcall(pVCpu, pVmxTransient));
12992 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovDRx(pVCpu, pVmxTransient));
12993 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, hmR0VmxExitHlt(pVCpu, pVmxTransient));
12994 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, hmR0VmxExitInvd(pVCpu, pVmxTransient));
12995 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, hmR0VmxExitInvlpg(pVCpu, pVmxTransient));
12996 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, hmR0VmxExitMtf(pVCpu, pVmxTransient));
12997 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, hmR0VmxExitPause(pVCpu, pVmxTransient));
12998 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, hmR0VmxExitWbinvd(pVCpu, pVmxTransient));
12999 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, hmR0VmxExitXsetbv(pVCpu, pVmxTransient));
13000 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, hmR0VmxExitInvpcid(pVCpu, pVmxTransient));
13001 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, hmR0VmxExitGetsec(pVCpu, pVmxTransient));
13002 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, hmR0VmxExitRdpmc(pVCpu, pVmxTransient));
13003#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13004 case VMX_EXIT_VMCLEAR: VMEXIT_CALL_RET(0, hmR0VmxExitVmclear(pVCpu, pVmxTransient));
13005 case VMX_EXIT_VMLAUNCH: VMEXIT_CALL_RET(0, hmR0VmxExitVmlaunch(pVCpu, pVmxTransient));
13006 case VMX_EXIT_VMPTRLD: VMEXIT_CALL_RET(0, hmR0VmxExitVmptrld(pVCpu, pVmxTransient));
13007 case VMX_EXIT_VMPTRST: VMEXIT_CALL_RET(0, hmR0VmxExitVmptrst(pVCpu, pVmxTransient));
13008 case VMX_EXIT_VMREAD: VMEXIT_CALL_RET(0, hmR0VmxExitVmread(pVCpu, pVmxTransient));
13009        case VMX_EXIT_VMRESUME:                VMEXIT_CALL_RET(0, hmR0VmxExitVmresume(pVCpu, pVmxTransient));
13010        case VMX_EXIT_VMWRITE:                 VMEXIT_CALL_RET(0, hmR0VmxExitVmwrite(pVCpu, pVmxTransient));
13011 case VMX_EXIT_VMXOFF: VMEXIT_CALL_RET(0, hmR0VmxExitVmxoff(pVCpu, pVmxTransient));
13012 case VMX_EXIT_VMXON: VMEXIT_CALL_RET(0, hmR0VmxExitVmxon(pVCpu, pVmxTransient));
13013 case VMX_EXIT_INVVPID: VMEXIT_CALL_RET(0, hmR0VmxExitInvvpid(pVCpu, pVmxTransient));
13014 case VMX_EXIT_INVEPT: VMEXIT_CALL_RET(0, hmR0VmxExitSetPendingXcptUD(pVCpu, pVmxTransient));
13015#else
13016 case VMX_EXIT_VMCLEAR:
13017 case VMX_EXIT_VMLAUNCH:
13018 case VMX_EXIT_VMPTRLD:
13019 case VMX_EXIT_VMPTRST:
13020 case VMX_EXIT_VMREAD:
13021 case VMX_EXIT_VMRESUME:
13022 case VMX_EXIT_VMWRITE:
13023 case VMX_EXIT_VMXOFF:
13024 case VMX_EXIT_VMXON:
13025 case VMX_EXIT_INVVPID:
13026 case VMX_EXIT_INVEPT:
13027 return hmR0VmxExitSetPendingXcptUD(pVCpu, pVmxTransient);
13028#endif
13029
13030 case VMX_EXIT_TRIPLE_FAULT: return hmR0VmxExitTripleFault(pVCpu, pVmxTransient);
13031 case VMX_EXIT_NMI_WINDOW: return hmR0VmxExitNmiWindow(pVCpu, pVmxTransient);
13032 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return hmR0VmxExitErrInvalidGuestState(pVCpu, pVmxTransient);
13033
13034 case VMX_EXIT_INIT_SIGNAL:
13035 case VMX_EXIT_SIPI:
13036 case VMX_EXIT_IO_SMI:
13037 case VMX_EXIT_SMI:
13038 case VMX_EXIT_ERR_MSR_LOAD:
13039 case VMX_EXIT_ERR_MACHINE_CHECK:
13040 case VMX_EXIT_PML_FULL:
13041 case VMX_EXIT_VIRTUALIZED_EOI:
13042 case VMX_EXIT_GDTR_IDTR_ACCESS:
13043 case VMX_EXIT_LDTR_TR_ACCESS:
13044 case VMX_EXIT_APIC_WRITE:
13045 case VMX_EXIT_RDRAND:
13046 case VMX_EXIT_RSM:
13047 case VMX_EXIT_VMFUNC:
13048 case VMX_EXIT_ENCLS:
13049 case VMX_EXIT_RDSEED:
13050 case VMX_EXIT_XSAVES:
13051 case VMX_EXIT_XRSTORS:
13052 case VMX_EXIT_UMWAIT:
13053 case VMX_EXIT_TPAUSE:
13054 case VMX_EXIT_LOADIWKEY:
13055 default:
13056 return hmR0VmxExitErrUnexpected(pVCpu, pVmxTransient);
13057 }
13058#undef VMEXIT_CALL_RET
13059}
13060#endif /* !HMVMX_USE_FUNCTION_TABLE */
13061
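/*
 * Illustrative sketch (not part of the build): when HMVMX_USE_FUNCTION_TABLE is defined,
 * the switch-based hmR0VmxHandleExit() above is compiled out and, as the name suggests,
 * dispatch presumably goes through a table of handler pointers indexed by the exit
 * reason.  A hypothetical, simplified table dispatcher (all names below are made up)
 * might look like this:
 */
#if 0 /* example only */
typedef VBOXSTRICTRC (*PFNEXAMPLEEXITHANDLER)(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient);

static VBOXSTRICTRC exampleDispatchExit(PFNEXAMPLEEXITHANDLER const *papfnHandlers, uint32_t cHandlers,
                                        PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    /* Out-of-range exit reasons fall back to the last entry (an "unexpected exit" handler). */
    uint32_t const uExitReason = pVmxTransient->uExitReason;
    uint32_t const idxHandler  = uExitReason < cHandlers ? uExitReason : cHandlers - 1;
    return papfnHandlers[idxHandler](pVCpu, pVmxTransient);
}
#endif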
13062
13063#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13064/**
13065 * Handles a nested-guest VM-exit from hardware-assisted VMX execution.
13066 *
13067 * @returns Strict VBox status code (i.e. informational status codes too).
13068 * @param pVCpu The cross context virtual CPU structure.
13069 * @param pVmxTransient The VMX-transient structure.
13070 */
13071DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13072{
13073 uint32_t const uExitReason = pVmxTransient->uExitReason;
13074 switch (uExitReason)
13075 {
13076 case VMX_EXIT_EPT_MISCONFIG: return hmR0VmxExitEptMisconfig(pVCpu, pVmxTransient);
13077 case VMX_EXIT_EPT_VIOLATION: return hmR0VmxExitEptViolation(pVCpu, pVmxTransient);
13078 case VMX_EXIT_XCPT_OR_NMI: return hmR0VmxExitXcptOrNmiNested(pVCpu, pVmxTransient);
13079 case VMX_EXIT_IO_INSTR: return hmR0VmxExitIoInstrNested(pVCpu, pVmxTransient);
13080 case VMX_EXIT_HLT: return hmR0VmxExitHltNested(pVCpu, pVmxTransient);
13081
13082 /*
13083 * We shouldn't direct host physical interrupts to the nested-guest.
13084 */
13085 case VMX_EXIT_EXT_INT:
13086 return hmR0VmxExitExtInt(pVCpu, pVmxTransient);
13087
13088 /*
13089 * Instructions that cause VM-exits unconditionally or the condition is
13090         * always taken solely from the nested hypervisor (meaning if the VM-exit
13091 * happens, it's guaranteed to be a nested-guest VM-exit).
13092 *
13093 * - Provides VM-exit instruction length ONLY.
13094 */
13095 case VMX_EXIT_CPUID: /* Unconditional. */
13096 case VMX_EXIT_VMCALL:
13097 case VMX_EXIT_GETSEC:
13098 case VMX_EXIT_INVD:
13099 case VMX_EXIT_XSETBV:
13100 case VMX_EXIT_VMLAUNCH:
13101 case VMX_EXIT_VMRESUME:
13102 case VMX_EXIT_VMXOFF:
13103 case VMX_EXIT_ENCLS: /* Condition specified solely by nested hypervisor. */
13104 case VMX_EXIT_VMFUNC:
13105 return hmR0VmxExitInstrNested(pVCpu, pVmxTransient);
13106
13107 /*
13108 * Instructions that cause VM-exits unconditionally or the condition is
13109         * always taken solely from the nested hypervisor (meaning if the VM-exit
13110 * happens, it's guaranteed to be a nested-guest VM-exit).
13111 *
13112 * - Provides VM-exit instruction length.
13113 * - Provides VM-exit information.
13114 * - Optionally provides Exit qualification.
13115 *
13116 * Since Exit qualification is 0 for all VM-exits where it is not
13117 * applicable, reading and passing it to the guest should produce
13118 * defined behavior.
13119 *
13120 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
13121 */
13122 case VMX_EXIT_INVEPT: /* Unconditional. */
13123 case VMX_EXIT_INVVPID:
13124 case VMX_EXIT_VMCLEAR:
13125 case VMX_EXIT_VMPTRLD:
13126 case VMX_EXIT_VMPTRST:
13127 case VMX_EXIT_VMXON:
13128 case VMX_EXIT_GDTR_IDTR_ACCESS: /* Condition specified solely by nested hypervisor. */
13129 case VMX_EXIT_LDTR_TR_ACCESS:
13130 case VMX_EXIT_RDRAND:
13131 case VMX_EXIT_RDSEED:
13132 case VMX_EXIT_XSAVES:
13133 case VMX_EXIT_XRSTORS:
13134 case VMX_EXIT_UMWAIT:
13135 case VMX_EXIT_TPAUSE:
13136 return hmR0VmxExitInstrWithInfoNested(pVCpu, pVmxTransient);
13137
13138 case VMX_EXIT_RDTSC: return hmR0VmxExitRdtscNested(pVCpu, pVmxTransient);
13139 case VMX_EXIT_RDTSCP: return hmR0VmxExitRdtscpNested(pVCpu, pVmxTransient);
13140 case VMX_EXIT_RDMSR: return hmR0VmxExitRdmsrNested(pVCpu, pVmxTransient);
13141 case VMX_EXIT_WRMSR: return hmR0VmxExitWrmsrNested(pVCpu, pVmxTransient);
13142 case VMX_EXIT_INVLPG: return hmR0VmxExitInvlpgNested(pVCpu, pVmxTransient);
13143 case VMX_EXIT_INVPCID: return hmR0VmxExitInvpcidNested(pVCpu, pVmxTransient);
13144 case VMX_EXIT_TASK_SWITCH: return hmR0VmxExitTaskSwitchNested(pVCpu, pVmxTransient);
13145 case VMX_EXIT_WBINVD: return hmR0VmxExitWbinvdNested(pVCpu, pVmxTransient);
13146 case VMX_EXIT_MTF: return hmR0VmxExitMtfNested(pVCpu, pVmxTransient);
13147 case VMX_EXIT_APIC_ACCESS: return hmR0VmxExitApicAccessNested(pVCpu, pVmxTransient);
13148 case VMX_EXIT_APIC_WRITE: return hmR0VmxExitApicWriteNested(pVCpu, pVmxTransient);
13149 case VMX_EXIT_VIRTUALIZED_EOI: return hmR0VmxExitVirtEoiNested(pVCpu, pVmxTransient);
13150 case VMX_EXIT_MOV_CRX: return hmR0VmxExitMovCRxNested(pVCpu, pVmxTransient);
13151 case VMX_EXIT_INT_WINDOW: return hmR0VmxExitIntWindowNested(pVCpu, pVmxTransient);
13152 case VMX_EXIT_NMI_WINDOW: return hmR0VmxExitNmiWindowNested(pVCpu, pVmxTransient);
13153 case VMX_EXIT_TPR_BELOW_THRESHOLD: return hmR0VmxExitTprBelowThresholdNested(pVCpu, pVmxTransient);
13154 case VMX_EXIT_MWAIT: return hmR0VmxExitMwaitNested(pVCpu, pVmxTransient);
13155 case VMX_EXIT_MONITOR: return hmR0VmxExitMonitorNested(pVCpu, pVmxTransient);
13156 case VMX_EXIT_PAUSE: return hmR0VmxExitPauseNested(pVCpu, pVmxTransient);
13157
13158 case VMX_EXIT_PREEMPT_TIMER:
13159 {
13160 /** @todo NSTVMX: Preempt timer. */
13161 return hmR0VmxExitPreemptTimer(pVCpu, pVmxTransient);
13162 }
13163
13164 case VMX_EXIT_MOV_DRX: return hmR0VmxExitMovDRxNested(pVCpu, pVmxTransient);
13165 case VMX_EXIT_RDPMC: return hmR0VmxExitRdpmcNested(pVCpu, pVmxTransient);
13166
13167 case VMX_EXIT_VMREAD:
13168 case VMX_EXIT_VMWRITE: return hmR0VmxExitVmreadVmwriteNested(pVCpu, pVmxTransient);
13169
13170 case VMX_EXIT_TRIPLE_FAULT: return hmR0VmxExitTripleFaultNested(pVCpu, pVmxTransient);
13171 case VMX_EXIT_ERR_INVALID_GUEST_STATE: return hmR0VmxExitErrInvalidGuestStateNested(pVCpu, pVmxTransient);
13172
13173 case VMX_EXIT_INIT_SIGNAL:
13174 case VMX_EXIT_SIPI:
13175 case VMX_EXIT_IO_SMI:
13176 case VMX_EXIT_SMI:
13177 case VMX_EXIT_ERR_MSR_LOAD:
13178 case VMX_EXIT_ERR_MACHINE_CHECK:
13179 case VMX_EXIT_PML_FULL:
13180 case VMX_EXIT_RSM:
13181 default:
13182 return hmR0VmxExitErrUnexpected(pVCpu, pVmxTransient);
13183 }
13184}
13185#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
13186
13187
13188/** @name VM-exit helpers.
13189 * @{
13190 */
13191/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
13192/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= VM-exit helpers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
13193/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
13194
13195/** Macro for VM-exits called unexpectedly. */
13196#define HMVMX_UNEXPECTED_EXIT_RET(a_pVCpu, a_HmError) \
13197 do { \
13198 (a_pVCpu)->hm.s.u32HMError = (a_HmError); \
13199 return VERR_VMX_UNEXPECTED_EXIT; \
13200 } while (0)
13201
13202#ifdef VBOX_STRICT
13203/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
13204# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
13205 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
13206
13207# define HMVMX_ASSERT_PREEMPT_CPUID() \
13208 do { \
13209 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
13210 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
13211 } while (0)
13212
13213# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
13214 do { \
13215 AssertPtr((a_pVCpu)); \
13216 AssertPtr((a_pVmxTransient)); \
13217 Assert((a_pVmxTransient)->fVMEntryFailed == false); \
13218 Assert((a_pVmxTransient)->pVmcsInfo); \
13219 Assert(ASMIntAreEnabled()); \
13220 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
13221 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
13222 Log4Func(("vcpu[%RU32]\n", (a_pVCpu)->idCpu)); \
13223 HMVMX_ASSERT_PREEMPT_SAFE(a_pVCpu); \
13224 if (!VMMRZCallRing3IsEnabled((a_pVCpu))) \
13225 HMVMX_ASSERT_PREEMPT_CPUID(); \
13226 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
13227 } while (0)
13228
13229# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
13230 do { \
13231 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); \
13232 Assert((a_pVmxTransient)->fIsNestedGuest); \
13233 } while (0)
13234
13235# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
13236 do { \
13237 Log4Func(("\n")); \
13238 } while (0)
13239#else
13240# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
13241 do { \
13242 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
13243 NOREF((a_pVCpu)); NOREF((a_pVmxTransient)); \
13244 } while (0)
13245
13246# define HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) \
13247 do { HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient); } while (0)
13248
13249# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(a_pVCpu, a_pVmxTransient) do { } while (0)
13250#endif
13251
13252#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13253/** Macro that performs the necessary privilege checks for VM-exits intercepted
13254 * because the guest attempted to execute a VMX instruction. */
13255# define HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(a_pVCpu, a_uExitReason) \
13256 do \
13257 { \
13258 VBOXSTRICTRC rcStrictTmp = hmR0VmxCheckExitDueToVmxInstr((a_pVCpu), (a_uExitReason)); \
13259 if (rcStrictTmp == VINF_SUCCESS) \
13260 { /* likely */ } \
13261 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
13262 { \
13263 Assert((a_pVCpu)->hm.s.Event.fPending); \
13264 Log4Func(("Privilege checks failed -> %#x\n", VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo))); \
13265 return VINF_SUCCESS; \
13266 } \
13267 else \
13268 { \
13269 int rcTmp = VBOXSTRICTRC_VAL(rcStrictTmp); \
13270 AssertMsgFailedReturn(("Unexpected failure. rc=%Rrc", rcTmp), rcTmp); \
13271 } \
13272 } while (0)
13273
13274/** Macro that decodes a memory operand for a VM-exit caused by an instruction. */
13275# define HMVMX_DECODE_MEM_OPERAND(a_pVCpu, a_uExitInstrInfo, a_uExitQual, a_enmMemAccess, a_pGCPtrEffAddr) \
13276 do \
13277 { \
13278 VBOXSTRICTRC rcStrictTmp = hmR0VmxDecodeMemOperand((a_pVCpu), (a_uExitInstrInfo), (a_uExitQual), (a_enmMemAccess), \
13279 (a_pGCPtrEffAddr)); \
13280 if (rcStrictTmp == VINF_SUCCESS) \
13281 { /* likely */ } \
13282 else if (rcStrictTmp == VINF_HM_PENDING_XCPT) \
13283 { \
13284 uint8_t const uXcptTmp = VMX_ENTRY_INT_INFO_VECTOR((a_pVCpu)->hm.s.Event.u64IntInfo); \
13285 Log4Func(("Memory operand decoding failed, raising xcpt %#x\n", uXcptTmp)); \
13286 NOREF(uXcptTmp); \
13287 return VINF_SUCCESS; \
13288 } \
13289 else \
13290 { \
13291 Log4Func(("hmR0VmxDecodeMemOperand failed. rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrictTmp))); \
13292 return rcStrictTmp; \
13293 } \
13294 } while (0)
13295#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
13296
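/*
 * Illustrative sketch (not part of the build): the usual shape of a handler that uses
 * the two macros above for a VMX instruction with a memory operand.  This is purely
 * hypothetical (the handler name and the exact transient fields used are assumptions)
 * and it omits the VMCS reads and the IEM call a real handler performs:
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleExitVmxInstrWithMemOperand(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    /* Raises #UD and returns early if the privilege/mode checks fail. */
    HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);

    /* Decodes the memory operand; raises #GP/#SS and returns early on failure. */
    RTGCPTR GCPtrOperand;
    HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual,
                             VMXMEMACCESS_READ, &GCPtrOperand);

    /* ... hand GCPtrOperand to the instruction emulation here ... */
    return VINF_SUCCESS;
}
#endif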
13297
13298/**
13299 * Advances the guest RIP by the specified number of bytes.
13300 *
13301 * @param pVCpu The cross context virtual CPU structure.
13302 * @param cbInstr Number of bytes to advance the RIP by.
13303 *
13304 * @remarks No-long-jump zone!!!
13305 */
13306DECLINLINE(void) hmR0VmxAdvanceGuestRipBy(PVMCPUCC pVCpu, uint32_t cbInstr)
13307{
13308 /* Advance the RIP. */
13309 pVCpu->cpum.GstCtx.rip += cbInstr;
13310 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
13311
13312 /* Update interrupt inhibition. */
13313 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13314 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
13315 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
13316}
13317
13318
13319/**
13320 * Advances the guest RIP after reading it from the VMCS.
13321 *
13322 * @returns VBox status code, no informational status codes.
13323 * @param pVCpu The cross context virtual CPU structure.
13324 * @param pVmxTransient The VMX-transient structure.
13325 *
13326 * @remarks No-long-jump zone!!!
13327 */
13328static int hmR0VmxAdvanceGuestRip(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13329{
13330 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
13331 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
13332 AssertRCReturn(rc, rc);
13333
13334 hmR0VmxAdvanceGuestRipBy(pVCpu, pVmxTransient->cbExitInstr);
13335 return VINF_SUCCESS;
13336}
13337
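/*
 * Illustrative sketch (not part of the build): a common pattern for simple instruction
 * VM-exits is to perform the instruction's side effects and then skip it with
 * hmR0VmxAdvanceGuestRip().  This hypothetical handler only shows that shape; it is
 * not one of the real exit handlers:
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleExitSimpleInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
{
    /* ... emulate the instruction's effects here ... */

    /* Reads the instruction length and RIP/RFLAGS, then advances RIP past the instruction. */
    int rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}
#endif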
13338
13339/**
13340 * Handles a condition that occurred while delivering an event through the guest or
13341 * nested-guest IDT.
13342 *
13343 * @returns Strict VBox status code (i.e. informational status codes too).
13344 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
13345 * @retval VINF_HM_DOUBLE_FAULT if a \#DF condition was detected and we ought
13346 *          to continue execution of the guest which will deliver the \#DF.
13347 * @retval VINF_EM_RESET if we detected a triple-fault condition.
13348 * @retval VERR_EM_GUEST_CPU_HANG if we detected a guest CPU hang.
13349 *
13350 * @param pVCpu The cross context virtual CPU structure.
13351 * @param pVmxTransient The VMX-transient structure.
13352 *
13353 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
13354 * Additionally, HMVMX_READ_EXIT_QUALIFICATION is required if the VM-exit
13355 * is due to an EPT violation, PML full or SPP-related event.
13356 *
13357 * @remarks No-long-jump zone!!!
13358 */
13359static VBOXSTRICTRC hmR0VmxCheckExitDueToEventDelivery(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13360{
13361 Assert(!pVCpu->hm.s.Event.fPending);
13362 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
13363 if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
13364 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
13365 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
13366 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_EXIT_QUALIFICATION);
13367
13368 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
13369 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
13370 uint32_t const uIdtVectorInfo = pVmxTransient->uIdtVectoringInfo;
13371 uint32_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
13372 if (VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectorInfo))
13373 {
13374 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(uIdtVectorInfo);
13375 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectorInfo);
13376
13377 /*
13378 * If the event was a software interrupt (generated with INT n) or a software exception
13379 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we
13380 * can handle the VM-exit and continue guest execution which will re-execute the
13381 * instruction rather than re-injecting the exception, as that can cause premature
13382 * trips to ring-3 before injection and involve TRPM which currently has no way of
13383 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses
13384 * the problem).
13385 */
13386 IEMXCPTRAISE enmRaise;
13387 IEMXCPTRAISEINFO fRaiseInfo;
13388 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
13389 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
13390 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
13391 {
13392 enmRaise = IEMXCPTRAISE_REEXEC_INSTR;
13393 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
13394 }
13395 else if (VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo))
13396 {
13397 uint32_t const uExitVectorType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
13398 uint8_t const uExitVector = VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo);
13399 Assert(uExitVectorType == VMX_EXIT_INT_INFO_TYPE_HW_XCPT);
13400
13401 uint32_t const fIdtVectorFlags = hmR0VmxGetIemXcptFlags(uIdtVector, uIdtVectorType);
13402 uint32_t const fExitVectorFlags = hmR0VmxGetIemXcptFlags(uExitVector, uExitVectorType);
13403
13404 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo);
13405
13406 /* Determine a vectoring #PF condition, see comment in hmR0VmxExitXcptPF(). */
13407 if (fRaiseInfo & (IEMXCPTRAISEINFO_EXT_INT_PF | IEMXCPTRAISEINFO_NMI_PF))
13408 {
13409 pVmxTransient->fVectoringPF = true;
13410 enmRaise = IEMXCPTRAISE_PREV_EVENT;
13411 }
13412 }
13413 else
13414 {
13415 /*
13416 * If an exception or hardware interrupt delivery caused an EPT violation/misconfig or APIC access
13417 * VM-exit, then the VM-exit interruption-information will not be valid and we end up here.
13418 * It is sufficient to reflect the original event to the guest after handling the VM-exit.
13419 */
13420 Assert( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
13421 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
13422 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
13423 enmRaise = IEMXCPTRAISE_PREV_EVENT;
13424 fRaiseInfo = IEMXCPTRAISEINFO_NONE;
13425 }
13426
13427 /*
13428 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig
13429 * etc.) occurred while delivering the NMI, we need to clear the block-by-NMI field in the guest
13430 * interruptibility-state before re-delivering the NMI after handling the VM-exit. Otherwise the
13431 * subsequent VM-entry would fail, see @bugref{7445}.
13432 *
13433 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception".
13434 */
13435 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI
13436 && enmRaise == IEMXCPTRAISE_PREV_EVENT
13437 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
13438 && CPUMIsGuestNmiBlocking(pVCpu))
13439 {
13440 CPUMSetGuestNmiBlocking(pVCpu, false);
13441 }
13442
13443 switch (enmRaise)
13444 {
13445 case IEMXCPTRAISE_CURRENT_XCPT:
13446 {
13447 Log4Func(("IDT: Pending secondary Xcpt: idtinfo=%#RX64 exitinfo=%#RX64\n", uIdtVectorInfo, uExitIntInfo));
13448 Assert(rcStrict == VINF_SUCCESS);
13449 break;
13450 }
13451
13452 case IEMXCPTRAISE_PREV_EVENT:
13453 {
13454 uint32_t u32ErrCode;
13455 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectorInfo))
13456 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
13457 else
13458 u32ErrCode = 0;
13459
13460 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF, see hmR0VmxExitXcptPF(). */
13461 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectReflect);
13462 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(uIdtVectorInfo), 0 /* cbInstr */,
13463 u32ErrCode, pVCpu->cpum.GstCtx.cr2);
13464
13465 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntInfo,
13466 pVCpu->hm.s.Event.u32ErrCode));
13467 Assert(rcStrict == VINF_SUCCESS);
13468 break;
13469 }
13470
13471 case IEMXCPTRAISE_REEXEC_INSTR:
13472 Assert(rcStrict == VINF_SUCCESS);
13473 break;
13474
13475 case IEMXCPTRAISE_DOUBLE_FAULT:
13476 {
13477 /*
13478             * Determine a vectoring double #PF condition. Used later, when PGM evaluates the
13479 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF.
13480 */
13481 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF)
13482 {
13483 pVmxTransient->fVectoringDoublePF = true;
13484 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo,
13485 pVCpu->cpum.GstCtx.cr2));
13486 rcStrict = VINF_SUCCESS;
13487 }
13488 else
13489 {
13490 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectConvertDF);
13491 hmR0VmxSetPendingXcptDF(pVCpu);
13492 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo,
13493 uIdtVector, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
13494 rcStrict = VINF_HM_DOUBLE_FAULT;
13495 }
13496 break;
13497 }
13498
13499 case IEMXCPTRAISE_TRIPLE_FAULT:
13500 {
13501 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector,
13502 VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo)));
13503 rcStrict = VINF_EM_RESET;
13504 break;
13505 }
13506
13507 case IEMXCPTRAISE_CPU_HANG:
13508 {
13509 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo));
13510 rcStrict = VERR_EM_GUEST_CPU_HANG;
13511 break;
13512 }
13513
13514 default:
13515 {
13516 AssertMsgFailed(("IDT: vcpu[%RU32] Unexpected/invalid value! enmRaise=%#x\n", pVCpu->idCpu, enmRaise));
13517 rcStrict = VERR_VMX_IPE_2;
13518 break;
13519 }
13520 }
13521 }
13522 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
13523 && !CPUMIsGuestNmiBlocking(pVCpu))
13524 {
13525 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
13526 && VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo) != X86_XCPT_DF
13527 && VMX_EXIT_INT_INFO_IS_NMI_UNBLOCK_IRET(uExitIntInfo))
13528 {
13529 /*
13530 * Execution of IRET caused a fault when NMI blocking was in effect (i.e we're in
13531 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so
13532 * that virtual NMIs remain blocked until the IRET execution is completed.
13533 *
13534 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
13535 */
13536 CPUMSetGuestNmiBlocking(pVCpu, true);
13537 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
13538 }
13539 else if ( pVmxTransient->uExitReason == VMX_EXIT_EPT_VIOLATION
13540 || pVmxTransient->uExitReason == VMX_EXIT_PML_FULL
13541 || pVmxTransient->uExitReason == VMX_EXIT_SPP_EVENT)
13542 {
13543 /*
13544 * Execution of IRET caused an EPT violation, page-modification log-full event or
13545 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the
13546 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so
13547 * that virtual NMIs remain blocked until the IRET execution is completed.
13548 *
13549 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET"
13550 */
13551 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
13552 {
13553 CPUMSetGuestNmiBlocking(pVCpu, true);
13554 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
13555 }
13556 }
13557 }
13558
13559 Assert( rcStrict == VINF_SUCCESS || rcStrict == VINF_HM_DOUBLE_FAULT
13560 || rcStrict == VINF_EM_RESET || rcStrict == VERR_EM_GUEST_CPU_HANG);
13561 return rcStrict;
13562}
13563
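/*
 * Illustrative sketch (not part of the build): the outcomes of the IDT-vectoring logic
 * above reduce to a small set of actions.  The hypothetical helper below only shows
 * that high-level mapping; the real decision is made by IEMEvaluateRecursiveXcpt() and
 * the surrounding code:
 */
#if 0 /* example only */
typedef enum EXAMPLEIDTACTION
{
    kExampleIdtAction_ReexecInstr = 0, /* Software int/exception (INT n, INT3, INTO, INT1): re-run the instruction. */
    kExampleIdtAction_ReflectEvent,    /* Re-inject the original event after handling the VM-exit. */
    kExampleIdtAction_DoubleFault,     /* Two contributory faults (e.g. #PF while delivering #PF): raise #DF. */
    kExampleIdtAction_TripleFault      /* Fault while delivering #DF: return VINF_EM_RESET. */
} EXAMPLEIDTACTION;

static EXAMPLEIDTACTION exampleClassifyIdtVectoring(bool fPrevWasSoftware, bool fPrevWasDoubleFault, bool fBothContributory)
{
    if (fPrevWasSoftware)
        return kExampleIdtAction_ReexecInstr;
    if (fPrevWasDoubleFault)
        return kExampleIdtAction_TripleFault;
    if (fBothContributory)
        return kExampleIdtAction_DoubleFault;
    return kExampleIdtAction_ReflectEvent;
}
#endif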
13564
13565#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13566/**
13567 * Performs the relevant VMX instruction checks for VM-exits that occurred due to the
13568 * guest attempting to execute a VMX instruction.
13569 *
13570 * @returns Strict VBox status code (i.e. informational status codes too).
13571 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
13572 * @retval VINF_HM_PENDING_XCPT if an exception was raised.
13573 *
13574 * @param pVCpu The cross context virtual CPU structure.
13575 * @param uExitReason The VM-exit reason.
13576 *
13577 * @todo NSTVMX: Document other error codes when VM-exit is implemented.
13578 * @remarks No-long-jump zone!!!
13579 */
13580static VBOXSTRICTRC hmR0VmxCheckExitDueToVmxInstr(PVMCPUCC pVCpu, uint32_t uExitReason)
13581{
13582 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS
13583 | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
13584
13585 /*
13586 * The physical CPU would have already checked the CPU mode/code segment.
13587 * We shall just assert here for paranoia.
13588 * See Intel spec. 25.1.1 "Relative Priority of Faults and VM Exits".
13589 */
13590 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
13591 Assert( !CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx)
13592 || CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx));
13593
13594 if (uExitReason == VMX_EXIT_VMXON)
13595 {
13596 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
13597
13598 /*
13599 * We check CR4.VMXE because it is required to be always set while in VMX operation
13600 * by physical CPUs and our CR4 read-shadow is only consulted when executing specific
13601 * instructions (CLTS, LMSW, MOV CR, and SMSW) and thus doesn't affect CPU operation
13602 * otherwise (i.e. physical CPU won't automatically #UD if Cr4Shadow.VMXE is 0).
13603 */
13604 if (!CPUMIsGuestVmxEnabled(&pVCpu->cpum.GstCtx))
13605 {
13606 Log4Func(("CR4.VMXE is not set -> #UD\n"));
13607 hmR0VmxSetPendingXcptUD(pVCpu);
13608 return VINF_HM_PENDING_XCPT;
13609 }
13610 }
13611 else if (!CPUMIsGuestInVmxRootMode(&pVCpu->cpum.GstCtx))
13612 {
13613 /*
13614 * The guest has not entered VMX operation but attempted to execute a VMX instruction
13615         * (other than VMXON), so we need to raise a #UD.
13616 */
13617 Log4Func(("Not in VMX root mode -> #UD\n"));
13618 hmR0VmxSetPendingXcptUD(pVCpu);
13619 return VINF_HM_PENDING_XCPT;
13620 }
13621
13622 /* All other checks (including VM-exit intercepts) are handled by IEM instruction emulation. */
13623 return VINF_SUCCESS;
13624}
13625
13626
13627/**
13628 * Decodes the memory operand of an instruction that caused a VM-exit.
13629 *
13630 * The Exit qualification field provides the displacement field for memory
13631 * operand instructions, if any.
13632 *
13633 * @returns Strict VBox status code (i.e. informational status codes too).
13634 * @retval VINF_SUCCESS if the operand was successfully decoded.
13635 * @retval VINF_HM_PENDING_XCPT if an exception was raised while decoding the
13636 * operand.
13637 * @param pVCpu The cross context virtual CPU structure.
13638 * @param uExitInstrInfo The VM-exit instruction information field.
13639 * @param enmMemAccess The memory operand's access type (read or write).
13640 * @param GCPtrDisp The instruction displacement field, if any. For
13641 * RIP-relative addressing pass RIP + displacement here.
13642 * @param pGCPtrMem Where to store the effective destination memory address.
13643 *
13644 * @remarks Warning! This function ASSUMES the instruction cannot be used in real or
13645 * virtual-8086 mode hence skips those checks while verifying if the
13646 * segment is valid.
13647 */
13648static VBOXSTRICTRC hmR0VmxDecodeMemOperand(PVMCPUCC pVCpu, uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp, VMXMEMACCESS enmMemAccess,
13649 PRTGCPTR pGCPtrMem)
13650{
13651 Assert(pGCPtrMem);
13652 Assert(!CPUMIsGuestInRealOrV86Mode(pVCpu));
13653 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_EFER
13654 | CPUMCTX_EXTRN_CR0);
13655
13656 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
13657 static uint64_t const s_auAccessSizeMasks[] = { sizeof(uint16_t), sizeof(uint32_t), sizeof(uint64_t) };
13658 AssertCompile(RT_ELEMENTS(s_auAccessSizeMasks) == RT_ELEMENTS(s_auAddrSizeMasks));
13659
13660 VMXEXITINSTRINFO ExitInstrInfo;
13661 ExitInstrInfo.u = uExitInstrInfo;
13662 uint8_t const uAddrSize = ExitInstrInfo.All.u3AddrSize;
13663 uint8_t const iSegReg = ExitInstrInfo.All.iSegReg;
13664 bool const fIdxRegValid = !ExitInstrInfo.All.fIdxRegInvalid;
13665 uint8_t const iIdxReg = ExitInstrInfo.All.iIdxReg;
13666 uint8_t const uScale = ExitInstrInfo.All.u2Scaling;
13667 bool const fBaseRegValid = !ExitInstrInfo.All.fBaseRegInvalid;
13668 uint8_t const iBaseReg = ExitInstrInfo.All.iBaseReg;
13669 bool const fIsMemOperand = !ExitInstrInfo.All.fIsRegOperand;
13670 bool const fIsLongMode = CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx);
13671
13672 /*
13673 * Validate instruction information.
13674 * This shouldn't happen on real hardware but useful while testing our nested hardware-virtualization code.
13675     * This shouldn't happen on real hardware but is useful while testing our nested hardware-virtualization code.
13676 AssertLogRelMsgReturn(uAddrSize < RT_ELEMENTS(s_auAddrSizeMasks),
13677 ("Invalid address size. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_1);
13678 AssertLogRelMsgReturn(iSegReg < X86_SREG_COUNT,
13679 ("Invalid segment register. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_2);
13680 AssertLogRelMsgReturn(fIsMemOperand,
13681 ("Expected memory operand. ExitInstrInfo=%#RX32\n", ExitInstrInfo.u), VERR_VMX_IPE_3);
13682
13683 /*
13684 * Compute the complete effective address.
13685 *
13686 * See AMD instruction spec. 1.4.2 "SIB Byte Format"
13687 * See AMD spec. 4.5.2 "Segment Registers".
13688 */
13689 RTGCPTR GCPtrMem = GCPtrDisp;
13690 if (fBaseRegValid)
13691 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iBaseReg].u64;
13692 if (fIdxRegValid)
13693 GCPtrMem += pVCpu->cpum.GstCtx.aGRegs[iIdxReg].u64 << uScale;
13694
13695 RTGCPTR const GCPtrOff = GCPtrMem;
13696 if ( !fIsLongMode
13697 || iSegReg >= X86_SREG_FS)
13698 GCPtrMem += pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
13699 GCPtrMem &= s_auAddrSizeMasks[uAddrSize];
13700
13701 /*
13702 * Validate effective address.
13703 * See AMD spec. 4.5.3 "Segment Registers in 64-Bit Mode".
13704 */
13705 uint8_t const cbAccess = s_auAccessSizeMasks[uAddrSize];
13706 Assert(cbAccess > 0);
13707 if (fIsLongMode)
13708 {
13709 if (X86_IS_CANONICAL(GCPtrMem))
13710 {
13711 *pGCPtrMem = GCPtrMem;
13712 return VINF_SUCCESS;
13713 }
13714
13715 /** @todo r=ramshankar: We should probably raise \#SS or \#GP. See AMD spec. 4.12.2
13716 * "Data Limit Checks in 64-bit Mode". */
13717 Log4Func(("Long mode effective address is not canonical GCPtrMem=%#RX64\n", GCPtrMem));
13718 hmR0VmxSetPendingXcptGP(pVCpu, 0);
13719 return VINF_HM_PENDING_XCPT;
13720 }
13721
13722 /*
13723 * This is a watered down version of iemMemApplySegment().
13724 * Parts that are not applicable for VMX instructions like real-or-v8086 mode
13725 * and segment CPL/DPL checks are skipped.
13726 */
13727 RTGCPTR32 const GCPtrFirst32 = (RTGCPTR32)GCPtrOff;
13728 RTGCPTR32 const GCPtrLast32 = GCPtrFirst32 + cbAccess - 1;
13729 PCCPUMSELREG pSel = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
13730
13731 /* Check if the segment is present and usable. */
13732 if ( pSel->Attr.n.u1Present
13733 && !pSel->Attr.n.u1Unusable)
13734 {
13735 Assert(pSel->Attr.n.u1DescType);
13736 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
13737 {
13738 /* Check permissions for the data segment. */
13739 if ( enmMemAccess == VMXMEMACCESS_WRITE
13740 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE))
13741 {
13742 Log4Func(("Data segment access invalid. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
13743 hmR0VmxSetPendingXcptGP(pVCpu, iSegReg);
13744 return VINF_HM_PENDING_XCPT;
13745 }
13746
13747 /* Check limits if it's a normal data segment. */
13748 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
13749 {
13750 if ( GCPtrFirst32 > pSel->u32Limit
13751 || GCPtrLast32 > pSel->u32Limit)
13752 {
13753 Log4Func(("Data segment limit exceeded. "
13754 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
13755 GCPtrLast32, pSel->u32Limit));
13756 if (iSegReg == X86_SREG_SS)
13757 hmR0VmxSetPendingXcptSS(pVCpu, 0);
13758 else
13759 hmR0VmxSetPendingXcptGP(pVCpu, 0);
13760 return VINF_HM_PENDING_XCPT;
13761 }
13762 }
13763 else
13764 {
13765 /* Check limits if it's an expand-down data segment.
13766 Note! The upper boundary is defined by the B bit, not the G bit! */
13767 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
13768 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
13769 {
13770 Log4Func(("Expand-down data segment limit exceeded. "
13771 "iSegReg=%#x GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n", iSegReg, GCPtrFirst32,
13772 GCPtrLast32, pSel->u32Limit));
13773 if (iSegReg == X86_SREG_SS)
13774 hmR0VmxSetPendingXcptSS(pVCpu, 0);
13775 else
13776 hmR0VmxSetPendingXcptGP(pVCpu, 0);
13777 return VINF_HM_PENDING_XCPT;
13778 }
13779 }
13780 }
13781 else
13782 {
13783 /* Check permissions for the code segment. */
13784 if ( enmMemAccess == VMXMEMACCESS_WRITE
13785 || ( enmMemAccess == VMXMEMACCESS_READ
13786 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)))
13787 {
13788 Log4Func(("Code segment access invalid. Attr=%#RX32\n", pSel->Attr.u));
13789 Assert(!CPUMIsGuestInRealOrV86ModeEx(&pVCpu->cpum.GstCtx));
13790 hmR0VmxSetPendingXcptGP(pVCpu, 0);
13791 return VINF_HM_PENDING_XCPT;
13792 }
13793
13794 /* Check limits for the code segment (normal/expand-down not applicable for code segments). */
13795 if ( GCPtrFirst32 > pSel->u32Limit
13796 || GCPtrLast32 > pSel->u32Limit)
13797 {
13798 Log4Func(("Code segment limit exceeded. GCPtrFirst32=%#RX32 GCPtrLast32=%#RX32 u32Limit=%#RX32\n",
13799 GCPtrFirst32, GCPtrLast32, pSel->u32Limit));
13800 if (iSegReg == X86_SREG_SS)
13801 hmR0VmxSetPendingXcptSS(pVCpu, 0);
13802 else
13803 hmR0VmxSetPendingXcptGP(pVCpu, 0);
13804 return VINF_HM_PENDING_XCPT;
13805 }
13806 }
13807 }
13808 else
13809 {
13810 Log4Func(("Not present or unusable segment. iSegReg=%#x Attr=%#RX32\n", iSegReg, pSel->Attr.u));
13811 hmR0VmxSetPendingXcptGP(pVCpu, 0);
13812 return VINF_HM_PENDING_XCPT;
13813 }
13814
13815 *pGCPtrMem = GCPtrMem;
13816 return VINF_SUCCESS;
13817}
13818#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
13819
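/*
 * Illustrative sketch (not part of the build): the effective-address computation done
 * by hmR0VmxDecodeMemOperand() above, reduced to plain integers.  Hypothetical helper,
 * with no segment or limit checks:
 */
#if 0 /* example only */
static uint64_t exampleCalcEffAddr(uint64_t uDisp, bool fBaseValid, uint64_t uBase,
                                   bool fIdxValid, uint64_t uIndex, uint8_t uScale,
                                   uint64_t uSegBase, uint64_t fAddrMask)
{
    uint64_t GCPtr = uDisp;            /* Displacement (RIP + displacement for RIP-relative addressing). */
    if (fBaseValid)
        GCPtr += uBase;                /* Base register. */
    if (fIdxValid)
        GCPtr += uIndex << uScale;     /* Scaled index register. */
    GCPtr += uSegBase;                 /* Segment base (0 in long mode except for FS/GS). */
    return GCPtr & fAddrMask;          /* Truncate to the instruction's address size. */
}
#endif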
13820
13821/**
13822 * VM-exit helper for LMSW.
13823 */
13824static VBOXSTRICTRC hmR0VmxExitLmsw(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint16_t uMsw, RTGCPTR GCPtrEffDst)
13825{
13826 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
13827 AssertRCReturn(rc, rc);
13828
13829 VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, GCPtrEffDst);
13830 AssertMsg( rcStrict == VINF_SUCCESS
13831 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13832
13833 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
13834 if (rcStrict == VINF_IEM_RAISED_XCPT)
13835 {
13836 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13837 rcStrict = VINF_SUCCESS;
13838 }
13839
13840 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
13841 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13842 return rcStrict;
13843}
13844
13845
13846/**
13847 * VM-exit helper for CLTS.
13848 */
13849static VBOXSTRICTRC hmR0VmxExitClts(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr)
13850{
13851 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
13852 AssertRCReturn(rc, rc);
13853
13854 VBOXSTRICTRC rcStrict = IEMExecDecodedClts(pVCpu, cbInstr);
13855 AssertMsg( rcStrict == VINF_SUCCESS
13856 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13857
13858 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
13859 if (rcStrict == VINF_IEM_RAISED_XCPT)
13860 {
13861 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13862 rcStrict = VINF_SUCCESS;
13863 }
13864
13865 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
13866 Log4Func(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13867 return rcStrict;
13868}
13869
13870
13871/**
13872 * VM-exit helper for MOV from CRx (CRx read).
13873 */
13874static VBOXSTRICTRC hmR0VmxExitMovFromCrX(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
13875{
13876 Assert(iCrReg < 16);
13877 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
13878
13879 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
13880 AssertRCReturn(rc, rc);
13881
13882 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
13883 AssertMsg( rcStrict == VINF_SUCCESS
13884 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13885
13886 if (iGReg == X86_GREG_xSP)
13887 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_RSP);
13888 else
13889 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
13890#ifdef VBOX_WITH_STATISTICS
13891 switch (iCrReg)
13892 {
13893 case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break;
13894 case 2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break;
13895 case 3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break;
13896 case 4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break;
13897 case 8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break;
13898 }
13899#endif
13900 Log4Func(("CR%d Read access rcStrict=%Rrc\n", iCrReg, VBOXSTRICTRC_VAL(rcStrict)));
13901 return rcStrict;
13902}
13903
13904
13905/**
13906 * VM-exit helper for MOV to CRx (CRx write).
13907 */
13908static VBOXSTRICTRC hmR0VmxExitMovToCrX(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
13909{
13910 HMVMX_CPUMCTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
13911
13912 VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
13913 AssertMsg( rcStrict == VINF_SUCCESS
13914 || rcStrict == VINF_IEM_RAISED_XCPT
13915 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13916
13917 switch (iCrReg)
13918 {
13919 case 0:
13920 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
13921 | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
13922 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
13923 Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
13924 break;
13925
13926 case 2:
13927 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write);
13928            /* Nothing to do here, CR2 is not part of the VMCS. */
13929 break;
13930
13931 case 3:
13932 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR3);
13933 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write);
13934 Log4Func(("CR3 write. rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr3));
13935 break;
13936
13937 case 4:
13938 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR4);
13939 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write);
13940 Log4Func(("CR4 write. rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict),
13941 pVCpu->cpum.GstCtx.cr4, pVCpu->hmr0.s.fLoadSaveGuestXcr0));
13942 break;
13943
13944 case 8:
13945 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
13946 HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_APIC_TPR);
13947 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write);
13948 break;
13949
13950 default:
13951 AssertMsgFailed(("Invalid CRx register %#x\n", iCrReg));
13952 break;
13953 }
13954
13955 if (rcStrict == VINF_IEM_RAISED_XCPT)
13956 {
13957 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
13958 rcStrict = VINF_SUCCESS;
13959 }
13960 return rcStrict;
13961}
13962
13963
13964/**
13965 * VM-exit exception handler for \#PF (Page-fault exception).
13966 *
13967 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
13968 */
13969static VBOXSTRICTRC hmR0VmxExitXcptPF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
13970{
13971 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
13972 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
13973 hmR0VmxReadExitQualVmcs(pVmxTransient);
13974
13975 if (!pVM->hmr0.s.fNestedPaging)
13976 { /* likely */ }
13977 else
13978 {
13979#if !defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) && !defined(HMVMX_ALWAYS_TRAP_PF)
13980 Assert(pVmxTransient->fIsNestedGuest || pVCpu->hmr0.s.fUsingDebugLoop);
13981#endif
13982 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
13983 if (!pVmxTransient->fVectoringDoublePF)
13984 {
13985 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
13986 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual);
13987 }
13988 else
13989 {
13990 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
13991 Assert(!pVmxTransient->fIsNestedGuest);
13992 hmR0VmxSetPendingXcptDF(pVCpu);
13993 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n"));
13994 }
13995 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
13996 return VINF_SUCCESS;
13997 }
13998
13999 Assert(!pVmxTransient->fIsNestedGuest);
14000
14001    /* If it's a vectoring #PF, emulate injecting the original event as PGMTrap0eHandler() is incapable
14002 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */
14003 if (pVmxTransient->fVectoringPF)
14004 {
14005 Assert(pVCpu->hm.s.Event.fPending);
14006 return VINF_EM_RAW_INJECT_TRPM_EVENT;
14007 }
14008
14009 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14010 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
14011 AssertRCReturn(rc, rc);
14012
14013 Log4Func(("#PF: cs:rip=%#04x:%#RX64 err_code=%#RX32 exit_qual=%#RX64 cr3=%#RX64\n", pCtx->cs.Sel, pCtx->rip,
14014 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual, pCtx->cr3));
14015
14016 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQual, (RTGCUINT)pVmxTransient->uExitIntErrorCode);
14017 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntErrorCode, CPUMCTX2CORE(pCtx), (RTGCPTR)pVmxTransient->uExitQual);
14018
14019 Log4Func(("#PF: rc=%Rrc\n", rc));
14020 if (rc == VINF_SUCCESS)
14021 {
14022 /*
14023 * This is typically a shadow page table sync or a MMIO instruction. But we may have
14024 * emulated something like LTR or a far jump. Any part of the CPU context may have changed.
14025 */
14026 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
14027 TRPMResetTrap(pVCpu);
14028 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
14029 return rc;
14030 }
14031
14032 if (rc == VINF_EM_RAW_GUEST_TRAP)
14033 {
14034 if (!pVmxTransient->fVectoringDoublePF)
14035 {
14036 /* It's a guest page fault and needs to be reflected to the guest. */
14037 uint32_t const uGstErrorCode = TRPMGetErrorCode(pVCpu);
14038 TRPMResetTrap(pVCpu);
14039 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
14040 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 0 /* cbInstr */,
14041 uGstErrorCode, pVmxTransient->uExitQual);
14042 }
14043 else
14044 {
14045 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
14046 TRPMResetTrap(pVCpu);
14047 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
14048 hmR0VmxSetPendingXcptDF(pVCpu);
14049 Log4Func(("#PF: Pending #DF due to vectoring #PF\n"));
14050 }
14051
14052 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
14053 return VINF_SUCCESS;
14054 }
14055
14056 TRPMResetTrap(pVCpu);
14057 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
14058 return rc;
14059}
14060
14061
14062/**
14063 * VM-exit exception handler for \#MF (Math Fault: floating point exception).
14064 *
14065 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
14066 */
14067static VBOXSTRICTRC hmR0VmxExitXcptMF(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14068{
14069 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14070 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
14071
14072 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0);
14073 AssertRCReturn(rc, rc);
14074
14075 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE))
14076 {
14077 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
14078 rc = PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13, 1, 0 /* uTagSrc */);
14079
14080 /** @todo r=ramshankar: The Intel spec. does -not- specify that this VM-exit
14081 * provides VM-exit instruction length. If this causes problem later,
14082 * disassemble the instruction like it's done on AMD-V. */
14083 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
14084 AssertRCReturn(rc2, rc2);
14085 return rc;
14086 }
14087
14088 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbExitInstr,
14089 pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
14090 return VINF_SUCCESS;
14091}
14092
14093
14094/**
14095 * VM-exit exception handler for \#BP (Breakpoint exception).
14096 *
14097 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
14098 */
14099static VBOXSTRICTRC hmR0VmxExitXcptBP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14100{
14101 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14102 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
14103
14104 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
14105 AssertRCReturn(rc, rc);
14106
14107 if (!pVmxTransient->fIsNestedGuest)
14108 rc = DBGFTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(&pVCpu->cpum.GstCtx));
14109 else
14110 rc = VINF_EM_RAW_GUEST_TRAP;
14111
14112 if (rc == VINF_EM_RAW_GUEST_TRAP)
14113 {
14114 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
14115 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
14116 rc = VINF_SUCCESS;
14117 }
14118
14119 Assert(rc == VINF_SUCCESS || rc == VINF_EM_DBG_BREAKPOINT);
14120 return rc;
14121}
14122
14123
14124/**
14125 * VM-exit exception handler for \#AC (Alignment-check exception).
14126 *
14127 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
14128 */
14129static VBOXSTRICTRC hmR0VmxExitXcptAC(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14130{
14131 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14132
14133 /*
14134 * Detect #ACs caused by host having enabled split-lock detection.
14135 * Emulate such instructions.
14136 */
14137 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo,
14138 CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
14139 AssertRCReturn(rc, rc);
14140 /** @todo detect split lock in cpu feature? */
14141 if ( /* 1. If 486-style alignment checks aren't enabled, then this must be a split-lock exception */
14142 !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
14143 /* 2. #AC cannot happen in rings 0-2 except for split-lock detection. */
14144 || CPUMGetGuestCPL(pVCpu) != 3
14145 /* 3. When the EFLAGS.AC != 0 this can only be a split-lock case. */
14146 || !(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_AC) )
14147 {
14148 /*
14149 * Check for debug/trace events and import state accordingly.
14150 */
14151 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitGuestACSplitLock);
14152 PVMCC pVM = pVCpu->pVMR0;
14153 if ( !DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK)
14154 && !VBOXVMM_VMX_SPLIT_LOCK_ENABLED())
14155 {
14156 if (pVM->cCpus == 1)
14157 {
14158#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
14159 rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
14160#else
14161 rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
14162#endif
14163 AssertRCReturn(rc, rc);
14164 }
14165 }
14166 else
14167 {
14168 rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
14169 AssertRCReturn(rc, rc);
14170
14171 VBOXVMM_VMX_SPLIT_LOCK(pVCpu, &pVCpu->cpum.GstCtx);
14172
14173 if (DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_VMX_SPLIT_LOCK))
14174 {
14175 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, DBGFEVENT_VMX_SPLIT_LOCK, DBGFEVENTCTX_HM, 0);
14176 if (rcStrict != VINF_SUCCESS)
14177 return rcStrict;
14178 }
14179 }
14180
14181 /*
14182 * Emulate the instruction.
14183 *
14184 * We have to ignore the LOCK prefix here as we must not retrigger the
14185 * detection on the host. This isn't all that satisfactory, though...
14186 */
14187 if (pVM->cCpus == 1)
14188 {
14189 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC\n", pVCpu->cpum.GstCtx.cs.Sel,
14190 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
14191
14192 /** @todo For SMP configs we should do a rendezvous here. */
14193 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
14194 if (rcStrict == VINF_SUCCESS)
14195#if 0 /** @todo r=bird: This is potentially wrong. Might have to just do a whole state sync above and mark everything changed to be safe... */
14196 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,
14197 HM_CHANGED_GUEST_RIP
14198 | HM_CHANGED_GUEST_RFLAGS
14199 | HM_CHANGED_GUEST_GPRS_MASK
14200 | HM_CHANGED_GUEST_CS
14201 | HM_CHANGED_GUEST_SS);
14202#else
14203 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
14204#endif
14205 else if (rcStrict == VINF_IEM_RAISED_XCPT)
14206 {
14207 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
14208 rcStrict = VINF_SUCCESS;
14209 }
14210 return rcStrict;
14211 }
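      /* More than one VCPU: leave the emulation to ring-3 (EM), which can deal with the other
         VCPUs first (see the rendezvous @todo above). */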
14212 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 split-lock #AC -> VINF_EM_EMULATE_SPLIT_LOCK\n",
14213 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0));
14214 return VINF_EM_EMULATE_SPLIT_LOCK;
14215 }
14216
14217 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitGuestAC);
14218 Log8Func(("cs:rip=%#04x:%#RX64 rflags=%#RX64 cr0=%#RX64 cpl=%d -> #AC\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
14219 pVCpu->cpum.GstCtx.rflags, pVCpu->cpum.GstCtx.cr0, CPUMGetGuestCPL(pVCpu) ));
14220
14221 /* Re-inject it. We'll detect any nesting before getting here. */
14222 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
14223 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
14224 return VINF_SUCCESS;
14225}
14226
14227
14228/**
14229 * VM-exit exception handler for \#DB (Debug exception).
14230 *
14231 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
14232 */
14233static VBOXSTRICTRC hmR0VmxExitXcptDB(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14234{
14235 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14236 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
14237
14238 /*
14239 * Get the DR6-like values from the Exit qualification and pass it to DBGF for processing.
14240 */
14241 hmR0VmxReadExitQualVmcs(pVmxTransient);
14242
14243 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
14244 uint64_t const uDR6 = X86_DR6_INIT_VAL
14245 | (pVmxTransient->uExitQual & ( X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3
14246 | X86_DR6_BD | X86_DR6_BS));
14247
14248 int rc;
14249 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14250 if (!pVmxTransient->fIsNestedGuest)
14251 {
14252 rc = DBGFTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
14253
14254 /*
14255 * Prevents stepping twice over the same instruction when the guest is stepping using
14256 * EFLAGS.TF and the hypervisor debugger is stepping using MTF.
14257 * Testcase: DOSQEMM, break (using "ba x 1") at cs:rip 0x70:0x774 and step (using "t").
14258 */
14259 if ( rc == VINF_EM_DBG_STEPPED
14260 && (pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG))
14261 {
14262 Assert(pVCpu->hm.s.fSingleInstruction);
14263 rc = VINF_EM_RAW_GUEST_TRAP;
14264 }
14265 }
14266 else
14267 rc = VINF_EM_RAW_GUEST_TRAP;
14268 Log6Func(("rc=%Rrc\n", rc));
14269 if (rc == VINF_EM_RAW_GUEST_TRAP)
14270 {
14271 /*
14272 * The exception was for the guest. Update DR6, DR7.GD and
14273 * IA32_DEBUGCTL.LBR before forwarding it.
14274 * See Intel spec. 27.1 "Architectural State before a VM-Exit".
14275 */
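         /* Update DR6 in the guest context; if the guest's debug state is currently loaded on
            this CPU, mirror the new value into the hardware DR6 as well. */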
14276 VMMRZCallRing3Disable(pVCpu);
14277 HM_DISABLE_PREEMPT(pVCpu);
14278
14279 pCtx->dr[6] &= ~X86_DR6_B_MASK;
14280 pCtx->dr[6] |= uDR6;
14281 if (CPUMIsGuestDebugStateActive(pVCpu))
14282 ASMSetDR6(pCtx->dr[6]);
14283
14284 HM_RESTORE_PREEMPT();
14285 VMMRZCallRing3Enable(pVCpu);
14286
14287 rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_DR7);
14288 AssertRCReturn(rc, rc);
14289
14290 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
14291 pCtx->dr[7] &= ~(uint64_t)X86_DR7_GD;
14292
14293 /* Paranoia. */
14294 pCtx->dr[7] &= ~(uint64_t)X86_DR7_RAZ_MASK;
14295 pCtx->dr[7] |= X86_DR7_RA1_MASK;
14296
14297 rc = VMXWriteVmcsNw(VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
14298 AssertRC(rc);
14299
14300 /*
14301 * Raise #DB in the guest.
14302 *
14303 * It is important to reflect exactly what the VM-exit gave us (preserving the
14304 * interruption-type) rather than use hmR0VmxSetPendingXcptDB() as the #DB could've
14305 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may
14306 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}.
14307 *
14308 * Intel re-documented ICEBP/INT1 in May 2018 (previously it was documented only as part
14309 * of the Intel 386), see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
14310 */
14311 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
14312 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
14313 return VINF_SUCCESS;
14314 }
14315
14316 /*
14317 * Not a guest trap, must be a hypervisor related debug event then.
14318 * Update DR6 in case someone is interested in it.
14319 */
14320 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
14321 AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
14322 CPUMSetHyperDR6(pVCpu, uDR6);
14323
14324 return rc;
14325}
14326
14327
14328/**
14329 * Hacks its way around the lovely mesa driver's backdoor accesses.
14330 *
14331 * @sa hmR0SvmHandleMesaDrvGp.
14332 */
14333static int hmR0VmxHandleMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
14334{
14335 LogFunc(("cs:rip=%#04x:%#RX64 rcx=%#RX64 rbx=%#RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->rcx, pCtx->rbx));
14336 RT_NOREF(pCtx);
14337
14338 /* For now we'll just skip the instruction. */
14339 return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
14340}
14341
14342
14343/**
14344 * Checks if the \#GP'ing instruction is the mesa driver doing its lovely
14345 * backdoor logging w/o checking what it is running inside.
14346 *
14347 * This recognizes an "IN EAX,DX" instruction executed in flat ring-3, with the
14348 * backdoor port and magic numbers loaded in registers.
14349 *
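 * The instruction sequence this matches looks roughly as follows (a sketch for
 * illustration only, not lifted from the Mesa sources):
 *     mov  eax, 564d5868h     ; VMware backdoor magic
 *     mov  dx,  5658h         ; VMware backdoor I/O port
 *     in   eax, dx            ; opcode 0xED; #GP(0) at CPL 3 when IOPL/TSS deny the port
 *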
14350 * @returns true if it is, false if it isn't.
14351 * @sa hmR0SvmIsMesaDrvGp.
14352 */
14353DECLINLINE(bool) hmR0VmxIsMesaDrvGp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PCPUMCTX pCtx)
14354{
14355 /* 0xed: IN eAX,dx */
14356 uint8_t abInstr[1];
14357 if (pVmxTransient->cbExitInstr != sizeof(abInstr))
14358 return false;
14359
14360 /* Check that it is #GP(0). */
14361 if (pVmxTransient->uExitIntErrorCode != 0)
14362 return false;
14363
14364 /* Check magic and port. */
14365 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RCX)));
14366 /*Log(("hmR0VmxIsMesaDrvGp: rax=%RX64 rdx=%RX64\n", pCtx->rax, pCtx->rdx));*/
14367 if (pCtx->rax != UINT32_C(0x564d5868))
14368 return false;
14369 if (pCtx->dx != UINT32_C(0x5658))
14370 return false;
14371
14372 /* Flat ring-3 CS. */
14373 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_CS);
14374 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CS));
14375 /*Log(("hmR0VmxIsMesaDrvGp: cs.Attr.n.u2Dpl=%d base=%Rx64\n", pCtx->cs.Attr.n.u2Dpl, pCtx->cs.u64Base));*/
14376 if (pCtx->cs.Attr.n.u2Dpl != 3)
14377 return false;
14378 if (pCtx->cs.u64Base != 0)
14379 return false;
14380
14381 /* Check opcode. */
14382 AssertCompile(HMVMX_CPUMCTX_EXTRN_ALL & CPUMCTX_EXTRN_RIP);
14383 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_RIP));
14384 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abInstr, pCtx->rip, sizeof(abInstr));
14385 /*Log(("hmR0VmxIsMesaDrvGp: PGMPhysSimpleReadGCPtr -> %Rrc %#x\n", rc, abInstr[0]));*/
14386 if (RT_FAILURE(rc))
14387 return false;
14388 if (abInstr[0] != 0xed)
14389 return false;
14390
14391 return true;
14392}
14393
14394
14395/**
14396 * VM-exit exception handler for \#GP (General-protection exception).
14397 *
14398 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
14399 */
14400static VBOXSTRICTRC hmR0VmxExitXcptGP(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14401{
14402 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14403 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
14404
14405 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14406 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14407 PVMXVMCSINFOSHARED pVmcsInfoShared = pVmcsInfo->pShared;
14408 if (pVmcsInfoShared->RealMode.fRealOnV86Active)
14409 { /* likely */ }
14410 else
14411 {
14412#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
14413 Assert(pVCpu->hmr0.s.fUsingDebugLoop || pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv || pVmxTransient->fIsNestedGuest);
14414#endif
14415 /*
14416 * If the guest is not in real-mode or we have unrestricted guest execution support, or if we are
14417 * executing a nested-guest, reflect #GP to the guest or nested-guest.
14418 */
14419 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
14420 AssertRCReturn(rc, rc);
14421 Log4Func(("Gst: cs:rip=%#04x:%#RX64 ErrorCode=%#x cr0=%#RX64 cpl=%u tr=%#04x\n", pCtx->cs.Sel, pCtx->rip,
14422 pVmxTransient->uExitIntErrorCode, pCtx->cr0, CPUMGetGuestCPL(pVCpu), pCtx->tr.Sel));
14423
14424 if ( pVmxTransient->fIsNestedGuest
14425 || !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv
14426 || !hmR0VmxIsMesaDrvGp(pVCpu, pVmxTransient, pCtx))
14427 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
14428 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
14429 else
14430 rc = hmR0VmxHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx);
14431 return rc;
14432 }
14433
14434 Assert(CPUMIsGuestInRealModeEx(pCtx));
14435 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.vmx.fUnrestrictedGuest);
14436 Assert(!pVmxTransient->fIsNestedGuest);
14437
14438 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
14439 AssertRCReturn(rc, rc);
14440
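      /* Emulate the instruction in IEM. In real-on-v86 mode a #GP here is typically caused by
         v86-mode restrictions (IOPL-sensitive instructions and the like) rather than a genuine
         guest fault, so single-instruction emulation normally suffices. */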
14441 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
14442 if (rcStrict == VINF_SUCCESS)
14443 {
14444 if (!CPUMIsGuestInRealModeEx(pCtx))
14445 {
14446 /*
14447 * The guest is no longer in real-mode, check if we can continue executing the
14448 * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
14449 */
14450 pVmcsInfoShared->RealMode.fRealOnV86Active = false;
14451 if (HMCanExecuteVmxGuest(pVCpu->pVMR0, pVCpu, pCtx))
14452 {
14453 Log4Func(("Mode changed but guest still suitable for executing using hardware-assisted VMX\n"));
14454 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
14455 }
14456 else
14457 {
14458 Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
14459 rcStrict = VINF_EM_RESCHEDULE;
14460 }
14461 }
14462 else
14463 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
14464 }
14465 else if (rcStrict == VINF_IEM_RAISED_XCPT)
14466 {
14467 rcStrict = VINF_SUCCESS;
14468 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
14469 }
14470 return VBOXSTRICTRC_VAL(rcStrict);
14471}
14472
14473
14474/**
14475 * VM-exit exception handler wrapper for all other exceptions that are not handled
14476 * by a specific handler.
14477 *
14478 * This simply re-injects the exception back into the VM without any special
14479 * processing.
14480 *
14481 * @remarks Requires all fields in HMVMX_READ_XCPT_INFO to be read from the VMCS.
14482 */
14483static VBOXSTRICTRC hmR0VmxExitXcptOthers(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14484{
14485 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14486
14487#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
14488 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14489 AssertMsg(pVCpu->hmr0.s.fUsingDebugLoop || pVmcsInfo->pShared->RealMode.fRealOnV86Active || pVmxTransient->fIsNestedGuest,
14490 ("uVector=%#x u32XcptBitmap=%#X32\n",
14491 VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVmcsInfo->u32XcptBitmap));
14492 NOREF(pVmcsInfo);
14493#endif
14494
14495 /*
14496 * Re-inject the exception into the guest. This cannot be a double-fault condition which
14497 * would have been handled while checking exits due to event delivery.
14498 */
14499 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
14500
14501#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
14502 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
14503 AssertRCReturn(rc, rc);
14504 Log4Func(("Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
14505#endif
14506
14507#ifdef VBOX_WITH_STATISTICS
14508 switch (uVector)
14509 {
14510 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE); break;
14511 case X86_XCPT_DB: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB); break;
14512 case X86_XCPT_BP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP); break;
14513 case X86_XCPT_OF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestOF); break;
14514 case X86_XCPT_BR: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBR); break;
14515 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD); break;
14516 case X86_XCPT_NM: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM); break;
14517 case X86_XCPT_DF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDF); break;
14518 case X86_XCPT_TS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestTS); break;
14519 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP); break;
14520 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS); break;
14521 case X86_XCPT_GP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP); break;
14522 case X86_XCPT_PF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF); break;
14523 case X86_XCPT_MF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF); break;
14524 case X86_XCPT_AC: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestAC); break;
14525 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF); break;
14526 default:
14527 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
14528 break;
14529 }
14530#endif
14531
14532 /* We should never call this function for a page-fault, we'd need to pass on the fault address below otherwise. */
14533 Assert(!VMX_EXIT_INT_INFO_IS_XCPT_PF(pVmxTransient->uExitIntInfo));
14534 NOREF(uVector);
14535
14536 /* Re-inject the original exception into the guest. */
14537 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
14538 pVmxTransient->cbExitInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */);
14539 return VINF_SUCCESS;
14540}
14541
14542
14543/**
14544 * VM-exit exception handler for all exceptions (except NMIs!).
14545 *
14546 * @remarks This may be called for both guests and nested-guests. Take care to not
14547 * make assumptions and avoid doing anything that is not relevant when
14548 * executing a nested-guest (e.g., Mesa driver hacks).
14549 */
14550static VBOXSTRICTRC hmR0VmxExitXcpt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14551{
14552 HMVMX_ASSERT_READ(pVmxTransient, HMVMX_READ_XCPT_INFO);
14553
14554 /*
14555 * If this VM-exit occurred while delivering an event through the guest IDT, take
14556 * action based on the return code and additional hints (e.g. for page-faults)
14557 * that will be updated in the VMX transient structure.
14558 */
14559 VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
14560 if (rcStrict == VINF_SUCCESS)
14561 {
14562 /*
14563 * If an exception caused a VM-exit due to delivery of an event, the original
14564 * event may have to be re-injected into the guest. We shall reinject it and
14565 * continue guest execution. However, page-fault is a complicated case and
14566 * needs additional processing done in hmR0VmxExitXcptPF().
14567 */
14568 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
14569 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
14570 if ( !pVCpu->hm.s.Event.fPending
14571 || uVector == X86_XCPT_PF)
14572 {
14573 switch (uVector)
14574 {
14575 case X86_XCPT_PF: return hmR0VmxExitXcptPF(pVCpu, pVmxTransient);
14576 case X86_XCPT_GP: return hmR0VmxExitXcptGP(pVCpu, pVmxTransient);
14577 case X86_XCPT_MF: return hmR0VmxExitXcptMF(pVCpu, pVmxTransient);
14578 case X86_XCPT_DB: return hmR0VmxExitXcptDB(pVCpu, pVmxTransient);
14579 case X86_XCPT_BP: return hmR0VmxExitXcptBP(pVCpu, pVmxTransient);
14580 case X86_XCPT_AC: return hmR0VmxExitXcptAC(pVCpu, pVmxTransient);
14581 default:
14582 return hmR0VmxExitXcptOthers(pVCpu, pVmxTransient);
14583 }
14584 }
14585 /* else: inject pending event before resuming guest execution. */
14586 }
14587 else if (rcStrict == VINF_HM_DOUBLE_FAULT)
14588 {
14589 Assert(pVCpu->hm.s.Event.fPending);
14590 rcStrict = VINF_SUCCESS;
14591 }
14592
14593 return rcStrict;
14594}
14595/** @} */
14596
14597
14598/** @name VM-exit handlers.
14599 * @{
14600 */
14601/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
14602/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
14603/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
14604
14605/**
14606 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
14607 */
14608HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14609{
14610 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14611 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
14612 /* Windows hosts (32-bit and 64-bit) have DPC latency issues. See @bugref{6853}. */
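      /* If thread-context hooks are active we simply resume the guest from this ring-0 loop;
         otherwise return VINF_EM_RAW_INTERRUPT and let the outer loops decide how to proceed
         (see the DPC latency note above). */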
14613 if (VMMR0ThreadCtxHookIsEnabled(pVCpu))
14614 return VINF_SUCCESS;
14615 return VINF_EM_RAW_INTERRUPT;
14616}
14617
14618
14619/**
14620 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI). Conditional
14621 * VM-exit.
14622 */
14623HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14624{
14625 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14626 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
14627
14628 hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
14629
14630 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);
14631 uint8_t const uVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);
14632 Assert(VMX_EXIT_INT_INFO_IS_VALID(pVmxTransient->uExitIntInfo));
14633
14634 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14635 Assert( !(pVmcsInfo->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
14636 && uExitIntType != VMX_EXIT_INT_INFO_TYPE_EXT_INT);
14637 NOREF(pVmcsInfo);
14638
14639 VBOXSTRICTRC rcStrict;
14640 switch (uExitIntType)
14641 {
14642 /*
14643 * Host physical NMIs:
14644 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we
14645 * injected it ourselves and anything we inject is not going to cause a VM-exit directly
14646 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2].
14647 *
14648 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
14649 * See Intel spec. 27.5.5 "Updating Non-Register State".
14650 */
14651 case VMX_EXIT_INT_INFO_TYPE_NMI:
14652 {
14653 rcStrict = hmR0VmxExitHostNmi(pVCpu, pVmcsInfo);
14654 break;
14655 }
14656
14657 /*
14658 * Privileged software exceptions (#DB from ICEBP),
14659 * Software exceptions (#BP and #OF),
14660 * Hardware exceptions:
14661 * Process the required exceptions and resume guest execution if possible.
14662 */
14663 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
14664 Assert(uVector == X86_XCPT_DB);
14665 RT_FALL_THRU();
14666 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
14667 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF || uExitIntType == VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT);
14668 RT_FALL_THRU();
14669 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
14670 {
14671 NOREF(uVector);
14672 hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
14673 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
14674 hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
14675 hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
14676
14677 rcStrict = hmR0VmxExitXcpt(pVCpu, pVmxTransient);
14678 break;
14679 }
14680
14681 default:
14682 {
14683 pVCpu->hm.s.u32HMError = pVmxTransient->uExitIntInfo;
14684 rcStrict = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
14685 AssertMsgFailed(("Invalid/unexpected VM-exit interruption info %#x\n", pVmxTransient->uExitIntInfo));
14686 break;
14687 }
14688 }
14689
14690 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
14691 return rcStrict;
14692}
14693
14694
14695/**
14696 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
14697 */
14698HMVMX_EXIT_NSRC_DECL hmR0VmxExitIntWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14699{
14700 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14701
14702 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
14703 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14704 hmR0VmxClearIntWindowExitVmcs(pVmcsInfo);
14705
14706 /* Evaluate and deliver pending events and resume guest execution. */
14707 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
14708 return VINF_SUCCESS;
14709}
14710
14711
14712/**
14713 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
14714 */
14715HMVMX_EXIT_NSRC_DECL hmR0VmxExitNmiWindow(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14716{
14717 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14718
14719 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14720 if (RT_UNLIKELY(!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))) /** @todo NSTVMX: Turn this into an assertion. */
14721 {
14722 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
14723 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
14724 }
14725
14726 Assert(!CPUMIsGuestNmiBlocking(pVCpu));
14727
14728 /*
14729 * If block-by-STI is set when we get this VM-exit, it means the CPU doesn't block NMIs following STI.
14730 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}.
14731 */
14732 uint32_t fIntrState;
14733 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
14734 AssertRC(rc);
14735 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
14736 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
14737 {
14738 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
14739 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
14740
14741 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
14742 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
14743 AssertRC(rc);
14744 }
14745
14746 /* Indicate that we no longer need to VM-exit when the guest is ready to receive NMIs; it is now ready. */
14747 hmR0VmxClearNmiWindowExitVmcs(pVmcsInfo);
14748
14749 /* Evaluate and deliver pending events and resume guest execution. */
14750 return VINF_SUCCESS;
14751}
14752
14753
14754/**
14755 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
14756 */
14757HMVMX_EXIT_NSRC_DECL hmR0VmxExitWbinvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14758{
14759 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14760 return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
14761}
14762
14763
14764/**
14765 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
14766 */
14767HMVMX_EXIT_NSRC_DECL hmR0VmxExitInvd(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14768{
14769 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14770 return hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
14771}
14772
14773
14774/**
14775 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
14776 */
14777HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14778{
14779 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14780
14781 /*
14782 * Get the state we need and update the exit history entry.
14783 */
14784 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14785 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
14786
14787 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
14788 AssertRCReturn(rc, rc);
14789
14790 VBOXSTRICTRC rcStrict;
14791 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
14792 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
14793 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
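      /* A non-NULL exit record means this exit address is "hot" and EM wants to probe or
         re-execute a batch of instructions; otherwise take the simple single-instruction path. */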
14794 if (!pExitRec)
14795 {
14796 /*
14797 * Regular CPUID instruction execution.
14798 */
14799 rcStrict = IEMExecDecodedCpuid(pVCpu, pVmxTransient->cbExitInstr);
14800 if (rcStrict == VINF_SUCCESS)
14801 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
14802 else if (rcStrict == VINF_IEM_RAISED_XCPT)
14803 {
14804 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
14805 rcStrict = VINF_SUCCESS;
14806 }
14807 }
14808 else
14809 {
14810 /*
14811 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
14812 */
14813 int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
14814 AssertRCReturn(rc2, rc2);
14815
14816 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
14817 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
14818
14819 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
14820 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
14821
14822 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
14823 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
14824 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
14825 }
14826 return rcStrict;
14827}
14828
14829
14830/**
14831 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
14832 */
14833HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14834{
14835 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14836
14837 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14838 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4);
14839 AssertRCReturn(rc, rc);
14840
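      /* GETSEC causes an unconditional VM-exit only when CR4.SMXE is set; let ring-3 deal with
         it in that case. With CR4.SMXE clear the CPU raises #UD instead, so getting here then
         would be unexpected. */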
14841 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMXE)
14842 return VINF_EM_RAW_EMULATE_INSTR;
14843
14844 AssertMsgFailed(("hmR0VmxExitGetsec: Unexpected VM-exit when CR4.SMXE is 0.\n"));
14845 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
14846}
14847
14848
14849/**
14850 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
14851 */
14852HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14853{
14854 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14855
14856 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14857 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
14858 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
14859 AssertRCReturn(rc, rc);
14860
14861 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbExitInstr);
14862 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14863 {
14864 /* If we get a spurious VM-exit when TSC offsetting is enabled,
14865 we must reset offsetting on VM-entry. See @bugref{6634}. */
14866 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
14867 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
14868 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
14869 }
14870 else if (rcStrict == VINF_IEM_RAISED_XCPT)
14871 {
14872 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
14873 rcStrict = VINF_SUCCESS;
14874 }
14875 return rcStrict;
14876}
14877
14878
14879/**
14880 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
14881 */
14882HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14883{
14884 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14885
14886 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14887 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
14888 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_TSC_AUX);
14889 AssertRCReturn(rc, rc);
14890
14891 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbExitInstr);
14892 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14893 {
14894 /* If we get a spurious VM-exit when TSC offsetting is enabled,
14895 we must reset offsetting on VM-reentry. See @bugref{6634}. */
14896 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TSC_OFFSETTING)
14897 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
14898 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
14899 }
14900 else if (rcStrict == VINF_IEM_RAISED_XCPT)
14901 {
14902 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
14903 rcStrict = VINF_SUCCESS;
14904 }
14905 return rcStrict;
14906}
14907
14908
14909/**
14910 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
14911 */
14912HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14913{
14914 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14915
14916 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14917 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR0
14918 | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS);
14919 AssertRCReturn(rc, rc);
14920
14921 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
14922 rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
14923 if (RT_LIKELY(rc == VINF_SUCCESS))
14924 {
14925 rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
14926 Assert(pVmxTransient->cbExitInstr == 2);
14927 }
14928 else
14929 {
14930 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
14931 rc = VERR_EM_INTERPRETER;
14932 }
14933 return rc;
14934}
14935
14936
14937/**
14938 * VM-exit handler for VMCALL (VMX_EXIT_VMCALL). Unconditional VM-exit.
14939 */
14940HMVMX_EXIT_DECL hmR0VmxExitVmcall(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14941{
14942 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14943
14944 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3;
14945 if (EMAreHypercallInstructionsEnabled(pVCpu))
14946 {
14947 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14948 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0
14949 | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_EFER);
14950 AssertRCReturn(rc, rc);
14951
14952 /* Perform the hypercall. */
14953 rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
14954 if (rcStrict == VINF_SUCCESS)
14955 {
14956 rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
14957 AssertRCReturn(rc, rc);
14958 }
14959 else
14960 Assert( rcStrict == VINF_GIM_R3_HYPERCALL
14961 || rcStrict == VINF_GIM_HYPERCALL_CONTINUING
14962 || RT_FAILURE(rcStrict));
14963
14964 /* If the hypercall changes anything other than the guest's general-purpose registers,
14965 we would need to reload the guest changed bits here before VM-entry. */
14966 }
14967 else
14968 Log4Func(("Hypercalls not enabled\n"));
14969
14970 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
14971 if (RT_FAILURE(rcStrict))
14972 {
14973 hmR0VmxSetPendingXcptUD(pVCpu);
14974 rcStrict = VINF_SUCCESS;
14975 }
14976
14977 return rcStrict;
14978}
14979
14980
14981/**
14982 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
14983 */
14984HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
14985{
14986 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
14987 Assert(!pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging || pVCpu->hmr0.s.fUsingDebugLoop);
14988
14989 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
14990 hmR0VmxReadExitQualVmcs(pVmxTransient);
14991 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
14992 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
14993 AssertRCReturn(rc, rc);
14994
14995 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->uExitQual);
14996
14997 if (rcStrict == VINF_SUCCESS || rcStrict == VINF_PGM_SYNC_CR3)
14998 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
14999 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15000 {
15001 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15002 rcStrict = VINF_SUCCESS;
15003 }
15004 else
15005 AssertMsgFailed(("Unexpected IEMExecDecodedInvlpg(%#RX64) status: %Rrc\n", pVmxTransient->uExitQual,
15006 VBOXSTRICTRC_VAL(rcStrict)));
15007 return rcStrict;
15008}
15009
15010
15011/**
15012 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
15013 */
15014HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
15015{
15016 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15017
15018 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
15019 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15020 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15021 AssertRCReturn(rc, rc);
15022
15023 VBOXSTRICTRC rcStrict = IEMExecDecodedMonitor(pVCpu, pVmxTransient->cbExitInstr);
15024 if (rcStrict == VINF_SUCCESS)
15025 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
15026 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15027 {
15028 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15029 rcStrict = VINF_SUCCESS;
15030 }
15031
15032 return rcStrict;
15033}
15034
15035
15036/**
15037 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
15038 */
15039HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
15040{
15041 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15042
15043 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
15044 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15045 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
15046 AssertRCReturn(rc, rc);
15047
15048 VBOXSTRICTRC rcStrict = IEMExecDecodedMwait(pVCpu, pVmxTransient->cbExitInstr);
15049 if (RT_SUCCESS(rcStrict))
15050 {
15051 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
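         /* IEM may have decided the guest should halt; let EM override that when the MWAIT does
            not actually need to block (e.g. a wake-up condition is already pending). */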
15052 if (EMMonitorWaitShouldContinue(pVCpu, &pVCpu->cpum.GstCtx))
15053 rcStrict = VINF_SUCCESS;
15054 }
15055
15056 return rcStrict;
15057}
15058
15059
15060/**
15061 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
15062 * VM-exit.
15063 */
15064HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
15065{
15066 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15067 return VINF_EM_RESET;
15068}
15069
15070
15071/**
15072 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
15073 */
15074HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
15075{
15076 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15077
15078 int rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
15079 AssertRCReturn(rc, rc);
15080
15081 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); /* Advancing the RIP above should've imported eflags. */
15082 if (EMShouldContinueAfterHalt(pVCpu, &pVCpu->cpum.GstCtx)) /* Requires eflags. */
15083 rc = VINF_SUCCESS;
15084 else
15085 rc = VINF_EM_HALT;
15086
15087 if (rc != VINF_SUCCESS)
15088 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHltToR3);
15089 return rc;
15090}
15091
15092
15093/**
15094 * VM-exit handler for instructions that result in a \#UD exception delivered to
15095 * the guest.
15096 */
15097HMVMX_EXIT_NSRC_DECL hmR0VmxExitSetPendingXcptUD(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
15098{
15099 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15100 hmR0VmxSetPendingXcptUD(pVCpu);
15101 return VINF_SUCCESS;
15102}
15103
15104
15105/**
15106 * VM-exit handler for expiry of the VMX-preemption timer.
15107 */
15108HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
15109{
15110 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15111
15112 /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
15113 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
15114 Log12(("hmR0VmxExitPreemptTimer:\n"));
15115
15116 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
15117 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
15118 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
15119 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
15120 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
15121}
15122
15123
15124/**
15125 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
15126 */
15127HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
15128{
15129 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15130
15131 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
15132 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15133 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_CR4);
15134 AssertRCReturn(rc, rc);
15135
15136 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbExitInstr);
15137 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS
15138 : HM_CHANGED_RAISED_XCPT_MASK);
15139
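     /* The guest may have changed XCR0, so re-evaluate whether we need to swap XCR0 around
        VM-entry/VM-exit and re-select the start-VM function accordingly. */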
15140 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
15141 bool const fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0();
15142 if (fLoadSaveGuestXcr0 != pVCpu->hmr0.s.fLoadSaveGuestXcr0)
15143 {
15144 pVCpu->hmr0.s.fLoadSaveGuestXcr0 = fLoadSaveGuestXcr0;
15145 hmR0VmxUpdateStartVmFunction(pVCpu);
15146 }
15147
15148 return rcStrict;
15149}
15150
15151
15152/**
15153 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
15154 */
15155HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
15156{
15157 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15158
15159 /** @todo Enable the new code after finding a reliable guest test-case. */
15160#if 1
15161 return VERR_EM_INTERPRETER;
15162#else
15163 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15164 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
15165 hmR0VmxReadExitQualVmcs(pVmxTransient);
15166 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
15167 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
15168 AssertRCReturn(rc, rc);
15169
15170 /* Paranoia. Ensure this has a memory operand. */
15171 Assert(!pVmxTransient->ExitInstrInfo.Inv.u1Cleared0);
15172
15173 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
15174 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
15175 uint64_t const uType = CPUMIsGuestIn64BitCode(pVCpu) ? pVCpu->cpum.GstCtx.aGRegs[iGReg].u64
15176 : pVCpu->cpum.GstCtx.aGRegs[iGReg].u32;
15177
15178 RTGCPTR GCPtrDesc;
15179 HMVMX_DECODE_MEM_OPERAND(pVCpu, pVmxTransient->ExitInstrInfo.u, pVmxTransient->uExitQual, VMXMEMACCESS_READ, &GCPtrDesc);
15180
15181 VBOXSTRICTRC rcStrict = IEMExecDecodedInvpcid(pVCpu, pVmxTransient->cbExitInstr, pVmxTransient->ExitInstrInfo.Inv.iSegReg,
15182 GCPtrDesc, uType);
15183 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15184 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
15185 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15186 {
15187 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15188 rcStrict = VINF_SUCCESS;
15189 }
15190 return rcStrict;
15191#endif
15192}
15193
15194
15195/**
15196 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error
15197 * VM-exit.
15198 */
15199HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
15200{
15201 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
15202 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
15203 AssertRCReturn(rc, rc);
15204
15205 rc = hmR0VmxCheckCachedVmcsCtls(pVCpu, pVmcsInfo, pVmxTransient->fIsNestedGuest);
15206 if (RT_FAILURE(rc))
15207 return rc;
15208
15209 uint32_t const uInvalidReason = hmR0VmxCheckGuestState(pVCpu, pVmcsInfo);
15210 NOREF(uInvalidReason);
15211
15212#ifdef VBOX_STRICT
15213 uint32_t fIntrState;
15214 uint64_t u64Val;
15215 hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
15216 hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
15217 hmR0VmxReadEntryInstrLenVmcs(pVmxTransient);
15218
15219 Log4(("uInvalidReason %u\n", uInvalidReason));
15220 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntInfo));
15221 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
15222 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
15223
15224 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState); AssertRC(rc);
15225 Log4(("VMX_VMCS32_GUEST_INT_STATE %#RX32\n", fIntrState));
15226 rc = VMXReadVmcsNw(VMX_VMCS_GUEST_CR0, &u64Val); AssertRC(rc);
15227 Log4(("VMX_VMCS_GUEST_CR0 %#RX64\n", u64Val));
15228 rc = VMXReadVmcsNw(VMX_VMCS_CTRL_CR0_MASK, &u64Val); AssertRC(rc);
15229 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RX64\n", u64Val));
15230 rc = VMXReadVmcsNw(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u64Val); AssertRC(rc);
15231 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RX64\n", u64Val));
15232 rc = VMXReadVmcsNw(VMX_VMCS_CTRL_CR4_MASK, &u64Val); AssertRC(rc);
15233 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RX64\n", u64Val));
15234 rc = VMXReadVmcsNw(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u64Val); AssertRC(rc);
15235 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RX64\n", u64Val));
15236 if (pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging)
15237 {
15238 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
15239 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
15240 }
15241 hmR0DumpRegs(pVCpu, HM_DUMP_REG_FLAGS_ALL);
15242#endif
15243
15244 return VERR_VMX_INVALID_GUEST_STATE;
15245}
15246
15247/**
15248 * VM-exit handler for all undefined/unexpected reasons. Should never happen.
15249 */
15250HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrUnexpected(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
15251{
15252 /*
15253 * Cumulative notes of all recognized but unexpected VM-exits.
15254 *
15255 * 1. This does -not- cover scenarios like a page-fault VM-exit occurring when
15256 * nested-paging is used.
15257 *
15258 * 2. Any instruction that causes a VM-exit unconditionally (for e.g. VMXON) must be
15259 * emulated or a #UD must be raised in the guest. Therefore, we should -not- be using
15260 * this function (and thereby stop VM execution) for handling such instructions.
15261 *
15262 *
15263 * VMX_EXIT_INIT_SIGNAL:
15264 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
15265 * They are -NOT- blocked in VMX non-root operation so we can, in theory, still get these
15266 * VM-exits. However, we should not receive INIT-signal VM-exits while executing a VM.
15267 *
15268 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery".
15269 * See Intel spec. 29.3 "VMX Instructions" for "VMXON".
15270 * See Intel spec. "23.8 Restrictions on VMX operation".
15271 *
15272 * VMX_EXIT_SIPI:
15273 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest
15274 * activity state is used. We don't make use of it as our guests don't have direct
15275 * access to the host local APIC.
15276 *
15277 * See Intel spec. 25.3 "Other Causes of VM-exits".
15278 *
15279 * VMX_EXIT_IO_SMI:
15280 * VMX_EXIT_SMI:
15281 * This can only happen if we support dual-monitor treatment of SMI, which can be
15282 * activated by executing VMCALL in VMX root operation. Only an STM (SMM transfer
15283 * monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL in
15284 * VMX root mode or receive an SMI. If we get here, something funny is going on.
15285 *
15286 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment"
15287 * See Intel spec. 25.3 "Other Causes of VM-Exits"
15288 *
15289 * VMX_EXIT_ERR_MSR_LOAD:
15290 * Failures while loading MSRs that are part of the VM-entry MSR-load area are unexpected
15291 * and typically indicate a bug in the hypervisor code. We thus cannot resume
15292 * execution.
15293 *
15294 * See Intel spec. 26.7 "VM-Entry Failures During Or After Loading Guest State".
15295 *
15296 * VMX_EXIT_ERR_MACHINE_CHECK:
15297 * Machine-check exceptions indicate a fatal/unrecoverable hardware condition
15298 * including but not limited to system bus, ECC, parity, cache and TLB errors. An
15299 * abort-class #MC exception is raised. We thus cannot assume a
15300 * reasonable chance of continuing any sort of execution and we bail.
15301 *
15302 * See Intel spec. 15.1 "Machine-check Architecture".
15303 * See Intel spec. 27.1 "Architectural State Before A VM Exit".
15304 *
15305 * VMX_EXIT_PML_FULL:
15306 * VMX_EXIT_VIRTUALIZED_EOI:
15307 * VMX_EXIT_APIC_WRITE:
15308 * We do not currently support any of these features and thus they are all unexpected
15309 * VM-exits.
15310 *
15311 * VMX_EXIT_GDTR_IDTR_ACCESS:
15312 * VMX_EXIT_LDTR_TR_ACCESS:
15313 * VMX_EXIT_RDRAND:
15314 * VMX_EXIT_RSM:
15315 * VMX_EXIT_VMFUNC:
15316 * VMX_EXIT_ENCLS:
15317 * VMX_EXIT_RDSEED:
15318 * VMX_EXIT_XSAVES:
15319 * VMX_EXIT_XRSTORS:
15320 * VMX_EXIT_UMWAIT:
15321 * VMX_EXIT_TPAUSE:
15322 * VMX_EXIT_LOADIWKEY:
15323 * These VM-exits are -not- caused unconditionally by execution of the corresponding
15324 * instruction. Any VM-exit for these instructions indicates a hardware problem,
15325 * unsupported CPU modes (like SMM) or potentially corrupt VMCS controls.
15326 *
15327 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
15328 */
15329 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15330 AssertMsgFailed(("Unexpected VM-exit %u\n", pVmxTransient->uExitReason));
15331 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
15332}
15333
15334
15335/**
15336 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
15337 */
15338HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
15339{
15340 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15341
15342 /** @todo Optimize this: We currently drag in the whole MSR state
15343 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
15344 * MSRs required. That would require changes to IEM and possibly CPUM too.
15345 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
15346 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
15347 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
15348 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
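     /* As for WRMSR below: the FS and GS base MSRs are not covered by CPUMCTX_EXTRN_ALL_MSRS,
        so pull in the full segment registers when those are the MSRs being read. */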
15349 switch (idMsr)
15350 {
15351 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
15352 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
15353 }
15354
15355 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15356 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fImport);
15357 AssertRCReturn(rc, rc);
15358
15359 Log4Func(("ecx=%#RX32\n", idMsr));
15360
15361#ifdef VBOX_STRICT
15362 Assert(!pVmxTransient->fIsNestedGuest);
15363 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
15364 {
15365 if ( hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr)
15366 && idMsr != MSR_K6_EFER)
15367 {
15368 AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
15369 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
15370 }
15371 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
15372 {
15373 Assert(pVmcsInfo->pvMsrBitmap);
15374 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
15375 if (fMsrpm & VMXMSRPM_ALLOW_RD)
15376 {
15377 AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
15378 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
15379 }
15380 }
15381 }
15382#endif
15383
15384 VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbExitInstr);
15385 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
15386 if (rcStrict == VINF_SUCCESS)
15387 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
15388 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15389 {
15390 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15391 rcStrict = VINF_SUCCESS;
15392 }
15393 else
15394 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ || rcStrict == VINF_EM_TRIPLE_FAULT,
15395 ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
15396
15397 return rcStrict;
15398}
15399
15400
15401/**
15402 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
15403 */
15404HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
15405{
15406 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15407
15408 /** @todo Optimize this: We currently drag in the whole MSR state
15409 * (CPUMCTX_EXTRN_ALL_MSRS) here. We should optimize this to only get
15410 * MSRs required. That would require changes to IEM and possibly CPUM too.
15411 * (Should probably do it in a lazy fashion from CPUMAllMsrs.cpp). */
15412 uint32_t const idMsr = pVCpu->cpum.GstCtx.ecx;
15413 uint64_t fImport = IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS;
15414
15415 /*
15416 * The FS and GS base MSRs are not part of the above all-MSRs mask.
15417 * Although we don't need to fetch the base (it will be overwritten shortly), when
15418 * loading guest-state we also load the entire segment register, including the limit
15419 * and attributes, and thus we need to import them here.
15420 */
15421 switch (idMsr)
15422 {
15423 case MSR_K8_FS_BASE: fImport |= CPUMCTX_EXTRN_FS; break;
15424 case MSR_K8_GS_BASE: fImport |= CPUMCTX_EXTRN_GS; break;
15425 }
15426
15427 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
15428 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15429 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fImport);
15430 AssertRCReturn(rc, rc);
15431
15432 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.eax));
15433
15434 VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbExitInstr);
15435 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
15436
15437 if (rcStrict == VINF_SUCCESS)
15438 {
15439 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
15440
15441 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
15442 if ( idMsr == MSR_IA32_APICBASE
15443 || ( idMsr >= MSR_IA32_X2APIC_START
15444 && idMsr <= MSR_IA32_X2APIC_END))
15445 {
15446 /*
15447 * We've already saved the APIC related guest-state (TPR) in post-run phase.
15448 * When full APIC register virtualization is implemented we'll have to make
15449 * sure APIC state is saved from the VMCS before IEM changes it.
15450 */
15451 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
15452 }
15453 else if (idMsr == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
15454 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
15455 else if (idMsr == MSR_K6_EFER)
15456 {
15457 /*
15458 * If the guest touches the EFER MSR we need to update the VM-Entry and VM-Exit controls
15459 * as well, even if it is -not- touching bits that cause paging mode changes (LMA/LME).
15460 * We care about the other bits as well, SCE and NXE. See @bugref{7368}.
15461 */
15462 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
15463 }
15464
15465 /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not used. */
15466 if (!(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS))
15467 {
15468 switch (idMsr)
15469 {
15470 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
15471 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
15472 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
15473 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS); break;
15474 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS); break;
15475 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break;
15476 default:
15477 {
15478 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
15479 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
15480 else if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
15481 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
15482 break;
15483 }
15484 }
15485 }
15486#ifdef VBOX_STRICT
15487 else
15488 {
15489 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
15490 switch (idMsr)
15491 {
15492 case MSR_IA32_SYSENTER_CS:
15493 case MSR_IA32_SYSENTER_EIP:
15494 case MSR_IA32_SYSENTER_ESP:
15495 case MSR_K8_FS_BASE:
15496 case MSR_K8_GS_BASE:
15497 {
15498 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
15499 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
15500 }
15501
15502 /* Writes to MSRs in auto-load/store area/swapped MSRs, shouldn't cause VM-exits with MSR-bitmaps. */
15503 default:
15504 {
15505 if (hmR0VmxIsAutoLoadGuestMsr(pVmcsInfo, idMsr))
15506 {
15507 /* EFER MSR writes are always intercepted. */
15508 if (idMsr != MSR_K6_EFER)
15509 {
15510 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
15511 idMsr));
15512 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
15513 }
15514 }
15515
15516 if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
15517 {
15518 Assert(pVmcsInfo->pvMsrBitmap);
15519 uint32_t fMsrpm = CPUMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, idMsr);
15520 if (fMsrpm & VMXMSRPM_ALLOW_WR)
15521 {
15522 AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
15523 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, idMsr);
15524 }
15525 }
15526 break;
15527 }
15528 }
15529 }
15530#endif /* VBOX_STRICT */
15531 }
15532 else if (rcStrict == VINF_IEM_RAISED_XCPT)
15533 {
15534 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
15535 rcStrict = VINF_SUCCESS;
15536 }
15537 else
15538 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE || rcStrict == VINF_EM_TRIPLE_FAULT,
15539 ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
15540
15541 return rcStrict;
15542}
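
/*
 * Illustrative, compiled-out sketch: the VBOX_STRICT checks above rely on the fact that an
 * MSR marked write-passthru in the 4-KB VMX MSR bitmap never causes a WRMSR VM-exit.  The
 * helper below spells out the Intel SDM bitmap layout that lookup corresponds to; the name
 * SketchIsWrmsrIntercepted is purely hypothetical and not a VBox API (the real lookup is
 * CPUMGetVmxMsrPermission).
 */
#if 0
/**
 * Checks whether a WRMSR to @a idMsr would be intercepted according to a raw VMX MSR bitmap.
 *
 * Layout (Intel SDM): bytes 0x800-0xBFF hold write bits for MSRs 0x00000000-0x00001FFF,
 * bytes 0xC00-0xFFF hold write bits for MSRs 0xC0000000-0xC0001FFF; a set bit intercepts.
 */
static bool SketchIsWrmsrIntercepted(uint8_t const *pbMsrBitmap, uint32_t idMsr)
{
    uint32_t offBitmap;
    uint32_t iBit;
    if (idMsr <= UINT32_C(0x00001fff))
    {
        offBitmap = 0x800;                      /* Write bitmap, low MSR range. */
        iBit      = idMsr;
    }
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
    {
        offBitmap = 0xc00;                      /* Write bitmap, high MSR range. */
        iBit      = idMsr - UINT32_C(0xc0000000);
    }
    else
        return true;                            /* MSRs outside both ranges always cause VM-exits. */
    return RT_BOOL(pbMsrBitmap[offBitmap + (iBit >> 3)] & RT_BIT_32(iBit & 7));
}
#endif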
15543
15544
15545/**
15546 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
15547 */
15548HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
15549{
15550 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15551
15552 /** @todo The guest has likely hit a contended spinlock. We might want to
15553     *        poke or schedule a different guest VCPU. */
15554 int rc = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
15555 if (RT_SUCCESS(rc))
15556 return VINF_EM_RAW_INTERRUPT;
15557
15558 AssertMsgFailed(("hmR0VmxExitPause: Failed to increment RIP. rc=%Rrc\n", rc));
15559 return rc;
15560}
15561
15562
15563/**
15564 * VM-exit handler for when the TPR value is lowered below the specified
15565 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
15566 */
15567HMVMX_EXIT_NSRC_DECL hmR0VmxExitTprBelowThreshold(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
15568{
15569 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15570 Assert(pVmxTransient->pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
15571
15572 /*
15573 * The TPR shadow would've been synced with the APIC TPR in the post-run phase.
15574 * We'll re-evaluate pending interrupts and inject them before the next VM
15575 * entry so we can just continue execution here.
15576 */
15577 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
15578 return VINF_SUCCESS;
15579}
15580
15581
15582/**
15583 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
15584 * VM-exit.
15585 *
15586 * @retval VINF_SUCCESS when guest execution can continue.
15587 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
15588 * @retval VERR_EM_RESCHEDULE_REM when we need to return to ring-3 due to
15589 * incompatible guest state for VMX execution (real-on-v86 case).
15590 */
15591HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
15592{
15593 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15594 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
15595
15596 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
15597 hmR0VmxReadExitQualVmcs(pVmxTransient);
15598 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15599
15600 VBOXSTRICTRC rcStrict;
15601 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
15602 uint64_t const uExitQual = pVmxTransient->uExitQual;
15603 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQual);
15604 switch (uAccessType)
15605 {
15606 /*
15607 * MOV to CRx.
15608 */
15609 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
15610 {
15611 /*
15612 * When PAE paging is used, the CPU will reload PAE PDPTEs from CR3 when the guest
15613 * changes certain bits even in CR0, CR4 (and not just CR3). We are currently fine
15614 * since IEM_CPUMCTX_EXTRN_MUST_MASK (used below) includes CR3 which will import
15615 * PAE PDPTEs as well.
15616 */
15617 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
15618 AssertRCReturn(rc, rc);
15619
15620 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
15621 uint32_t const uOldCr0 = pVCpu->cpum.GstCtx.cr0;
15622 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
15623 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
15624
15625 /*
15626          * MOV to CR3 only causes a VM-exit when one or more of the following are true:
15627 * - When nested paging isn't used.
15628 * - If the guest doesn't have paging enabled (intercept CR3 to update shadow page tables).
15629 * - We are executing in the VM debug loop.
15630 */
15631 Assert( iCrReg != 3
15632 || !pVM->hmr0.s.fNestedPaging
15633 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
15634 || pVCpu->hmr0.s.fUsingDebugLoop);
15635
15636             /* MOV to CR8 only causes VM-exits when the TPR shadow is not used. */
15637 Assert( iCrReg != 8
15638 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
15639
15640 rcStrict = hmR0VmxExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
15641 AssertMsg( rcStrict == VINF_SUCCESS
15642 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
15643
15644 /*
15645 * This is a kludge for handling switches back to real mode when we try to use
15646 * V86 mode to run real mode code directly. Problem is that V86 mode cannot
15647 * deal with special selector values, so we have to return to ring-3 and run
15648 * there till the selector values are V86 mode compatible.
15649 *
15650 * Note! Using VINF_EM_RESCHEDULE_REM here rather than VINF_EM_RESCHEDULE since the
15651 * latter is an alias for VINF_IEM_RAISED_XCPT which is asserted at the end of
15652 * this function.
15653 */
15654 if ( iCrReg == 0
15655 && rcStrict == VINF_SUCCESS
15656 && !pVM->hmr0.s.vmx.fUnrestrictedGuest
15657 && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx)
15658 && (uOldCr0 & X86_CR0_PE)
15659 && !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
15660 {
15661 /** @todo Check selectors rather than returning all the time. */
15662 Assert(!pVmxTransient->fIsNestedGuest);
15663 Log4Func(("CR0 write, back to real mode -> VINF_EM_RESCHEDULE_REM\n"));
15664 rcStrict = VINF_EM_RESCHEDULE_REM;
15665 }
15666 break;
15667 }
15668
15669 /*
15670 * MOV from CRx.
15671 */
15672 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
15673 {
15674 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(uExitQual);
15675 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(uExitQual);
15676
15677 /*
15678          * MOV from CR3 only causes a VM-exit when one or more of the following are true:
15679 * - When nested paging isn't used.
15680 * - If the guest doesn't have paging enabled (pass guest's CR3 rather than our identity mapped CR3).
15681 * - We are executing in the VM debug loop.
15682 */
15683 Assert( iCrReg != 3
15684 || !pVM->hmr0.s.fNestedPaging
15685 || !CPUMIsGuestPagingEnabledEx(&pVCpu->cpum.GstCtx)
15686 || pVCpu->hmr0.s.fLeaveDone);
15687
15688             /* MOV from CR8 only causes a VM-exit when the TPR shadow feature isn't enabled. */
15689 Assert( iCrReg != 8
15690 || !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW));
15691
15692 rcStrict = hmR0VmxExitMovFromCrX(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
15693 break;
15694 }
15695
15696 /*
15697 * CLTS (Clear Task-Switch Flag in CR0).
15698 */
15699 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
15700 {
15701 rcStrict = hmR0VmxExitClts(pVCpu, pVmcsInfo, pVmxTransient->cbExitInstr);
15702 break;
15703 }
15704
15705 /*
15706 * LMSW (Load Machine-Status Word into CR0).
15707 * LMSW cannot clear CR0.PE, so no fRealOnV86Active kludge needed here.
15708 */
15709 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW:
15710 {
15711 RTGCPTR GCPtrEffDst;
15712 uint8_t const cbInstr = pVmxTransient->cbExitInstr;
15713 uint16_t const uMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQual);
15714 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(uExitQual);
15715 if (fMemOperand)
15716 {
15717 hmR0VmxReadGuestLinearAddrVmcs(pVmxTransient);
15718 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
15719 }
15720 else
15721 GCPtrEffDst = NIL_RTGCPTR;
15722 rcStrict = hmR0VmxExitLmsw(pVCpu, pVmcsInfo, cbInstr, uMsw, GCPtrEffDst);
15723 break;
15724 }
15725
15726 default:
15727 {
15728 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
15729 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
15730 }
15731 }
15732
15733 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS))
15734 == (HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS));
15735 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
15736
15737 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
15738 NOREF(pVM);
15739 return rcStrict;
15740}
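
/*
 * Illustrative, compiled-out sketch: the handler above picks the access type, CR number and
 * general-purpose register out of the exit qualification via the VMX_EXIT_QUAL_CRX_* macros.
 * The decoder below spells out the underlying Intel SDM bit layout for control-register
 * accesses; the type and function names are hypothetical, not VBox APIs.
 */
#if 0
typedef struct SKETCHCRXQUAL
{
    uint8_t  iCrReg;        /**< Bits 3:0   - control register number. */
    uint8_t  uAccessType;   /**< Bits 5:4   - 0=MOV to CR, 1=MOV from CR, 2=CLTS, 3=LMSW. */
    bool     fLmswMemOp;    /**< Bit  6     - LMSW operand type (0=register, 1=memory). */
    uint8_t  iGReg;         /**< Bits 11:8  - general-purpose register (for MOV CRx). */
    uint16_t uLmswSrc;      /**< Bits 31:16 - LMSW source data. */
} SKETCHCRXQUAL;

static void SketchDecodeCrxQual(uint64_t uExitQual, SKETCHCRXQUAL *pOut)
{
    pOut->iCrReg      = (uint8_t)(  uExitQual        & 0xf);
    pOut->uAccessType = (uint8_t)( (uExitQual >>  4) & 0x3);
    pOut->fLmswMemOp  = RT_BOOL(   (uExitQual >>  6) & 0x1);
    pOut->iGReg       = (uint8_t)( (uExitQual >>  8) & 0xf);
    pOut->uLmswSrc    = (uint16_t)((uExitQual >> 16) & 0xffff);
}
#endif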
15741
15742
15743/**
15744 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
15745 * VM-exit.
15746 */
15747HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
15748{
15749 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15750 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
15751
15752 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
15753 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
15754 hmR0VmxReadExitQualVmcs(pVmxTransient);
15755 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
15756 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK
15757 | CPUMCTX_EXTRN_EFER);
15758     /* The EFER MSR is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
15759 AssertRCReturn(rc, rc);
15760
15761 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
15762 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
15763 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
15764 bool const fIOWrite = (VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_IO_DIRECTION_OUT);
15765 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
15766 bool const fGstStepping = RT_BOOL(pCtx->eflags.Bits.u1TF);
15767 bool const fDbgStepping = pVCpu->hm.s.fSingleInstruction;
15768 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
15769
15770 /*
15771 * Update exit history to see if this exit can be optimized.
15772 */
15773 VBOXSTRICTRC rcStrict;
15774 PCEMEXITREC pExitRec = NULL;
15775 if ( !fGstStepping
15776 && !fDbgStepping)
15777 pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
15778 !fIOString
15779 ? !fIOWrite
15780 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
15781 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
15782 : !fIOWrite
15783 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
15784 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
15785 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
15786 if (!pExitRec)
15787 {
15788 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
15789 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */
15790
15791 uint32_t const cbValue = s_aIOSizes[uIOSize];
15792 uint32_t const cbInstr = pVmxTransient->cbExitInstr;
15793 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
15794 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
15795 if (fIOString)
15796 {
15797 /*
15798 * INS/OUTS - I/O String instruction.
15799 *
15800 * Use instruction-information if available, otherwise fall back on
15801 * interpreting the instruction.
15802 */
15803 Log4Func(("cs:rip=%#04x:%#RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
15804 AssertReturn(pCtx->dx == uIOPort, VERR_VMX_IPE_2);
15805 bool const fInsOutsInfo = RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS);
15806 if (fInsOutsInfo)
15807 {
15808 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
15809 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
15810 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
15811 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
15812 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual);
15813 if (fIOWrite)
15814 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
15815 pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
15816 else
15817 {
15818 /*
15819 * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
15820 * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
15821 * See Intel Instruction spec. for "INS".
15822 * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
15823 */
15824 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
15825 }
15826 }
15827 else
15828 rcStrict = IEMExecOne(pVCpu);
15829
15830 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
15831 fUpdateRipAlready = true;
15832 }
15833 else
15834 {
15835 /*
15836 * IN/OUT - I/O instruction.
15837 */
15838 Log4Func(("cs:rip=%04x:%08RX64 %#06x/%u %c\n", pCtx->cs.Sel, pCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
15839 uint32_t const uAndVal = s_aIOOpAnd[uIOSize];
15840 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual));
15841 if (fIOWrite)
15842 {
15843 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pCtx->eax & uAndVal, cbValue);
15844 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
15845 if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
15846 && !pCtx->eflags.Bits.u1TF)
15847 rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uIOPort, cbInstr, cbValue, pCtx->eax & uAndVal);
15848 }
15849 else
15850 {
15851 uint32_t u32Result = 0;
15852 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
15853 if (IOM_SUCCESS(rcStrict))
15854 {
15855 /* Save result of I/O IN instr. in AL/AX/EAX. */
15856 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Result & uAndVal);
15857 }
15858 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
15859 && !pCtx->eflags.Bits.u1TF)
15860 rcStrict = EMRZSetPendingIoPortRead(pVCpu, uIOPort, cbInstr, cbValue);
15861 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
15862 }
15863 }
15864
15865 if (IOM_SUCCESS(rcStrict))
15866 {
15867 if (!fUpdateRipAlready)
15868 {
15869 hmR0VmxAdvanceGuestRipBy(pVCpu, cbInstr);
15870 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP);
15871 }
15872
15873 /*
15874          * INS/OUTS with a REP prefix updates RFLAGS, which can be observed as a triple-fault
15875          * guru meditation while booting a Fedora 17 64-bit guest.
15876 *
15877 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
15878 */
15879 if (fIOString)
15880 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
15881
15882 /*
15883 * If any I/O breakpoints are armed, we need to check if one triggered
15884 * and take appropriate action.
15885 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
15886 */
15887 rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_DR7);
15888 AssertRCReturn(rc, rc);
15889
15890 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
15891 * execution engines about whether hyper BPs and such are pending. */
15892 uint32_t const uDr7 = pCtx->dr[7];
15893 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
15894 && X86_DR7_ANY_RW_IO(uDr7)
15895 && (pCtx->cr4 & X86_CR4_DE))
15896 || DBGFBpIsHwIoArmed(pVM)))
15897 {
15898 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
15899
15900 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
15901 VMMRZCallRing3Disable(pVCpu);
15902 HM_DISABLE_PREEMPT(pVCpu);
15903
15904 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);
15905
15906 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, uIOPort, cbValue);
15907 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
15908 {
15909 /* Raise #DB. */
15910 if (fIsGuestDbgActive)
15911 ASMSetDR6(pCtx->dr[6]);
15912 if (pCtx->dr[7] != uDr7)
15913 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_DR7;
15914
15915 hmR0VmxSetPendingXcptDB(pVCpu);
15916 }
15917             /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST];
15918                however, we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
15919 else if ( rcStrict2 != VINF_SUCCESS
15920 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
15921 rcStrict = rcStrict2;
15922 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
15923
15924 HM_RESTORE_PREEMPT();
15925 VMMRZCallRing3Enable(pVCpu);
15926 }
15927 }
15928
15929#ifdef VBOX_STRICT
15930 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
15931 || rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
15932 Assert(!fIOWrite);
15933 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
15934 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
15935 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
15936 Assert(fIOWrite);
15937 else
15938 {
15939# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
15940 * statuses, that the VMM device and some others may return. See
15941 * IOM_SUCCESS() for guidance. */
15942 AssertMsg( RT_FAILURE(rcStrict)
15943 || rcStrict == VINF_SUCCESS
15944 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
15945 || rcStrict == VINF_EM_DBG_BREAKPOINT
15946 || rcStrict == VINF_EM_RAW_GUEST_TRAP
15947 || rcStrict == VINF_EM_RAW_TO_R3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
15948# endif
15949 }
15950#endif
15951 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
15952 }
15953 else
15954 {
15955 /*
15956 * Frequent exit or something needing probing. Get state and call EMHistoryExec.
15957 */
15958 int rc2 = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
15959 AssertRCReturn(rc2, rc2);
15960 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead
15961 : fIOWrite ? &pVCpu->hm.s.StatExitIOStringWrite : &pVCpu->hm.s.StatExitIOStringRead);
15962 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
15963 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
15964 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQual) ? "REP " : "",
15965 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOSize));
15966
15967 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
15968 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
15969
15970 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
15971 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
15972 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
15973 }
15974 return rcStrict;
15975}
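
/*
 * Illustrative, compiled-out sketch: the handler above extracts the port, access size,
 * direction and string/REP flags from the exit qualification using the VMX_EXIT_QUAL_IO_*
 * macros.  The decoder below shows the Intel SDM bit layout those accessors correspond to;
 * the function name is hypothetical, not a VBox API.
 */
#if 0
/**
 * Decodes an I/O-instruction exit qualification: bits 2:0 = access size (0=1, 1=2, 3=4 bytes,
 * other encodings unused), bit 3 = direction (0=OUT, 1=IN), bit 4 = string instruction,
 * bit 5 = REP prefixed, bits 31:16 = port number.
 */
static void SketchDecodeIoQual(uint64_t uExitQual, uint16_t *puPort, uint32_t *pcbAccess,
                               bool *pfIn, bool *pfString, bool *pfRep)
{
    *pcbAccess = (uint32_t)(uExitQual & 7) + 1;
    *pfIn      = RT_BOOL(uExitQual & RT_BIT_64(3));
    *pfString  = RT_BOOL(uExitQual & RT_BIT_64(4));
    *pfRep     = RT_BOOL(uExitQual & RT_BIT_64(5));
    *puPort    = (uint16_t)((uExitQual >> 16) & 0xffff);
}
#endif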
15976
15977
15978/**
15979 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
15980 * VM-exit.
15981 */
15982HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
15983{
15984 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
15985
15986     /* Check if this task-switch occurred while delivering an event through the guest IDT. */
15987 hmR0VmxReadExitQualVmcs(pVmxTransient);
15988 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
15989 {
15990 hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
15991 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
15992 {
15993 uint32_t uErrCode;
15994 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uIdtVectoringInfo))
15995 {
15996 hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
15997 uErrCode = pVmxTransient->uIdtVectoringErrorCode;
15998 }
15999 else
16000 uErrCode = 0;
16001
16002 RTGCUINTPTR GCPtrFaultAddress;
16003 if (VMX_IDT_VECTORING_INFO_IS_XCPT_PF(pVmxTransient->uIdtVectoringInfo))
16004 GCPtrFaultAddress = pVCpu->cpum.GstCtx.cr2;
16005 else
16006 GCPtrFaultAddress = 0;
16007
16008 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16009
16010 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
16011 pVmxTransient->cbExitInstr, uErrCode, GCPtrFaultAddress);
16012
16013 Log4Func(("Pending event. uIntType=%#x uVector=%#x\n", VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo),
16014 VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo)));
16015 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
16016 return VINF_EM_RAW_INJECT_TRPM_EVENT;
16017 }
16018 }
16019
16020 /* Fall back to the interpreter to emulate the task-switch. */
16021 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
16022 return VERR_EM_INTERPRETER;
16023}
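
/*
 * Illustrative, compiled-out sketch: the task-switch handler above consumes the IDT-vectoring
 * information field through the VMX_IDT_VECTORING_INFO_* macros.  The decoder below shows the
 * Intel SDM layout of that field; the function name is hypothetical, not a VBox API.
 */
#if 0
/**
 * Decodes an IDT-vectoring information field: bits 7:0 = vector, bits 10:8 = interruption
 * type, bit 11 = error-code valid, bit 31 = valid.
 */
static void SketchDecodeIdtVectoringInfo(uint32_t uIdtVectoringInfo, bool *pfValid,
                                         uint8_t *puVector, uint8_t *puType, bool *pfErrCodeValid)
{
    *pfValid        = RT_BOOL(uIdtVectoringInfo & RT_BIT_32(31));
    *puVector       = (uint8_t)(uIdtVectoringInfo & 0xff);
    *puType         = (uint8_t)((uIdtVectoringInfo >> 8) & 0x7);
    *pfErrCodeValid = RT_BOOL(uIdtVectoringInfo & RT_BIT_32(11));
}
#endif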
16024
16025
16026/**
16027 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
16028 */
16029HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16030{
16031 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16032
16033 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
16034 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
16035 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
16036 AssertRC(rc);
16037 return VINF_EM_DBG_STEPPED;
16038}
16039
16040
16041/**
16042 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
16043 */
16044HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16045{
16046 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16047 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
16048
16049 hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
16050 hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
16051 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16052 hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
16053 hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
16054
16055 /*
16056 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
16057 */
16058 VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
16059 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
16060 {
16061 /* For some crazy guest, if an event delivery causes an APIC-access VM-exit, go to instruction emulation. */
16062 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
16063 {
16064 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterpret);
16065 return VINF_EM_RAW_INJECT_TRPM_EVENT;
16066 }
16067 }
16068 else
16069 {
16070 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
16071 return rcStrict;
16072 }
16073
16074 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */
16075 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
16076 hmR0VmxReadExitQualVmcs(pVmxTransient);
16077 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
16078 AssertRCReturn(rc, rc);
16079
16080     /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses" */
16081 uint32_t const uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual);
16082 switch (uAccessType)
16083 {
16084 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
16085 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
16086 {
16087 AssertMsg( !(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
16088 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual) != XAPIC_OFF_TPR,
16089 ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
16090
16091 RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64GstMsrApicBase; /* Always up-to-date, as it is not part of the VMCS. */
16092 GCPhys &= PAGE_BASE_GC_MASK;
16093 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual);
16094 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
16095 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual)));
16096
16097 rcStrict = IOMR0MmioPhysHandler(pVCpu->CTX_SUFF(pVM), pVCpu,
16098 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, GCPhys);
16099 Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
16100 if ( rcStrict == VINF_SUCCESS
16101 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
16102 || rcStrict == VERR_PAGE_NOT_PRESENT)
16103 {
16104 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
16105 | HM_CHANGED_GUEST_APIC_TPR);
16106 rcStrict = VINF_SUCCESS;
16107 }
16108 break;
16109 }
16110
16111 default:
16112 {
16113 Log4Func(("uAccessType=%#x\n", uAccessType));
16114 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
16115 break;
16116 }
16117 }
16118
16119 if (rcStrict != VINF_SUCCESS)
16120 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchApicAccessToR3);
16121 return rcStrict;
16122}
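
/*
 * Illustrative, compiled-out sketch: the APIC-access handler above pulls the access type and
 * the page offset out of the exit qualification via the VMX_EXIT_QUAL_APIC_ACCESS_* macros.
 * The decoder below shows the Intel SDM bit layout for these accesses; the function name is
 * hypothetical, not a VBox API.
 */
#if 0
/**
 * Decodes an APIC-access exit qualification: bits 11:0 = offset of the access within the
 * APIC page (for linear accesses), bits 15:12 = access type (0=linear read, 1=linear write,
 * 2=linear instruction fetch, 3=linear access during event delivery; higher values denote
 * guest-physical accesses).
 */
static void SketchDecodeApicAccessQual(uint64_t uExitQual, uint16_t *poffAccess, uint8_t *puAccessType)
{
    *poffAccess   = (uint16_t)( uExitQual        & 0xfff);
    *puAccessType = (uint8_t)( (uExitQual >> 12) & 0xf);
}
#endif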
16123
16124
16125/**
16126 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
16127 * VM-exit.
16128 */
16129HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16130{
16131 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16132 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
16133
16134 /*
16135 * We might also get this VM-exit if the nested-guest isn't intercepting MOV DRx accesses.
16136 * In such a case, rather than disabling MOV DRx intercepts and resuming execution, we
16137 * must emulate the MOV DRx access.
16138 */
16139 if (!pVmxTransient->fIsNestedGuest)
16140 {
16141 /* We should -not- get this VM-exit if the guest's debug registers were active. */
16142 if (pVmxTransient->fWasGuestDebugStateActive)
16143 {
16144 AssertMsgFailed(("Unexpected MOV DRx exit\n"));
16145 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient->uExitReason);
16146 }
16147
16148 if ( !pVCpu->hm.s.fSingleInstruction
16149 && !pVmxTransient->fWasHyperDebugStateActive)
16150 {
16151 Assert(!DBGFIsStepping(pVCpu));
16152 Assert(pVmcsInfo->u32XcptBitmap & RT_BIT(X86_XCPT_DB));
16153
16154 /* Don't intercept MOV DRx any more. */
16155 pVmcsInfo->u32ProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
16156 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);
16157 AssertRC(rc);
16158
16159 /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
16160 VMMRZCallRing3Disable(pVCpu);
16161 HM_DISABLE_PREEMPT(pVCpu);
16162
16163 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
16164 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
16165 Assert(CPUMIsGuestDebugStateActive(pVCpu));
16166
16167 HM_RESTORE_PREEMPT();
16168 VMMRZCallRing3Enable(pVCpu);
16169
16170#ifdef VBOX_WITH_STATISTICS
16171 hmR0VmxReadExitQualVmcs(pVmxTransient);
16172 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
16173 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
16174 else
16175 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
16176#endif
16177 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
16178 return VINF_SUCCESS;
16179 }
16180 }
16181
16182 /*
16183     * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires the EFER MSR and CS.
16184 * The EFER MSR is always up-to-date.
16185 * Update the segment registers and DR7 from the CPU.
16186 */
16187 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
16188 hmR0VmxReadExitQualVmcs(pVmxTransient);
16189 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
16190 AssertRCReturn(rc, rc);
16191 Log4Func(("cs:rip=%#04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));
16192
16193 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
16194 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE)
16195 {
16196 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
16197 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual),
16198 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual));
16199 if (RT_SUCCESS(rc))
16200 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
16201 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
16202 }
16203 else
16204 {
16205 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
16206 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQual),
16207 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual));
16208 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
16209 }
16210
16211 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
16212 if (RT_SUCCESS(rc))
16213 {
16214 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pVmxTransient);
16215 AssertRCReturn(rc2, rc2);
16216 return VINF_SUCCESS;
16217 }
16218 return rc;
16219}
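
/*
 * Illustrative, compiled-out sketch: the MOV DRx handler above uses the VMX_EXIT_QUAL_DRX_*
 * macros to find out which debug register and general-purpose register are involved and in
 * which direction.  The decoder below shows the Intel SDM bit layout; the function name is
 * hypothetical, not a VBox API.
 */
#if 0
/**
 * Decodes a MOV-DR exit qualification: bits 2:0 = debug register number, bit 4 = direction
 * (0 = MOV to DRx, 1 = MOV from DRx), bits 11:8 = general-purpose register.
 */
static void SketchDecodeDrxQual(uint64_t uExitQual, uint8_t *piDrReg, bool *pfDrRead, uint8_t *piGReg)
{
    *piDrReg  = (uint8_t)( uExitQual       & 0x7);
    *pfDrRead = RT_BOOL( (uExitQual >> 4)  & 0x1);      /* 1 = MOV from DRx (register read). */
    *piGReg   = (uint8_t)((uExitQual >> 8) & 0xf);
}
#endif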
16220
16221
16222/**
16223 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
16224 * Conditional VM-exit.
16225 */
16226HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16227{
16228 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16229 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
16230
16231 hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
16232 hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
16233 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16234 hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
16235 hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
16236
16237 /*
16238 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
16239 */
16240 VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
16241 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
16242 {
16243 /*
16244 * In the unlikely case where delivering an event causes an EPT misconfig (MMIO), go back to
16245 * instruction emulation to inject the original event. Otherwise, injecting the original event
16246 * using hardware-assisted VMX would trigger the same EPT misconfig VM-exit again.
16247 */
16248 if (!pVCpu->hm.s.Event.fPending)
16249 { /* likely */ }
16250 else
16251 {
16252 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterpret);
16253#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
16254 /** @todo NSTVMX: Think about how this should be handled. */
16255 if (pVmxTransient->fIsNestedGuest)
16256 return VERR_VMX_IPE_3;
16257#endif
16258 return VINF_EM_RAW_INJECT_TRPM_EVENT;
16259 }
16260 }
16261 else
16262 {
16263 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
16264 return rcStrict;
16265 }
16266
16267 /*
16268 * Get sufficient state and update the exit history entry.
16269 */
16270 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
16271 hmR0VmxReadGuestPhysicalAddrVmcs(pVmxTransient);
16272 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
16273 AssertRCReturn(rc, rc);
16274
16275 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
16276 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
16277 EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
16278 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
16279 if (!pExitRec)
16280 {
16281 /*
16282 * If we succeed, resume guest execution.
16283          * If we fail to interpret the instruction because we couldn't get the guest-physical address
16284          * of the page containing the instruction via the guest's page tables (we would invalidate the
16285          * guest page in the host TLB), we resume execution, which causes a guest page fault and lets
16286          * the guest handle this weird case. See @bugref{6043}.
16287 */
16288 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
16289 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
16290/** @todo bird: We can probably just go straight to IOM here and assume that
16291 * it's MMIO, then fall back on PGM if that hunch didn't work out so
16292 * well. However, we need to address that aliasing workarounds that
16293 * PGMR0Trap0eHandlerNPMisconfig implements. So, some care is needed.
16294 *
16295 * Might also be interesting to see if we can get this done more or
16296 * less locklessly inside IOM. Need to consider the lookup table
16297 * updating and use a bit more carefully first (or do all updates via
16298 * rendezvous) */
16299 rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
16300 Log4Func(("At %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
16301 if ( rcStrict == VINF_SUCCESS
16302 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
16303 || rcStrict == VERR_PAGE_NOT_PRESENT)
16304 {
16305 /* Successfully handled MMIO operation. */
16306 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
16307 | HM_CHANGED_GUEST_APIC_TPR);
16308 rcStrict = VINF_SUCCESS;
16309 }
16310 }
16311 else
16312 {
16313 /*
16314 * Frequent exit or something needing probing. Call EMHistoryExec.
16315 */
16316 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
16317 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
16318
16319 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
16320 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
16321
16322 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
16323 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
16324 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
16325 }
16326 return rcStrict;
16327}
16328
16329
16330/**
16331 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
16332 * VM-exit.
16333 */
16334HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16335{
16336 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16337 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
16338
16339 hmR0VmxReadExitQualVmcs(pVmxTransient);
16340 hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
16341 hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
16342 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16343 hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
16344 hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
16345
16346 /*
16347 * If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly.
16348 */
16349 VBOXSTRICTRC rcStrict = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pVmxTransient);
16350 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
16351 {
16352 /*
16353 * If delivery of an event causes an EPT violation (true nested #PF and not MMIO),
16354 * we shall resolve the nested #PF and re-inject the original event.
16355 */
16356 if (pVCpu->hm.s.Event.fPending)
16357 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectReflectNPF);
16358 }
16359 else
16360 {
16361 Assert(rcStrict != VINF_HM_DOUBLE_FAULT);
16362 return rcStrict;
16363 }
16364
16365 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
16366 hmR0VmxReadGuestPhysicalAddrVmcs(pVmxTransient);
16367 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
16368 AssertRCReturn(rc, rc);
16369
16370 RTGCPHYS const GCPhys = pVmxTransient->uGuestPhysicalAddr;
16371 uint64_t const uExitQual = pVmxTransient->uExitQual;
16372 AssertMsg(((pVmxTransient->uExitQual >> 7) & 3) != 2, ("%#RX64", uExitQual));
16373
16374 RTGCUINT uErrorCode = 0;
16375 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH)
16376 uErrorCode |= X86_TRAP_PF_ID;
16377 if (uExitQual & VMX_EXIT_QUAL_EPT_ACCESS_WRITE)
16378 uErrorCode |= X86_TRAP_PF_RW;
16379 if (uExitQual & (VMX_EXIT_QUAL_EPT_ENTRY_READ | VMX_EXIT_QUAL_EPT_ENTRY_WRITE | VMX_EXIT_QUAL_EPT_ENTRY_EXECUTE))
16380 uErrorCode |= X86_TRAP_PF_P;
16381
16382 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
16383 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
16384 Log4Func(("at %#RX64 (%#RX64 errcode=%#x) cs:rip=%#04x:%#RX64\n", GCPhys, uExitQual, uErrorCode, pCtx->cs.Sel, pCtx->rip));
16385
16386 /*
16387 * Handle the pagefault trap for the nested shadow table.
16388 */
16389 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
16390 rcStrict = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pCtx), GCPhys);
16391 TRPMResetTrap(pVCpu);
16392
16393 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
16394 if ( rcStrict == VINF_SUCCESS
16395 || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
16396 || rcStrict == VERR_PAGE_NOT_PRESENT)
16397 {
16398 /* Successfully synced our nested page tables. */
16399 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
16400 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS);
16401 return VINF_SUCCESS;
16402 }
16403
16404     Log4Func(("EPT return to ring-3 rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
16405 return rcStrict;
16406}
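
/*
 * Illustrative, compiled-out sketch: the EPT-violation handler above folds selected exit
 * qualification bits into a #PF-style error code before handing the fault to PGM.  The helper
 * below shows the same translation using the raw Intel SDM bit positions instead of the
 * VMX_EXIT_QUAL_EPT_* macros; the function name is hypothetical, not a VBox API.
 */
#if 0
/**
 * Translates an EPT-violation exit qualification into a #PF error code: bit 0 = data read,
 * bit 1 = data write, bit 2 = instruction fetch, bits 5:3 = whether the faulting EPT entry
 * was readable/writable/executable (any of them set implies the translation was present).
 */
static uint32_t SketchEptQualToPfErrCode(uint64_t uExitQual)
{
    uint32_t uErrCode = 0;
    if (uExitQual & RT_BIT_64(2))                                   /* Instruction fetch. */
        uErrCode |= X86_TRAP_PF_ID;
    if (uExitQual & RT_BIT_64(1))                                   /* Write access. */
        uErrCode |= X86_TRAP_PF_RW;
    if (uExitQual & (RT_BIT_64(3) | RT_BIT_64(4) | RT_BIT_64(5)))   /* Entry had some permission. */
        uErrCode |= X86_TRAP_PF_P;
    return uErrCode;
}
#endif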
16407
16408
16409#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
16410/**
16411 * VM-exit handler for VMCLEAR (VMX_EXIT_VMCLEAR). Unconditional VM-exit.
16412 */
16413HMVMX_EXIT_DECL hmR0VmxExitVmclear(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16414{
16415 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16416
16417 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16418 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
16419 hmR0VmxReadExitQualVmcs(pVmxTransient);
16420 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
16421 | CPUMCTX_EXTRN_HWVIRT
16422 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
16423 AssertRCReturn(rc, rc);
16424
16425 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
16426
16427 VMXVEXITINFO ExitInfo;
16428 RT_ZERO(ExitInfo);
16429 ExitInfo.uReason = pVmxTransient->uExitReason;
16430 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16431 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
16432 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
16433 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
16434
16435 VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
16436 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
16437 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
16438 else if (rcStrict == VINF_IEM_RAISED_XCPT)
16439 {
16440 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
16441 rcStrict = VINF_SUCCESS;
16442 }
16443 return rcStrict;
16444}
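
/*
 * Illustrative, compiled-out sketch: the VMX-instruction exit handlers in this section all
 * build a VMXVEXITINFO from the same four transient fields before calling the respective
 * IEMExecDecoded* API.  The helper below merely captures that recurring pattern; it is a
 * hypothetical refactoring aid, not an existing VBox function.
 */
#if 0
static void SketchInitVmxInstrExitInfo(VMXTRANSIENT const *pVmxTransient, VMXVEXITINFO *pExitInfo)
{
    RT_ZERO(*pExitInfo);
    pExitInfo->uReason     = pVmxTransient->uExitReason;
    pExitInfo->u64Qual     = pVmxTransient->uExitQual;
    pExitInfo->InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
    pExitInfo->cbInstr     = pVmxTransient->cbExitInstr;
}
#endif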
16445
16446
16447/**
16448 * VM-exit handler for VMLAUNCH (VMX_EXIT_VMLAUNCH). Unconditional VM-exit.
16449 */
16450HMVMX_EXIT_DECL hmR0VmxExitVmlaunch(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16451{
16452 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16453
16454    /* Import the entire VMCS state for now as we would be switching VMCS on successful VMLAUNCH;
16455 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
16456 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16457 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
16458 AssertRCReturn(rc, rc);
16459
16460 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
16461
16462 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitVmentry, z);
16463 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMLAUNCH);
16464 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitVmentry, z);
16465 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
16466 {
16467 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
16468 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
16469 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
16470 }
16471 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
16472 return rcStrict;
16473}
16474
16475
16476/**
16477 * VM-exit handler for VMPTRLD (VMX_EXIT_VMPTRLD). Unconditional VM-exit.
16478 */
16479HMVMX_EXIT_DECL hmR0VmxExitVmptrld(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16480{
16481 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16482
16483 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16484 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
16485 hmR0VmxReadExitQualVmcs(pVmxTransient);
16486 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
16487 | CPUMCTX_EXTRN_HWVIRT
16488 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
16489 AssertRCReturn(rc, rc);
16490
16491 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
16492
16493 VMXVEXITINFO ExitInfo;
16494 RT_ZERO(ExitInfo);
16495 ExitInfo.uReason = pVmxTransient->uExitReason;
16496 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16497 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
16498 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
16499 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
16500
16501 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
16502 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
16503 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
16504 else if (rcStrict == VINF_IEM_RAISED_XCPT)
16505 {
16506 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
16507 rcStrict = VINF_SUCCESS;
16508 }
16509 return rcStrict;
16510}
16511
16512
16513/**
16514 * VM-exit handler for VMPTRST (VMX_EXIT_VMPTRST). Unconditional VM-exit.
16515 */
16516HMVMX_EXIT_DECL hmR0VmxExitVmptrst(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16517{
16518 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16519
16520 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16521 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
16522 hmR0VmxReadExitQualVmcs(pVmxTransient);
16523 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
16524 | CPUMCTX_EXTRN_HWVIRT
16525 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
16526 AssertRCReturn(rc, rc);
16527
16528 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
16529
16530 VMXVEXITINFO ExitInfo;
16531 RT_ZERO(ExitInfo);
16532 ExitInfo.uReason = pVmxTransient->uExitReason;
16533 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16534 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
16535 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
16536 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
16537
16538 VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrst(pVCpu, &ExitInfo);
16539 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
16540 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
16541 else if (rcStrict == VINF_IEM_RAISED_XCPT)
16542 {
16543 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
16544 rcStrict = VINF_SUCCESS;
16545 }
16546 return rcStrict;
16547}
16548
16549
16550/**
16551 * VM-exit handler for VMREAD (VMX_EXIT_VMREAD). Conditional VM-exit.
16552 */
16553HMVMX_EXIT_DECL hmR0VmxExitVmread(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16554{
16555 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16556
16557 /*
16558     * Strictly speaking we should not get VMREAD VM-exits for shadow VMCS fields and thus
16559     * might not need to import the shadow VMCS state, but it's safer to do so just in case
16560     * code elsewhere dares to look at unsynced VMCS fields.
16561 */
16562 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16563 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
16564 hmR0VmxReadExitQualVmcs(pVmxTransient);
16565 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
16566 | CPUMCTX_EXTRN_HWVIRT
16567 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
16568 AssertRCReturn(rc, rc);
16569
16570 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
16571
16572 VMXVEXITINFO ExitInfo;
16573 RT_ZERO(ExitInfo);
16574 ExitInfo.uReason = pVmxTransient->uExitReason;
16575 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16576 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
16577 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
16578 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
16579 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_WRITE, &ExitInfo.GCPtrEffAddr);
16580
16581 VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
16582 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
16583 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
16584 else if (rcStrict == VINF_IEM_RAISED_XCPT)
16585 {
16586 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
16587 rcStrict = VINF_SUCCESS;
16588 }
16589 return rcStrict;
16590}
16591
16592
16593/**
16594 * VM-exit handler for VMRESUME (VMX_EXIT_VMRESUME). Unconditional VM-exit.
16595 */
16596HMVMX_EXIT_DECL hmR0VmxExitVmresume(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16597{
16598 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16599
16600    /* Import the entire VMCS state for now as we would be switching VMCS on successful VMRESUME;
16601 otherwise we could import just IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK. */
16602 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16603 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
16604 AssertRCReturn(rc, rc);
16605
16606 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
16607
16608 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitVmentry, z);
16609 VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, pVmxTransient->cbExitInstr, VMXINSTRID_VMRESUME);
16610 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitVmentry, z);
16611 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
16612 {
16613 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
16614 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
16615 rcStrict = VINF_VMX_VMLAUNCH_VMRESUME;
16616 }
16617 Assert(rcStrict != VINF_IEM_RAISED_XCPT);
16618 return rcStrict;
16619}
16620
16621
16622/**
16623 * VM-exit handler for VMWRITE (VMX_EXIT_VMWRITE). Conditional VM-exit.
16624 */
16625HMVMX_EXIT_DECL hmR0VmxExitVmwrite(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16626{
16627 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16628
16629 /*
16630      * Although we should not get VMWRITE VM-exits for shadow VMCS fields, our HM hook gets
16631      * invoked when IEM's VMWRITE instruction emulation modifies the current VMCS and flags
16632      * re-loading of the entire shadow VMCS, so we should save the entire shadow VMCS here.
16633 */
16634 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16635 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
16636 hmR0VmxReadExitQualVmcs(pVmxTransient);
16637 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
16638 | CPUMCTX_EXTRN_HWVIRT
16639 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
16640 AssertRCReturn(rc, rc);
16641
16642 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
16643
16644 VMXVEXITINFO ExitInfo;
16645 RT_ZERO(ExitInfo);
16646 ExitInfo.uReason = pVmxTransient->uExitReason;
16647 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16648 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
16649 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
16650 if (!ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand)
16651 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
16652
16653 VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
16654 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
16655 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
16656 else if (rcStrict == VINF_IEM_RAISED_XCPT)
16657 {
16658 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
16659 rcStrict = VINF_SUCCESS;
16660 }
16661 return rcStrict;
16662}
16663
16664
16665/**
16666 * VM-exit handler for VMXOFF (VMX_EXIT_VMXOFF). Unconditional VM-exit.
16667 */
16668HMVMX_EXIT_DECL hmR0VmxExitVmxoff(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16669{
16670 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16671
16672 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16673 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR4
16674 | CPUMCTX_EXTRN_HWVIRT
16675 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
16676 AssertRCReturn(rc, rc);
16677
16678 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
16679
16680 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, pVmxTransient->cbExitInstr);
16681 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
16682 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_HWVIRT);
16683 else if (rcStrict == VINF_IEM_RAISED_XCPT)
16684 {
16685 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
16686 rcStrict = VINF_SUCCESS;
16687 }
16688 return rcStrict;
16689}
16690
16691
16692/**
16693 * VM-exit handler for VMXON (VMX_EXIT_VMXON). Unconditional VM-exit.
16694 */
16695HMVMX_EXIT_DECL hmR0VmxExitVmxon(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16696{
16697 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16698
16699 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16700 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
16701 hmR0VmxReadExitQualVmcs(pVmxTransient);
16702 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
16703 | CPUMCTX_EXTRN_HWVIRT
16704 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
16705 AssertRCReturn(rc, rc);
16706
16707 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
16708
16709 VMXVEXITINFO ExitInfo;
16710 RT_ZERO(ExitInfo);
16711 ExitInfo.uReason = pVmxTransient->uExitReason;
16712 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16713 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
16714 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
16715 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
16716
16717 VBOXSTRICTRC rcStrict = IEMExecDecodedVmxon(pVCpu, &ExitInfo);
16718 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
16719 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_HWVIRT);
16720 else if (rcStrict == VINF_IEM_RAISED_XCPT)
16721 {
16722 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
16723 rcStrict = VINF_SUCCESS;
16724 }
16725 return rcStrict;
16726}
16727
16728
16729/**
16730 * VM-exit handler for INVVPID (VMX_EXIT_INVVPID). Unconditional VM-exit.
16731 */
16732HMVMX_EXIT_DECL hmR0VmxExitInvvpid(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16733{
16734 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16735
16736 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16737 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
16738 hmR0VmxReadExitQualVmcs(pVmxTransient);
16739 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_SREG_MASK
16740 | IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK);
16741 AssertRCReturn(rc, rc);
16742
16743 HMVMX_CHECK_EXIT_DUE_TO_VMX_INSTR(pVCpu, pVmxTransient->uExitReason);
16744
16745 VMXVEXITINFO ExitInfo;
16746 RT_ZERO(ExitInfo);
16747 ExitInfo.uReason = pVmxTransient->uExitReason;
16748 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16749 ExitInfo.InstrInfo.u = pVmxTransient->ExitInstrInfo.u;
16750 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
16751 HMVMX_DECODE_MEM_OPERAND(pVCpu, ExitInfo.InstrInfo.u, ExitInfo.u64Qual, VMXMEMACCESS_READ, &ExitInfo.GCPtrEffAddr);
16752
16753 VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
16754 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
16755 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
16756 else if (rcStrict == VINF_IEM_RAISED_XCPT)
16757 {
16758 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
16759 rcStrict = VINF_SUCCESS;
16760 }
16761 return rcStrict;
16762}
16763#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
16764/** @} */
16765
16766
16767#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
16768/** @name Nested-guest VM-exit handlers.
16769 * @{
16770 */
16771/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
16772/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- Nested-guest VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
16773/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
16774
16775/**
16776 * Nested-guest VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
16777 * Conditional VM-exit.
16778 */
16779HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16780{
16781 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16782
16783 hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
16784
16785 uint64_t const uExitIntInfo = pVmxTransient->uExitIntInfo;
16786 uint32_t const uExitIntType = VMX_EXIT_INT_INFO_TYPE(uExitIntInfo);
16787 Assert(VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo));
16788
16789 switch (uExitIntType)
16790 {
16791 /*
16792 * Physical NMIs:
16793      * We shouldn't direct host physical NMIs to the nested-guest. Dispatch them to the host.
16794 */
16795 case VMX_EXIT_INT_INFO_TYPE_NMI:
16796 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);
16797
16798 /*
16799 * Hardware exceptions,
16800 * Software exceptions,
16801 * Privileged software exceptions:
16802 * Figure out if the exception must be delivered to the guest or the nested-guest.
16803 */
16804 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:
16805 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:
16806 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:
16807 {
16808 hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
16809 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16810 hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
16811 hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
16812
16813 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
16814 bool const fIntercept = CPUMIsGuestVmxXcptInterceptSet(pCtx, VMX_EXIT_INT_INFO_VECTOR(uExitIntInfo),
16815 pVmxTransient->uExitIntErrorCode);
16816 if (fIntercept)
16817 {
16818 /* Exit qualification is required for debug and page-fault exceptions. */
16819 hmR0VmxReadExitQualVmcs(pVmxTransient);
16820
16821 /*
16822 * For VM-exits due to software exceptions (those generated by INT3 or INTO) and privileged
16823 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
16824 * length. However, if delivery of a software interrupt, software exception or privileged
16825 * software exception causes a VM-exit, that too provides the VM-exit instruction length.
16826 */
16827 VMXVEXITINFO ExitInfo;
16828 RT_ZERO(ExitInfo);
16829 ExitInfo.uReason = pVmxTransient->uExitReason;
16830 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
16831 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16832
16833 VMXVEXITEVENTINFO ExitEventInfo;
16834 RT_ZERO(ExitEventInfo);
16835 ExitEventInfo.uExitIntInfo = pVmxTransient->uExitIntInfo;
16836 ExitEventInfo.uExitIntErrCode = pVmxTransient->uExitIntErrorCode;
16837 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
16838 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
16839
16840#ifdef DEBUG_ramshankar
16841 hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);
16842 Log4Func(("exit_int_info=%#RX32 err_code=%#RX32 exit_qual=%#RX64\n", pVmxTransient->uExitIntInfo,
16843 pVmxTransient->uExitIntErrorCode, pVmxTransient->uExitQual));
16844 if (VMX_IDT_VECTORING_INFO_IS_VALID(pVmxTransient->uIdtVectoringInfo))
16845 {
16846 Log4Func(("idt_info=%#RX32 idt_errcode=%#RX32 cr2=%#RX64\n", pVmxTransient->uIdtVectoringInfo,
16847 pVmxTransient->uIdtVectoringErrorCode, pCtx->cr2));
16848 }
16849#endif
16850 return IEMExecVmxVmexitXcpt(pVCpu, &ExitInfo, &ExitEventInfo);
16851 }
16852
16853 /* Nested paging is currently a requirement, otherwise we would need to handle shadow #PFs in hmR0VmxExitXcptPF. */
16854 Assert(pVCpu->CTX_SUFF(pVM)->hmr0.s.fNestedPaging);
16855 return hmR0VmxExitXcpt(pVCpu, pVmxTransient);
16856 }
16857
16858 /*
16859 * Software interrupts:
16860 * VM-exits cannot be caused by software interrupts.
16861 *
16862 * External interrupts:
16863 * This should only happen when "acknowledge external interrupts on VM-exit"
16864 * control is set. However, we never set this when executing a guest or
16865 * nested-guest. For nested-guests it is emulated while injecting interrupts into
16866 * the guest.
16867 */
16868 case VMX_EXIT_INT_INFO_TYPE_SW_INT:
16869 case VMX_EXIT_INT_INFO_TYPE_EXT_INT:
16870 default:
16871 {
16872 pVCpu->hm.s.u32HMError = pVmxTransient->uExitIntInfo;
16873 return VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_TYPE;
16874 }
16875 }
16876}
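
/*
 * Illustrative, compiled-out sketch: the nested exception/NMI handler above switches on the
 * interruption type and vector taken from the VM-exit interruption-information field via the
 * VMX_EXIT_INT_INFO_* macros.  The decoder below shows the Intel SDM layout of that field;
 * the function name is hypothetical, not a VBox API.
 */
#if 0
/**
 * Decodes a VM-exit interruption-information field: bits 7:0 = vector, bits 10:8 = type
 * (0=external interrupt, 2=NMI, 3=hardware exception, 4=software interrupt, 5=privileged
 * software exception, 6=software exception), bit 11 = error-code valid, bit 12 = NMI
 * unblocking due to IRET, bit 31 = valid.
 */
static void SketchDecodeExitIntInfo(uint32_t uExitIntInfo, bool *pfValid, uint8_t *puVector, uint8_t *puType)
{
    *pfValid  = RT_BOOL(uExitIntInfo & RT_BIT_32(31));
    *puVector = (uint8_t)(uExitIntInfo & 0xff);
    *puType   = (uint8_t)((uExitIntInfo >> 8) & 0x7);
}
#endif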
16877
16878
16879/**
16880 * Nested-guest VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT).
16881 * Unconditional VM-exit.
16882 */
16883HMVMX_EXIT_DECL hmR0VmxExitTripleFaultNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16884{
16885 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16886 return IEMExecVmxVmexitTripleFault(pVCpu);
16887}
16888
16889
16890/**
16891 * Nested-guest VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
16892 */
16893HMVMX_EXIT_NSRC_DECL hmR0VmxExitIntWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16894{
16895 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16896
16897 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
16898 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
16899 return hmR0VmxExitIntWindow(pVCpu, pVmxTransient);
16900}
16901
16902
16903/**
16904 * Nested-guest VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
16905 */
16906HMVMX_EXIT_NSRC_DECL hmR0VmxExitNmiWindowNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16907{
16908 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16909
16910 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT))
16911 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, 0 /* uExitQual */);
16912 return hmR0VmxExitNmiWindow(pVCpu, pVmxTransient);
16913}
16914
16915
16916/**
16917 * Nested-guest VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH).
16918 * Unconditional VM-exit.
16919 */
16920HMVMX_EXIT_DECL hmR0VmxExitTaskSwitchNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16921{
16922 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16923
16924 hmR0VmxReadExitQualVmcs(pVmxTransient);
16925 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16926 hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
16927 hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
16928
16929 VMXVEXITINFO ExitInfo;
16930 RT_ZERO(ExitInfo);
16931 ExitInfo.uReason = pVmxTransient->uExitReason;
16932 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
16933 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16934
16935 VMXVEXITEVENTINFO ExitEventInfo;
16936 RT_ZERO(ExitEventInfo);
16937 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
16938 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
16939 return IEMExecVmxVmexitTaskSwitch(pVCpu, &ExitInfo, &ExitEventInfo);
16940}
16941
16942
16943/**
16944 * Nested-guest VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
16945 */
16946HMVMX_EXIT_DECL hmR0VmxExitHltNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16947{
16948 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16949
16950 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_HLT_EXIT))
16951 {
16952 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16953 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
16954 }
16955 return hmR0VmxExitHlt(pVCpu, pVmxTransient);
16956}
16957
16958
16959/**
16960 * Nested-guest VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
16961 */
16962HMVMX_EXIT_DECL hmR0VmxExitInvlpgNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16963{
16964 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16965
16966 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
16967 {
16968 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16969 hmR0VmxReadExitQualVmcs(pVmxTransient);
16970
16971 VMXVEXITINFO ExitInfo;
16972 RT_ZERO(ExitInfo);
16973 ExitInfo.uReason = pVmxTransient->uExitReason;
16974 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
16975 ExitInfo.u64Qual = pVmxTransient->uExitQual;
16976 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
16977 }
16978 return hmR0VmxExitInvlpg(pVCpu, pVmxTransient);
16979}
16980
16981
16982/**
16983 * Nested-guest VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
16984 */
16985HMVMX_EXIT_DECL hmR0VmxExitRdpmcNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
16986{
16987 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
16988
16989 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDPMC_EXIT))
16990 {
16991 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
16992 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
16993 }
16994 return hmR0VmxExitRdpmc(pVCpu, pVmxTransient);
16995}
16996
16997
16998/**
16999 * Nested-guest VM-exit handler for VMREAD (VMX_EXIT_VMREAD) and VMWRITE
17000 * (VMX_EXIT_VMWRITE). Conditional VM-exit.
17001 */
17002HMVMX_EXIT_DECL hmR0VmxExitVmreadVmwriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17003{
17004 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17005
17006 Assert( pVmxTransient->uExitReason == VMX_EXIT_VMREAD
17007 || pVmxTransient->uExitReason == VMX_EXIT_VMWRITE);
17008
17009 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
17010
17011 uint8_t const iGReg = pVmxTransient->ExitInstrInfo.VmreadVmwrite.iReg2;
17012 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
17013 uint64_t u64VmcsField = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
17014
17015 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
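    /* Outside 64-bit mode only bits 31:0 of the register operand are used as the VMCS field
       encoding, so the upper half is masked off below. */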
17016 if (!CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
17017 u64VmcsField &= UINT64_C(0xffffffff);
17018
17019 if (CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, pVmxTransient->uExitReason, u64VmcsField))
17020 {
17021 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
17022 hmR0VmxReadExitQualVmcs(pVmxTransient);
17023
17024 VMXVEXITINFO ExitInfo;
17025 RT_ZERO(ExitInfo);
17026 ExitInfo.uReason = pVmxTransient->uExitReason;
17027 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
17028 ExitInfo.u64Qual = pVmxTransient->uExitQual;
17029 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
17030 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
17031 }
17032
17033 if (pVmxTransient->uExitReason == VMX_EXIT_VMREAD)
17034 return hmR0VmxExitVmread(pVCpu, pVmxTransient);
17035 return hmR0VmxExitVmwrite(pVCpu, pVmxTransient);
17036}
17037
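/*
 * Illustrative sketch (not part of the build): a simplified reading of how VMREAD/VMWRITE
 * interception is decided when the guest hypervisor uses VMCS shadowing. The helper name,
 * parameters and bitmap access below are assumptions for illustration only; the real check
 * is performed by CPUMIsGuestVmxVmreadVmwriteInterceptSet above.
 */
#if 0
static bool vmxSketchIsVmreadVmwriteIntercepted(uint32_t fProcCtls2, uint64_t u64FieldEnc,
                                                uint8_t const *pabBitmap /* 4K VMREAD or VMWRITE bitmap */)
{
    /* Without the "VMCS shadowing" secondary control, VMREAD/VMWRITE always cause VM-exits. */
    if (!(fProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING))
        return true;
    /* Field encodings that set bits beyond 14:0 always cause VM-exits. */
    if (u64FieldEnc >> 15)
        return true;
    /* Otherwise the bit corresponding to the field encoding in the 4K bitmap decides. */
    uint32_t const iBit = (uint32_t)u64FieldEnc;
    return RT_BOOL(pabBitmap[iBit >> 3] & RT_BIT_32(iBit & 7));
}
#endif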
17038
17039/**
17040 * Nested-guest VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
17041 */
17042HMVMX_EXIT_DECL hmR0VmxExitRdtscNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17043{
17044 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17045
17046 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
17047 {
17048 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
17049 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
17050 }
17051
17052 return hmR0VmxExitRdtsc(pVCpu, pVmxTransient);
17053}
17054
17055
17056/**
17057 * Nested-guest VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX).
17058 * Conditional VM-exit.
17059 */
17060HMVMX_EXIT_DECL hmR0VmxExitMovCRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17061{
17062 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17063
17064 hmR0VmxReadExitQualVmcs(pVmxTransient);
17065 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
17066
17067 VBOXSTRICTRC rcStrict;
17068 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual);
17069 switch (uAccessType)
17070 {
17071 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE:
17072 {
17073 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
17074 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
17075 Assert(iGReg < RT_ELEMENTS(pVCpu->cpum.GstCtx.aGRegs));
17076 uint64_t const uNewCrX = pVCpu->cpum.GstCtx.aGRegs[iGReg].u64;
17077
17078 bool fIntercept;
17079 switch (iCrReg)
17080 {
17081 case 0:
17082 case 4:
17083 fIntercept = CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX);
17084 break;
17085
17086 case 3:
17087 fIntercept = CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCrX);
17088 break;
17089
17090 case 8:
17091 fIntercept = CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_CR8_LOAD_EXIT);
17092 break;
17093
17094 default:
17095 fIntercept = false;
17096 break;
17097 }
17098 if (fIntercept)
17099 {
17100 VMXVEXITINFO ExitInfo;
17101 RT_ZERO(ExitInfo);
17102 ExitInfo.uReason = pVmxTransient->uExitReason;
17103 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
17104 ExitInfo.u64Qual = pVmxTransient->uExitQual;
17105 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
17106 }
17107 else
17108 {
17109 int const rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, IEM_CPUMCTX_EXTRN_MUST_MASK);
17110 AssertRCReturn(rc, rc);
17111 rcStrict = hmR0VmxExitMovToCrX(pVCpu, pVmxTransient->cbExitInstr, iGReg, iCrReg);
17112 }
17113 break;
17114 }
17115
17116 case VMX_EXIT_QUAL_CRX_ACCESS_READ:
17117 {
17118 /*
17119 * CR0/CR4 reads do not cause VM-exits; the read shadow is used instead (subject to masking).
17120 * CR2 reads do not cause a VM-exit.
17121 * CR3 reads cause a VM-exit depending on the "CR3 store exiting" control.
17122 * CR8 reads cause a VM-exit depending on the "CR8 store exiting" control.
17123 */
17124 uint8_t const iCrReg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);
17125 if ( iCrReg == 3
17126 || iCrReg == 8)
17127 {
17128 static const uint32_t s_auCrXReadIntercepts[] = { 0, 0, 0, VMX_PROC_CTLS_CR3_STORE_EXIT, 0,
17129 0, 0, 0, VMX_PROC_CTLS_CR8_STORE_EXIT };
17130 uint32_t const uIntercept = s_auCrXReadIntercepts[iCrReg];
17131 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, uIntercept))
17132 {
17133 VMXVEXITINFO ExitInfo;
17134 RT_ZERO(ExitInfo);
17135 ExitInfo.uReason = pVmxTransient->uExitReason;
17136 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
17137 ExitInfo.u64Qual = pVmxTransient->uExitQual;
17138 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
17139 }
17140 else
17141 {
17142 uint8_t const iGReg = VMX_EXIT_QUAL_CRX_GENREG(pVmxTransient->uExitQual);
17143 rcStrict = hmR0VmxExitMovFromCrX(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, iGReg, iCrReg);
17144 }
17145 }
17146 else
17147 {
17148 AssertMsgFailed(("MOV from CR%d VM-exit must not happen\n", iCrReg));
17149 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, iCrReg);
17150 }
17151 break;
17152 }
17153
17154 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS:
17155 {
17156 PCVMXVVMCS const pVmcsNstGst = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
17157 uint64_t const uGstHostMask = pVmcsNstGst->u64Cr0Mask.u;
17158 uint64_t const uReadShadow = pVmcsNstGst->u64Cr0ReadShadow.u;
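            /* CLTS clears CR0.TS. Per the VMX rules it is reflected to the guest hypervisor only
               when it owns CR0.TS (guest/host mask bit set) and its CR0 read shadow has TS set. */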
17159 if ( (uGstHostMask & X86_CR0_TS)
17160 && (uReadShadow & X86_CR0_TS))
17161 {
17162 VMXVEXITINFO ExitInfo;
17163 RT_ZERO(ExitInfo);
17164 ExitInfo.uReason = pVmxTransient->uExitReason;
17165 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
17166 ExitInfo.u64Qual = pVmxTransient->uExitQual;
17167 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
17168 }
17169 else
17170 rcStrict = hmR0VmxExitClts(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr);
17171 break;
17172 }
17173
17174 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
17175 {
17176 RTGCPTR GCPtrEffDst;
17177 uint16_t const uNewMsw = VMX_EXIT_QUAL_CRX_LMSW_DATA(pVmxTransient->uExitQual);
17178 bool const fMemOperand = VMX_EXIT_QUAL_CRX_LMSW_OP_MEM(pVmxTransient->uExitQual);
17179 if (fMemOperand)
17180 {
17181 hmR0VmxReadGuestLinearAddrVmcs(pVmxTransient);
17182 GCPtrEffDst = pVmxTransient->uGuestLinearAddr;
17183 }
17184 else
17185 GCPtrEffDst = NIL_RTGCPTR;
17186
17187 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
17188 {
17189 VMXVEXITINFO ExitInfo;
17190 RT_ZERO(ExitInfo);
17191 ExitInfo.uReason = pVmxTransient->uExitReason;
17192 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
17193 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
17194 ExitInfo.u64Qual = pVmxTransient->uExitQual;
17195 rcStrict = IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
17196 }
17197 else
17198 rcStrict = hmR0VmxExitLmsw(pVCpu, pVmxTransient->pVmcsInfo, pVmxTransient->cbExitInstr, uNewMsw, GCPtrEffDst);
17199 break;
17200 }
17201
17202 default:
17203 {
17204 AssertMsgFailed(("Unrecognized Mov CRX access type %#x\n", uAccessType));
17205 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, uAccessType);
17206 }
17207 }
17208
17209 if (rcStrict == VINF_IEM_RAISED_XCPT)
17210 {
17211 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
17212 rcStrict = VINF_SUCCESS;
17213 }
17214 return rcStrict;
17215}
17216
17217
17218/**
17219 * Nested-guest VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX).
17220 * Conditional VM-exit.
17221 */
17222HMVMX_EXIT_DECL hmR0VmxExitMovDRxNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17223{
17224 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17225
17226 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MOV_DR_EXIT))
17227 {
17228 hmR0VmxReadExitQualVmcs(pVmxTransient);
17229 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
17230
17231 VMXVEXITINFO ExitInfo;
17232 RT_ZERO(ExitInfo);
17233 ExitInfo.uReason = pVmxTransient->uExitReason;
17234 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
17235 ExitInfo.u64Qual = pVmxTransient->uExitQual;
17236 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
17237 }
17238 return hmR0VmxExitMovDRx(pVCpu, pVmxTransient);
17239}
17240
17241
17242/**
17243 * Nested-guest VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR).
17244 * Conditional VM-exit.
17245 */
17246HMVMX_EXIT_DECL hmR0VmxExitIoInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17247{
17248 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17249
17250 hmR0VmxReadExitQualVmcs(pVmxTransient);
17251
17252 uint32_t const uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQual);
17253 uint8_t const uIOSize = VMX_EXIT_QUAL_IO_SIZE(pVmxTransient->uExitQual);
17254 AssertReturn(uIOSize <= 3 && uIOSize != 2, VERR_VMX_IPE_1);
17255
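    /* The exit qualification encodes the access width as 0 (1 byte), 1 (2 bytes) or 3 (4 bytes);
       2 is not a valid encoding, hence the assertion above and the zero entry in the table below. */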
17256 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses in bytes. */
17257 uint8_t const cbAccess = s_aIOSizes[uIOSize];
17258 if (CPUMIsGuestVmxIoInterceptSet(pVCpu, uIOPort, cbAccess))
17259 {
17260 /*
17261 * IN/OUT instruction:
17262 * - Provides VM-exit instruction length.
17263 *
17264 * INS/OUTS instruction:
17265 * - Provides VM-exit instruction length.
17266 * - Provides Guest-linear address.
17267 * - Optionally provides VM-exit instruction info (depends on CPU feature).
17268 */
17269 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
17270 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
17271
17272 /* Make sure we don't use stale/uninitialized VMX-transient info below. */
17273 pVmxTransient->ExitInstrInfo.u = 0;
17274 pVmxTransient->uGuestLinearAddr = 0;
17275
17276 bool const fVmxInsOutsInfo = pVM->cpum.ro.GuestFeatures.fVmxInsOutInfo;
17277 bool const fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQual);
17278 if (fIOString)
17279 {
17280 hmR0VmxReadGuestLinearAddrVmcs(pVmxTransient);
17281 if (fVmxInsOutsInfo)
17282 {
17283 Assert(RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_VMCS_INS_OUTS)); /* Paranoia. */
17284 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
17285 }
17286 }
17287
17288 VMXVEXITINFO ExitInfo;
17289 RT_ZERO(ExitInfo);
17290 ExitInfo.uReason = pVmxTransient->uExitReason;
17291 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
17292 ExitInfo.u64Qual = pVmxTransient->uExitQual;
17293 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
17294 ExitInfo.u64GuestLinearAddr = pVmxTransient->uGuestLinearAddr;
17295 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
17296 }
17297 return hmR0VmxExitIoInstr(pVCpu, pVmxTransient);
17298}
17299
17300
17301/**
17302 * Nested-guest VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
17303 */
17304HMVMX_EXIT_DECL hmR0VmxExitRdmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17305{
17306 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17307
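    /* If the guest hypervisor does not use MSR bitmaps, all RDMSR executions in the nested-guest
       cause VM-exits; otherwise its MSR bitmap decides for this MSR (ECX). */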
17308 uint32_t fMsrpm;
17309 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
17310 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
17311 else
17312 fMsrpm = VMXMSRPM_EXIT_RD;
17313
17314 if (fMsrpm & VMXMSRPM_EXIT_RD)
17315 {
17316 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
17317 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
17318 }
17319 return hmR0VmxExitRdmsr(pVCpu, pVmxTransient);
17320}
17321
17322
17323/**
17324 * Nested-guest VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
17325 */
17326HMVMX_EXIT_DECL hmR0VmxExitWrmsrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17327{
17328 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17329
17330 uint32_t fMsrpm;
17331 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_MSR_BITMAPS))
17332 fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, pVCpu->cpum.GstCtx.ecx);
17333 else
17334 fMsrpm = VMXMSRPM_EXIT_WR;
17335
17336 if (fMsrpm & VMXMSRPM_EXIT_WR)
17337 {
17338 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
17339 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
17340 }
17341 return hmR0VmxExitWrmsr(pVCpu, pVmxTransient);
17342}
17343
17344
17345/**
17346 * Nested-guest VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
17347 */
17348HMVMX_EXIT_DECL hmR0VmxExitMwaitNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17349{
17350 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17351
17352 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MWAIT_EXIT))
17353 {
17354 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
17355 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
17356 }
17357 return hmR0VmxExitMwait(pVCpu, pVmxTransient);
17358}
17359
17360
17361/**
17362 * Nested-guest VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional
17363 * VM-exit.
17364 */
17365HMVMX_EXIT_DECL hmR0VmxExitMtfNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17366{
17367 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17368
17369 /** @todo NSTVMX: Should consider debugging nested-guests using VM debugger. */
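    /* MTF is a trap-like VM-exit; the guest pending debug exceptions are provided as part of
       reflecting it to the guest hypervisor (see IEMExecVmxVmexitTrapLike). */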
17370 hmR0VmxReadGuestPendingDbgXctps(pVmxTransient);
17371 VMXVEXITINFO ExitInfo;
17372 RT_ZERO(ExitInfo);
17373 ExitInfo.uReason = pVmxTransient->uExitReason;
17374 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
17375 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
17376}
17377
17378
17379/**
17380 * Nested-guest VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
17381 */
17382HMVMX_EXIT_DECL hmR0VmxExitMonitorNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17383{
17384 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17385
17386 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_MONITOR_EXIT))
17387 {
17388 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
17389 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
17390 }
17391 return hmR0VmxExitMonitor(pVCpu, pVmxTransient);
17392}
17393
17394
17395/**
17396 * Nested-guest VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
17397 */
17398HMVMX_EXIT_DECL hmR0VmxExitPauseNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17399{
17400 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17401
17402 /** @todo NSTVMX: Think about this more. Does the outer guest need to intercept
17403 * PAUSE when executing a nested-guest? If it does not, we would not need
17404 * to check for the intercepts here. Just call VM-exit... */
17405
17406 /* The CPU would have already performed the necessary CPL checks for PAUSE-loop exiting. */
17407 if ( CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_PAUSE_EXIT)
17408 || CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_PAUSE_LOOP_EXIT))
17409 {
17410 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
17411 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
17412 }
17413 return hmR0VmxExitPause(pVCpu, pVmxTransient);
17414}
17415
17416
17417/**
17418 * Nested-guest VM-exit handler for when the TPR value is lowered below the
17419 * specified threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
17420 */
17421HMVMX_EXIT_NSRC_DECL hmR0VmxExitTprBelowThresholdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17422{
17423 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17424
17425 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_USE_TPR_SHADOW))
17426 {
17427 hmR0VmxReadGuestPendingDbgXctps(pVmxTransient);
17428 VMXVEXITINFO ExitInfo;
17429 RT_ZERO(ExitInfo);
17430 ExitInfo.uReason = pVmxTransient->uExitReason;
17431 ExitInfo.u64GuestPendingDbgXcpts = pVmxTransient->uGuestPendingDbgXcpts;
17432 return IEMExecVmxVmexitTrapLike(pVCpu, &ExitInfo);
17433 }
17434 return hmR0VmxExitTprBelowThreshold(pVCpu, pVmxTransient);
17435}
17436
17437
17438/**
17439 * Nested-guest VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional
17440 * VM-exit.
17441 */
17442HMVMX_EXIT_DECL hmR0VmxExitApicAccessNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17443{
17444 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17445
17446 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
17447 hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
17448 hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
17449 hmR0VmxReadExitQualVmcs(pVmxTransient);
17450
17451 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
17452
17453 Log4Func(("at offset %#x type=%u\n", VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQual),
17454 VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQual)));
17455
17456 VMXVEXITINFO ExitInfo;
17457 RT_ZERO(ExitInfo);
17458 ExitInfo.uReason = pVmxTransient->uExitReason;
17459 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
17460 ExitInfo.u64Qual = pVmxTransient->uExitQual;
17461
17462 VMXVEXITEVENTINFO ExitEventInfo;
17463 RT_ZERO(ExitEventInfo);
17464 ExitEventInfo.uIdtVectoringInfo = pVmxTransient->uIdtVectoringInfo;
17465 ExitEventInfo.uIdtVectoringErrCode = pVmxTransient->uIdtVectoringErrorCode;
17466 return IEMExecVmxVmexitApicAccess(pVCpu, &ExitInfo, &ExitEventInfo);
17467}
17468
17469
17470/**
17471 * Nested-guest VM-exit handler for APIC write emulation (VMX_EXIT_APIC_WRITE).
17472 * Conditional VM-exit.
17473 */
17474HMVMX_EXIT_DECL hmR0VmxExitApicWriteNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17475{
17476 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17477
17478 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_APIC_REG_VIRT));
17479 hmR0VmxReadExitQualVmcs(pVmxTransient);
17480 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
17481}
17482
17483
17484/**
17485 * Nested-guest VM-exit handler for virtualized EOI (VMX_EXIT_VIRTUALIZED_EOI).
17486 * Conditional VM-exit.
17487 */
17488HMVMX_EXIT_DECL hmR0VmxExitVirtEoiNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17489{
17490 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17491
17492 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_INT_DELIVERY));
17493 hmR0VmxReadExitQualVmcs(pVmxTransient);
17494 return IEMExecVmxVmexit(pVCpu, pVmxTransient->uExitReason, pVmxTransient->uExitQual);
17495}
17496
17497
17498/**
17499 * Nested-guest VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
17500 */
17501HMVMX_EXIT_DECL hmR0VmxExitRdtscpNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17502{
17503 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17504
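    /* Per the VMX rules, RDTSCP causes a VM-exit when both the "RDTSC exiting" and the
       "enable RDTSCP" controls are set, hence checking the RDTSC intercept here. */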
17505 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_RDTSC_EXIT))
17506 {
17507 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_RDTSCP));
17508 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
17509 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
17510 }
17511 return hmR0VmxExitRdtscp(pVCpu, pVmxTransient);
17512}
17513
17514
17515/**
17516 * Nested-guest VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
17517 */
17518HMVMX_EXIT_NSRC_DECL hmR0VmxExitWbinvdNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17519{
17520 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17521
17522 if (CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_WBINVD_EXIT))
17523 {
17524 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
17525 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
17526 }
17527 return hmR0VmxExitWbinvd(pVCpu, pVmxTransient);
17528}
17529
17530
17531/**
17532 * Nested-guest VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
17533 */
17534HMVMX_EXIT_DECL hmR0VmxExitInvpcidNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17535{
17536 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17537
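    /* Per the VMX rules, INVPCID causes a VM-exit when both the "INVLPG exiting" and the
       "enable INVPCID" controls are set, hence checking the INVLPG intercept here. */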
17538 if (CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INVLPG_EXIT))
17539 {
17540 Assert(CPUMIsGuestVmxProcCtls2Set(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_INVPCID));
17541 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
17542 hmR0VmxReadExitQualVmcs(pVmxTransient);
17543 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
17544
17545 VMXVEXITINFO ExitInfo;
17546 RT_ZERO(ExitInfo);
17547 ExitInfo.uReason = pVmxTransient->uExitReason;
17548 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
17549 ExitInfo.u64Qual = pVmxTransient->uExitQual;
17550 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
17551 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
17552 }
17553 return hmR0VmxExitInvpcid(pVCpu, pVmxTransient);
17554}
17555
17556
17557/**
17558 * Nested-guest VM-exit handler for invalid-guest state
17559 * (VMX_EXIT_ERR_INVALID_GUEST_STATE). Error VM-exit.
17560 */
17561HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestStateNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17562{
17563 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17564
17565 /*
17566 * Currently this should never happen because we fully emulate VMLAUNCH/VMRESUME in IEM.
17567 * So if it does happen, it most likely indicates a bug in the hardware-assisted VMX code.
17568 * Handle it as if the outer guest were in an invalid guest state.
17569 *
17570 * When the fast path is implemented, this should be changed to cause the corresponding
17571 * nested-guest VM-exit.
17572 */
17573 return hmR0VmxExitErrInvalidGuestState(pVCpu, pVmxTransient);
17574}
17575
17576
17577/**
17578 * Nested-guest VM-exit handler for instructions that cause VM-exits unconditionally
17579 * and only provide the instruction length.
17580 *
17581 * Unconditional VM-exit.
17582 */
17583HMVMX_EXIT_DECL hmR0VmxExitInstrNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17584{
17585 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17586
17587#ifdef VBOX_STRICT
17588 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
17589 switch (pVmxTransient->uExitReason)
17590 {
17591 case VMX_EXIT_ENCLS:
17592 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_ENCLS_EXIT));
17593 break;
17594
17595 case VMX_EXIT_VMFUNC:
17596 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_VMFUNC));
17597 break;
17598 }
17599#endif
17600
17601 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
17602 return IEMExecVmxVmexitInstr(pVCpu, pVmxTransient->uExitReason, pVmxTransient->cbExitInstr);
17603}
17604
17605
17606/**
17607 * Nested-guest VM-exit handler for instructions that provide instruction length as
17608 * well as more information.
17609 *
17610 * Unconditional VM-exit.
17611 */
17612HMVMX_EXIT_DECL hmR0VmxExitInstrWithInfoNested(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
17613{
17614 HMVMX_VALIDATE_NESTED_EXIT_HANDLER_PARAMS(pVCpu, pVmxTransient);
17615
17616#ifdef VBOX_STRICT
17617 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
17618 switch (pVmxTransient->uExitReason)
17619 {
17620 case VMX_EXIT_GDTR_IDTR_ACCESS:
17621 case VMX_EXIT_LDTR_TR_ACCESS:
17622 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_DESC_TABLE_EXIT));
17623 break;
17624
17625 case VMX_EXIT_RDRAND:
17626 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDRAND_EXIT));
17627 break;
17628
17629 case VMX_EXIT_RDSEED:
17630 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_RDSEED_EXIT));
17631 break;
17632
17633 case VMX_EXIT_XSAVES:
17634 case VMX_EXIT_XRSTORS:
17635 /** @todo NSTVMX: Verify XSS-bitmap. */
17636 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_XSAVES_XRSTORS));
17637 break;
17638
17639 case VMX_EXIT_UMWAIT:
17640 case VMX_EXIT_TPAUSE:
17641 Assert(CPUMIsGuestVmxProcCtlsSet(pCtx, VMX_PROC_CTLS_RDTSC_EXIT));
17642 Assert(CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_USER_WAIT_PAUSE));
17643 break;
17644
17645 case VMX_EXIT_LOADIWKEY:
17646 Assert(CPUMIsGuestVmxProcCtls3Set(pCtx, VMX_PROC_CTLS3_LOADIWKEY_EXIT));
17647 break;
17648 }
17649#endif
17650
17651 hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
17652 hmR0VmxReadExitQualVmcs(pVmxTransient);
17653 hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
17654
17655 VMXVEXITINFO ExitInfo;
17656 RT_ZERO(ExitInfo);
17657 ExitInfo.uReason = pVmxTransient->uExitReason;
17658 ExitInfo.cbInstr = pVmxTransient->cbExitInstr;
17659 ExitInfo.u64Qual = pVmxTransient->uExitQual;
17660 ExitInfo.InstrInfo = pVmxTransient->ExitInstrInfo;
17661 return IEMExecVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
17662}
17663
17664/** @} */
17665#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
17666