VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h@75671

Last change on this file: r75671, checked in by vboxsync (2018-11-22):

VMM: Nested VMX: bugref:9180 Implement NMI-unblocking due to IRET for VM-exits. Implemented restoring blocking of NMI when VM-entry fails while checking/loading guest-state. Fixed loading blocking by NMI during VM-entry.

1/* $Id: IEMAllCImplVmxInstr.cpp.h 75671 2018-11-22 15:08:24Z vboxsync $ */
2/** @file
3 * IEM - VT-x instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Defined Constants And Macros *
21*********************************************************************************************************************************/
22#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
23/**
24 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
25 * relative offsets.
26 */
27# ifdef IEM_WITH_CODE_TLB
28# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) do { } while (0)
29# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) do { } while (0)
30# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
31# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
32# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
33# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
34# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
35# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
36# error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
37# else /* !IEM_WITH_CODE_TLB */
38# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
39 do \
40 { \
41 Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
42 (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
43 } while (0)
44
45# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
46
47# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
48 do \
49 { \
50 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
51 uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
52 uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
53 (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
54 } while (0)
55
56# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
57 do \
58 { \
59 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
60 (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
61 } while (0)
62
63# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
64 do \
65 { \
66 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
67 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
68 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
69 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
70 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
71 (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
72 } while (0)
73
74# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
75 do \
76 { \
77 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
78 (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
79 } while (0)
80
81# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
82 do \
83 { \
84 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
85 (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
86 } while (0)
87
88# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
89 do \
90 { \
91 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
92 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
93 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
94 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
95 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
96 (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
97 } while (0)
98# endif /* !IEM_WITH_CODE_TLB */
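/*
 * Illustrative example (not from the original file): the IEM_DISP_GET_XXX macros
 * simply reassemble a little-endian displacement from opcode bytes that have
 * already been fetched into abOpcode.  Assuming abOpcode[offDisp] = 0x34 and
 * abOpcode[offDisp + 1] = 0x12:
 *
 *     uint16_t u16Disp;
 *     IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);  // bTmpLo=0x34, bTmpHi=0x12
 *     // u16Disp == 0x1234
 *
 * The sign-extending variants read a single byte and sign-extend it, so a
 * displacement byte of 0xf6 (-10) yields 0xfffffff6 (U32) or 0xfffffffffffffff6 (U64).
 */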
99
100/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
101#define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)
102
103/** Whether a shadow VMCS is present for the given VCPU. */
104#define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
105
106/** Gets the VMXON region pointer. */
107#define IEM_VMX_GET_VMXON_PTR(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
108
109/** Gets the guest-physical address of the current VMCS for the given VCPU. */
110#define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
111
112/** Whether a current VMCS is present for the given VCPU. */
113#define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
114
115/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
116#define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
117 do \
118 { \
119 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
120 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
121 } while (0)
122
123/** Clears any current VMCS for the given VCPU. */
124#define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
125 do \
126 { \
127 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
128 } while (0)
129
130/** Checks that the CPU is in VMX operation, as required by the VMX instruction being executed; raises \#UD otherwise.
131 * @note Any changes here, check if IEMOP_HLP_IN_VMX_OPERATION needs updating. */
132#define IEM_VMX_IN_VMX_OPERATION(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
133 do \
134 { \
135 if (IEM_VMX_IS_ROOT_MODE(a_pVCpu)) \
136 { /* likely */ } \
137 else \
138 { \
139 Log((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
140 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
141 return iemRaiseUndefinedOpcode(a_pVCpu); \
142 } \
143 } while (0)
144
145/** Marks a VM-entry failure with a diagnostic reason, logs and returns. */
146#define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_VmxDiag) \
147 do \
148 { \
149 Log(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_VmxDiag), \
150 HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
151 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
152 return VERR_VMX_VMENTRY_FAILED; \
153 } while (0)
154
155/** Marks a VM-exit failure with a diagnostic reason, logs and returns. */
156#define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
157 do \
158 { \
159 Log(("VM-exit failed! uExitReason=%u enmDiag=%u (%s) -> %s\n", (a_uExitReason), (a_VmxDiag), \
160 HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
161 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
162 return VERR_VMX_VMEXIT_FAILED; \
163 } while (0)
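/*
 * Hypothetical usage sketch (function name, check and diagnostic value invented
 * for illustration only): the two *_FAILED_RET macros are meant to be used from
 * the VM-entry/VM-exit checking code roughly like this:
 *
 *     IEM_STATIC int iemVmxVmentryCheckSomething(PVMCPU pVCpu, const char *pszInstr)
 *     {
 *         if (!fConditionHolds)
 *             IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, "some-check", kVmxVDiag_Vmentry_Xxx);
 *         return VINF_SUCCESS;
 *     }
 *
 * i.e. they record the diagnostic in the guest context, log it and return
 * VERR_VMX_VMENTRY_FAILED / VERR_VMX_VMEXIT_FAILED to the caller.
 */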
164
165
166/*********************************************************************************************************************************
167* Global Variables *
168*********************************************************************************************************************************/
169/** @todo NSTVMX: The following VM-exit intercepts are pending:
170 * VMX_EXIT_IO_SMI
171 * VMX_EXIT_SMI
172 * VMX_EXIT_INT_WINDOW
173 * VMX_EXIT_NMI_WINDOW
174 * VMX_EXIT_GETSEC
175 * VMX_EXIT_RSM
176 * VMX_EXIT_MTF
177 * VMX_EXIT_MONITOR (APIC access VM-exit caused by MONITOR pending)
178 * VMX_EXIT_ERR_MACHINE_CHECK
179 * VMX_EXIT_TPR_BELOW_THRESHOLD
180 * VMX_EXIT_APIC_ACCESS
181 * VMX_EXIT_VIRTUALIZED_EOI
182 * VMX_EXIT_EPT_VIOLATION
183 * VMX_EXIT_EPT_MISCONFIG
184 * VMX_EXIT_INVEPT
185 * VMX_EXIT_PREEMPT_TIMER
186 * VMX_EXIT_INVVPID
187 * VMX_EXIT_APIC_WRITE
188 * VMX_EXIT_RDRAND
189 * VMX_EXIT_VMFUNC
190 * VMX_EXIT_ENCLS
191 * VMX_EXIT_RDSEED
192 * VMX_EXIT_PML_FULL
193 * VMX_EXIT_XSAVES
194 * VMX_EXIT_XRSTORS
195 */
196/**
197 * Map of VMCS field encodings to their virtual-VMCS structure offsets.
198 *
199 * The first array dimension is the Width of the VMCS field encoding shifted left by two
200 * bits and OR'ed with its Type, the second dimension is the Index, see VMXVMCSFIELDENC.
201 */
202uint16_t const g_aoffVmcsMap[16][VMX_V_VMCS_MAX_INDEX + 1] =
203{
204 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
205 {
206 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u16Vpid),
207 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u16PostIntNotifyVector),
208 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u16EptpIndex),
209 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
210 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
211 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
212 },
213 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
214 {
215 /* 0-7 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
216 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
217 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
218 /* 24-25 */ UINT16_MAX, UINT16_MAX
219 },
220 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
221 {
222 /* 0 */ RT_UOFFSETOF(VMXVVMCS, GuestEs),
223 /* 1 */ RT_UOFFSETOF(VMXVVMCS, GuestCs),
224 /* 2 */ RT_UOFFSETOF(VMXVVMCS, GuestSs),
225 /* 3 */ RT_UOFFSETOF(VMXVVMCS, GuestDs),
226 /* 4 */ RT_UOFFSETOF(VMXVVMCS, GuestFs),
227 /* 5 */ RT_UOFFSETOF(VMXVVMCS, GuestGs),
228 /* 6 */ RT_UOFFSETOF(VMXVVMCS, GuestLdtr),
229 /* 7 */ RT_UOFFSETOF(VMXVVMCS, GuestTr),
230 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u16GuestIntStatus),
231 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u16PmlIndex),
232 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
233 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
234 },
235 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
236 {
237 /* 0 */ RT_UOFFSETOF(VMXVVMCS, HostEs),
238 /* 1 */ RT_UOFFSETOF(VMXVVMCS, HostCs),
239 /* 2 */ RT_UOFFSETOF(VMXVVMCS, HostSs),
240 /* 3 */ RT_UOFFSETOF(VMXVVMCS, HostDs),
241 /* 4 */ RT_UOFFSETOF(VMXVVMCS, HostFs),
242 /* 5 */ RT_UOFFSETOF(VMXVVMCS, HostGs),
243 /* 6 */ RT_UOFFSETOF(VMXVVMCS, HostTr),
244 /* 7-14 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
245 /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
246 /* 23-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
247 },
248 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
249 {
250 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64AddrIoBitmapA),
251 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64AddrIoBitmapB),
252 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64AddrMsrBitmap),
253 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64AddrExitMsrStore),
254 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64AddrExitMsrLoad),
255 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEntryMsrLoad),
256 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64ExecVmcsPtr),
257 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64AddrPml),
258 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64TscOffset),
259 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVirtApic),
260 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64AddrApicAccess),
261 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64AddrPostedIntDesc),
262 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u64VmFuncCtls),
263 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u64EptpPtr),
264 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap0),
265 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap1),
266 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap2),
267 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap3),
268 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEptpList),
269 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVmreadBitmap),
270 /* 20 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVmwriteBitmap),
271 /* 21 */ RT_UOFFSETOF(VMXVVMCS, u64AddrXcptVeInfo),
272 /* 22 */ RT_UOFFSETOF(VMXVVMCS, u64XssBitmap),
273 /* 23 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEnclsBitmap),
274 /* 24 */ UINT16_MAX,
275 /* 25 */ RT_UOFFSETOF(VMXVVMCS, u64TscMultiplier)
276 },
277 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
278 {
279 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64RoGuestPhysAddr),
280 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
281 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
282 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
283 /* 25 */ UINT16_MAX
284 },
285 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
286 {
287 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64VmcsLinkPtr),
288 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDebugCtlMsr),
289 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPatMsr),
290 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64GuestEferMsr),
291 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
292 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte0),
293 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte1),
294 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte2),
295 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte3),
296 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64GuestBndcfgsMsr),
297 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
298 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
299 },
300 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
301 {
302 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64HostPatMsr),
303 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64HostEferMsr),
304 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64HostPerfGlobalCtlMsr),
305 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
306 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
307 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
308 },
309 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
310 {
311 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32PinCtls),
312 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32ProcCtls),
313 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32XcptBitmap),
314 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32XcptPFMask),
315 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32XcptPFMatch),
316 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32Cr3TargetCount),
317 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32ExitCtls),
318 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32ExitMsrStoreCount),
319 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u32ExitMsrLoadCount),
320 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u32EntryCtls),
321 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u32EntryMsrLoadCount),
322 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u32EntryIntInfo),
323 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u32EntryXcptErrCode),
324 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u32EntryInstrLen),
325 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u32TprThreshold),
326 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u32ProcCtls2),
327 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u32PleGap),
328 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u32PleWindow),
329 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
330 },
331 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
332 {
333 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32RoVmInstrError),
334 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitReason),
335 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitIntInfo),
336 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitIntErrCode),
337 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
338 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
339 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitInstrLen),
340 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitInstrInfo),
341 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
342 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
343 /* 24-25 */ UINT16_MAX, UINT16_MAX
344 },
345 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
346 {
347 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32GuestEsLimit),
348 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32GuestCsLimit),
349 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSsLimit),
350 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32GuestDsLimit),
351 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32GuestFsLimit),
352 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGsLimit),
353 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32GuestLdtrLimit),
354 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32GuestTrLimit),
355 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGdtrLimit),
356 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u32GuestIdtrLimit),
357 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u32GuestEsAttr),
358 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u32GuestCsAttr),
359 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSsAttr),
360 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u32GuestDsAttr),
361 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u32GuestFsAttr),
362 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGsAttr),
363 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u32GuestLdtrAttr),
364 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u32GuestTrAttr),
365 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u32GuestIntrState),
366 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u32GuestActivityState),
367 /* 20 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSmBase),
368 /* 21 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSysenterCS),
369 /* 22 */ UINT16_MAX,
370 /* 23 */ RT_UOFFSETOF(VMXVVMCS, u32PreemptTimer),
371 /* 24-25 */ UINT16_MAX, UINT16_MAX
372 },
373 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
374 {
375 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32HostSysenterCs),
376 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
377 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
378 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
379 /* 25 */ UINT16_MAX
380 },
381 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_CONTROL: */
382 {
383 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64Cr0Mask),
384 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64Cr4Mask),
385 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64Cr0ReadShadow),
386 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64Cr4ReadShadow),
387 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target0),
388 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target1),
389 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target2),
390 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target3),
391 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
392 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
393 /* 24-25 */ UINT16_MAX, UINT16_MAX
394 },
395 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
396 {
397 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64RoExitQual),
398 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRcx),
399 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRsi),
400 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRdi),
401 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRip),
402 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64RoGuestLinearAddr),
403 /* 6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
404 /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
405 /* 22-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
406 },
407 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
408 {
409 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr0),
410 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr3),
411 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr4),
412 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64GuestEsBase),
413 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCsBase),
414 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSsBase),
415 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDsBase),
416 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64GuestFsBase),
417 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64GuestGsBase),
418 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64GuestLdtrBase),
419 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64GuestTrBase),
420 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64GuestGdtrBase),
421 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u64GuestIdtrBase),
422 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDr7),
423 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRsp),
424 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRip),
425 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRFlags),
426 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPendingDbgXcpt),
427 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSysenterEsp),
428 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSysenterEip),
429 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
430 },
431 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_HOST_STATE: */
432 {
433 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr0),
434 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr3),
435 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr4),
436 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64HostFsBase),
437 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64HostGsBase),
438 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64HostTrBase),
439 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64HostGdtrBase),
440 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64HostIdtrBase),
441 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64HostSysenterEsp),
442 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64HostSysenterEip),
443 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64HostRsp),
444 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64HostRip),
445 /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
446 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
447 }
448};
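/*
 * Worked example of the lookup (not from the original file): a VMCS field
 * encoding decomposes into access-type (bit 0), index (bits 9:1), type
 * (bits 11:10) and width (bits 14:13), and the map above is indexed by
 * (width << 2) | type and then by index.  For VMX_VMCS32_GUEST_SYSENTER_CS
 * (encoding 0x482a): width = 2 (32-bit), type = 2 (guest-state), index = 21,
 * so the field lives at g_aoffVmcsMap[(2 << 2) | 2][21], i.e. the offset of
 * u32GuestSysenterCS in VMXVVMCS.  Entries holding UINT16_MAX mark encodings
 * that the virtual VMCS does not back with storage.
 */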
449
450
451/**
452 * Returns whether the given VMCS field is valid and supported by our emulation.
453 *
454 * @param pVCpu The cross context virtual CPU structure.
455 * @param u64FieldEnc The VMCS field encoding.
456 *
457 * @remarks This takes into account the CPU features exposed to the guest.
458 */
459IEM_STATIC bool iemVmxIsVmcsFieldValid(PVMCPU pVCpu, uint64_t u64FieldEnc)
460{
461 uint32_t const uFieldEncHi = RT_HI_U32(u64FieldEnc);
462 uint32_t const uFieldEncLo = RT_LO_U32(u64FieldEnc);
463 if (!uFieldEncHi)
464 { /* likely */ }
465 else
466 return false;
467
468 PCCPUMFEATURES pFeat = IEM_GET_GUEST_CPU_FEATURES(pVCpu);
469 switch (uFieldEncLo)
470 {
471 /*
472 * 16-bit fields.
473 */
474 /* Control fields. */
475 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
476 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
477 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
478
479 /* Guest-state fields. */
480 case VMX_VMCS16_GUEST_ES_SEL:
481 case VMX_VMCS16_GUEST_CS_SEL:
482 case VMX_VMCS16_GUEST_SS_SEL:
483 case VMX_VMCS16_GUEST_DS_SEL:
484 case VMX_VMCS16_GUEST_FS_SEL:
485 case VMX_VMCS16_GUEST_GS_SEL:
486 case VMX_VMCS16_GUEST_LDTR_SEL:
487 case VMX_VMCS16_GUEST_TR_SEL:
488 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
489 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
490
491 /* Host-state fields. */
492 case VMX_VMCS16_HOST_ES_SEL:
493 case VMX_VMCS16_HOST_CS_SEL:
494 case VMX_VMCS16_HOST_SS_SEL:
495 case VMX_VMCS16_HOST_DS_SEL:
496 case VMX_VMCS16_HOST_FS_SEL:
497 case VMX_VMCS16_HOST_GS_SEL:
498 case VMX_VMCS16_HOST_TR_SEL: return true;
499
500 /*
501 * 64-bit fields.
502 */
503 /* Control fields. */
504 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
505 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
506 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
507 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
508 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
509 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
510 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
511 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
512 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
513 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
514 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
515 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
516 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
517 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
518 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
519 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
520 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
521 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
522 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
523 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
524 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
525 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
526 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
527 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
528 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
529 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
530 case VMX_VMCS64_CTRL_EPTP_FULL:
531 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
532 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
533 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
534 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
535 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
536 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
537 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
538 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
539 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
540 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
541 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
542 {
543 uint64_t const uVmFuncMsr = CPUMGetGuestIa32VmxVmFunc(pVCpu);
544 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
545 }
546 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
547 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
548 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
549 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
550 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
551 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
552 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
553 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
554 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
555 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
556 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
557 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
558
559 /* Read-only data fields. */
560 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
561 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
562
563 /* Guest-state fields. */
564 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
565 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
566 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
567 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
568 case VMX_VMCS64_GUEST_PAT_FULL:
569 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
570 case VMX_VMCS64_GUEST_EFER_FULL:
571 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
572 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
573 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
574 case VMX_VMCS64_GUEST_PDPTE0_FULL:
575 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
576 case VMX_VMCS64_GUEST_PDPTE1_FULL:
577 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
578 case VMX_VMCS64_GUEST_PDPTE2_FULL:
579 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
580 case VMX_VMCS64_GUEST_PDPTE3_FULL:
581 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
582 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
583 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
584
585 /* Host-state fields. */
586 case VMX_VMCS64_HOST_PAT_FULL:
587 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
588 case VMX_VMCS64_HOST_EFER_FULL:
589 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
590 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
591 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
592
593 /*
594 * 32-bit fields.
595 */
596 /* Control fields. */
597 case VMX_VMCS32_CTRL_PIN_EXEC:
598 case VMX_VMCS32_CTRL_PROC_EXEC:
599 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
600 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
601 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
602 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
603 case VMX_VMCS32_CTRL_EXIT:
604 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
605 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
606 case VMX_VMCS32_CTRL_ENTRY:
607 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
608 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
609 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
610 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
611 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
612 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
613 case VMX_VMCS32_CTRL_PLE_GAP:
614 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
615
616 /* Read-only data fields. */
617 case VMX_VMCS32_RO_VM_INSTR_ERROR:
618 case VMX_VMCS32_RO_EXIT_REASON:
619 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
620 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
621 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
622 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
623 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
624 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
625
626 /* Guest-state fields. */
627 case VMX_VMCS32_GUEST_ES_LIMIT:
628 case VMX_VMCS32_GUEST_CS_LIMIT:
629 case VMX_VMCS32_GUEST_SS_LIMIT:
630 case VMX_VMCS32_GUEST_DS_LIMIT:
631 case VMX_VMCS32_GUEST_FS_LIMIT:
632 case VMX_VMCS32_GUEST_GS_LIMIT:
633 case VMX_VMCS32_GUEST_LDTR_LIMIT:
634 case VMX_VMCS32_GUEST_TR_LIMIT:
635 case VMX_VMCS32_GUEST_GDTR_LIMIT:
636 case VMX_VMCS32_GUEST_IDTR_LIMIT:
637 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
638 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
639 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
640 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
641 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
642 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
643 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
644 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
645 case VMX_VMCS32_GUEST_INT_STATE:
646 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
647 case VMX_VMCS32_GUEST_SMBASE:
648 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
649 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
650
651 /* Host-state fields. */
652 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
653
654 /*
655 * Natural-width fields.
656 */
657 /* Control fields. */
658 case VMX_VMCS_CTRL_CR0_MASK:
659 case VMX_VMCS_CTRL_CR4_MASK:
660 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
661 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
662 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
663 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
664 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
665 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
666
667 /* Read-only data fields. */
668 case VMX_VMCS_RO_EXIT_QUALIFICATION:
669 case VMX_VMCS_RO_IO_RCX:
670 case VMX_VMCS_RO_IO_RSX:
671 case VMX_VMCS_RO_IO_RDI:
672 case VMX_VMCS_RO_IO_RIP:
673 case VMX_VMCS_RO_GUEST_LINEAR_ADDR: return true;
674
675 /* Guest-state fields. */
676 case VMX_VMCS_GUEST_CR0:
677 case VMX_VMCS_GUEST_CR3:
678 case VMX_VMCS_GUEST_CR4:
679 case VMX_VMCS_GUEST_ES_BASE:
680 case VMX_VMCS_GUEST_CS_BASE:
681 case VMX_VMCS_GUEST_SS_BASE:
682 case VMX_VMCS_GUEST_DS_BASE:
683 case VMX_VMCS_GUEST_FS_BASE:
684 case VMX_VMCS_GUEST_GS_BASE:
685 case VMX_VMCS_GUEST_LDTR_BASE:
686 case VMX_VMCS_GUEST_TR_BASE:
687 case VMX_VMCS_GUEST_GDTR_BASE:
688 case VMX_VMCS_GUEST_IDTR_BASE:
689 case VMX_VMCS_GUEST_DR7:
690 case VMX_VMCS_GUEST_RSP:
691 case VMX_VMCS_GUEST_RIP:
692 case VMX_VMCS_GUEST_RFLAGS:
693 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
694 case VMX_VMCS_GUEST_SYSENTER_ESP:
695 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
696
697 /* Host-state fields. */
698 case VMX_VMCS_HOST_CR0:
699 case VMX_VMCS_HOST_CR3:
700 case VMX_VMCS_HOST_CR4:
701 case VMX_VMCS_HOST_FS_BASE:
702 case VMX_VMCS_HOST_GS_BASE:
703 case VMX_VMCS_HOST_TR_BASE:
704 case VMX_VMCS_HOST_GDTR_BASE:
705 case VMX_VMCS_HOST_IDTR_BASE:
706 case VMX_VMCS_HOST_SYSENTER_ESP:
707 case VMX_VMCS_HOST_SYSENTER_EIP:
708 case VMX_VMCS_HOST_RSP:
709 case VMX_VMCS_HOST_RIP: return true;
710 }
711
712 return false;
713}
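/*
 * Hypothetical usage sketch (exact call site and error enumerator assumed, not
 * taken from the original file): VMREAD/VMWRITE are expected to gate field
 * access on this check, along the lines of:
 *
 *     if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
 *     {
 *         iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
 *         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
 *         return VINF_SUCCESS;
 *     }
 *
 * so that a nested hypervisor touching a field outside the feature set exposed
 * to it gets the architectural "unsupported VMCS component" VMfail instead of a
 * raw structure access.
 */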
714
715
716/**
717 * Gets a host selector from the VMCS.
718 *
719 * @param pVmcs Pointer to the virtual VMCS.
720 * @param iSegReg The index of the segment register (X86_SREG_XXX).
721 */
722DECLINLINE(RTSEL) iemVmxVmcsGetHostSelReg(PCVMXVVMCS pVmcs, uint8_t iSegReg)
723{
724 Assert(iSegReg < X86_SREG_COUNT);
725 RTSEL HostSel;
726 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
727 uint8_t const uType = VMX_VMCS_ENC_TYPE_HOST_STATE;
728 uint8_t const uWidthType = (uWidth << 2) | uType;
729 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_HOST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
730 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
731 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
732 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
733 uint8_t const *pbField = pbVmcs + offField;
734 HostSel = *(uint16_t *)pbField;
735 return HostSel;
736}
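/*
 * Worked example (not from the original file): fetching the host SS selector
 * (iSegReg = X86_SREG_SS = 2) gives uWidthType = (VMX_VMCS_ENC_WIDTH_16BIT << 2)
 * | VMX_VMCS_ENC_TYPE_HOST_STATE and uIndex = 2 + 0 = 2, which resolves through
 * g_aoffVmcsMap to RT_UOFFSETOF(VMXVVMCS, HostSs) -- the same field addressed by
 * the VMX_VMCS16_HOST_SS_SEL encoding (0x0c04, index 2).
 */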
737
738
739/**
740 * Sets a guest segment register in the VMCS.
741 *
742 * @param pVmcs Pointer to the virtual VMCS.
743 * @param iSegReg The index of the segment register (X86_SREG_XXX).
744 * @param pSelReg Pointer to the segment register.
745 */
746IEM_STATIC void iemVmxVmcsSetGuestSegReg(PVMXVVMCS pVmcs, uint8_t iSegReg, PCCPUMSELREG pSelReg)
747{
748 Assert(pSelReg);
749 Assert(iSegReg < X86_SREG_COUNT);
750
751 /* Selector. */
752 {
753 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
754 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
755 uint8_t const uWidthType = (uWidth << 2) | uType;
756 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX); \
757 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
758 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
759 uint8_t *pbVmcs = (uint8_t *)pVmcs;
760 uint8_t *pbField = pbVmcs + offField;
761 *(uint16_t *)pbField = pSelReg->Sel;
762 }
763
764 /* Limit. */
765 {
766 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
767 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
768 uint8_t const uWidthType = (uWidth << 2) | uType;
769 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
770 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
771 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
772 uint8_t *pbVmcs = (uint8_t *)pVmcs;
773 uint8_t *pbField = pbVmcs + offField;
774 *(uint32_t *)pbField = pSelReg->u32Limit;
775 }
776
777 /* Base. */
778 {
779 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
780 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
781 uint8_t const uWidthType = (uWidth << 2) | uType;
782 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
783 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
784 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
785 uint8_t *pbVmcs = (uint8_t *)pVmcs;
786 uint8_t *pbField = pbVmcs + offField;
787 *(uint64_t *)pbField = pSelReg->u64Base;
788 }
789
790 /* Attributes. */
791 {
792 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
793 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
794 | X86DESCATTR_UNUSABLE;
795 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
796 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
797 uint8_t const uWidthType = (uWidth << 2) | uType;
798 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
799 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
800 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
801 uint8_t *pbVmcs = (uint8_t *)pVmcs;
802 uint8_t *pbField = pbVmcs + offField;
803 *(uint32_t *)pbField = pSelReg->Attr.u & fValidAttrMask;
804 }
805}
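/*
 * Note on the attribute write above (added for clarity, not from the original
 * file): only the architecturally meaningful access-rights bits (type, S, DPL,
 * P, AVL, L, D/B, G and the VMX "unusable" bit) are copied, so internal bits of
 * the CPUMSELREG attribute format never leak into the virtual VMCS.  A flat
 * 32-bit data segment, for instance, ends up stored as 0x0000c093
 * (G=1, D/B=1, P=1, DPL=0, S=1, type=3).
 */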
806
807
808/**
809 * Gets a guest segment register from the VMCS.
810 *
811 * @returns VBox status code.
812 * @param pVmcs Pointer to the virtual VMCS.
813 * @param iSegReg The index of the segment register (X86_SREG_XXX).
814 * @param pSelReg Where to store the segment register (only updated when
815 * VINF_SUCCESS is returned).
816 *
817 * @remarks Warning! This does not validate the contents of the retrieved segment
818 * register.
819 */
820IEM_STATIC int iemVmxVmcsGetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCPUMSELREG pSelReg)
821{
822 Assert(pSelReg);
823 Assert(iSegReg < X86_SREG_COUNT);
824
825 /* Selector. */
826 uint16_t u16Sel;
827 {
828 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
829 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
830 uint8_t const uWidthType = (uWidth << 2) | uType;
831 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
832 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
833 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
834 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
835 uint8_t const *pbField = pbVmcs + offField;
836 u16Sel = *(uint16_t *)pbField;
837 }
838
839 /* Limit. */
840 uint32_t u32Limit;
841 {
842 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
843 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
844 uint8_t const uWidthType = (uWidth << 2) | uType;
845 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
846 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
847 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
848 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
849 uint8_t const *pbField = pbVmcs + offField;
850 u32Limit = *(uint32_t *)pbField;
851 }
852
853 /* Base. */
854 uint64_t u64Base;
855 {
856 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
857 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
858 uint8_t const uWidthType = (uWidth << 2) | uType;
859 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
860 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
861 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
862 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
863 uint8_t const *pbField = pbVmcs + offField;
864 u64Base = *(uint64_t *)pbField;
865 /** @todo NSTVMX: Should we zero out high bits here for 32-bit virtual CPUs? */
866 }
867
868 /* Attributes. */
869 uint32_t u32Attr;
870 {
871 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
872 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
873 uint8_t const uWidthType = (uWidth << 2) | uType;
874 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
875 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
876 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
877 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
878 uint8_t const *pbField = pbVmcs + offField;
879 u32Attr = *(uint32_t *)pbField;
880 }
881
882 pSelReg->Sel = u16Sel;
883 pSelReg->ValidSel = u16Sel;
884 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
885 pSelReg->u32Limit = u32Limit;
886 pSelReg->u64Base = u64Base;
887 pSelReg->Attr.u = u32Attr;
888 return VINF_SUCCESS;
889}
890
891
892/**
893 * Gets a CR3 target value from the VMCS.
894 *
895 * @returns The CR3-target value.
896 * @param pVmcs Pointer to the virtual VMCS.
897 * @param idxCr3Target The index of the CR3-target value to retrieve.
899 */
900DECLINLINE(uint64_t) iemVmxVmcsGetCr3TargetValue(PCVMXVVMCS pVmcs, uint8_t idxCr3Target)
901{
902 Assert(idxCr3Target < VMX_V_CR3_TARGET_COUNT);
903 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
904 uint8_t const uType = VMX_VMCS_ENC_TYPE_CONTROL;
905 uint8_t const uWidthType = (uWidth << 2) | uType;
906 uint8_t const uIndex = idxCr3Target + RT_BF_GET(VMX_VMCS_CTRL_CR3_TARGET_VAL0, VMX_BF_VMCS_ENC_INDEX);
907 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
908 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
909 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
910 uint8_t const *pbField = pbVmcs + offField;
911 uint64_t const uCr3TargetValue = *(uint64_t *)pbField;
912
913 return uCr3TargetValue;
914}
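/*
 * Worked example (not from the original file): the four CR3-target values sit at
 * consecutive indices in the natural-width control row, starting at the index of
 * VMX_VMCS_CTRL_CR3_TARGET_VAL0 (4).  So idxCr3Target = 2 gives uIndex = 2 + 4 = 6
 * and reads u64Cr3Target2, matching VMX_VMCS_CTRL_CR3_TARGET_VAL2 (0x600c).
 */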
915
916
917/**
918 * Converts an IEM exception event type to a VMX event type.
919 *
920 * @returns The VMX event type.
921 * @param uVector The interrupt / exception vector.
922 * @param fFlags The IEM event flag (see IEM_XCPT_FLAGS_XXX).
923 */
924DECLINLINE(uint8_t) iemVmxGetEventType(uint32_t uVector, uint32_t fFlags)
925{
926 /* Paranoia (callers may use these interchangeably). */
927 AssertCompile(VMX_EXIT_INT_INFO_TYPE_NMI == VMX_IDT_VECTORING_INFO_TYPE_NMI);
928 AssertCompile(VMX_EXIT_INT_INFO_TYPE_HW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT);
929 AssertCompile(VMX_EXIT_INT_INFO_TYPE_EXT_INT == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
930 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT);
931 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_INT == VMX_IDT_VECTORING_INFO_TYPE_SW_INT);
932 AssertCompile(VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
933 AssertCompile(VMX_EXIT_INT_INFO_TYPE_NMI == VMX_ENTRY_INT_INFO_TYPE_NMI);
934 AssertCompile(VMX_EXIT_INT_INFO_TYPE_HW_XCPT == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT);
935 AssertCompile(VMX_EXIT_INT_INFO_TYPE_EXT_INT == VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
936 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_XCPT == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT);
937 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_INT == VMX_ENTRY_INT_INFO_TYPE_SW_INT);
938 AssertCompile(VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT);
939
940 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
941 {
942 if (uVector == X86_XCPT_NMI)
943 return VMX_EXIT_INT_INFO_TYPE_NMI;
944 return VMX_EXIT_INT_INFO_TYPE_HW_XCPT;
945 }
946
947 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
948 {
949 if (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR))
950 return VMX_EXIT_INT_INFO_TYPE_SW_XCPT;
951 if (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
952 return VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT;
953 return VMX_EXIT_INT_INFO_TYPE_SW_INT;
954 }
955
956 Assert(fFlags & IEM_XCPT_FLAGS_T_EXT_INT);
957 return VMX_EXIT_INT_INFO_TYPE_EXT_INT;
958}
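/*
 * Illustrative mappings (not from the original file), following the branches above:
 *   - #PF raised while executing guest code (IEM_XCPT_FLAGS_T_CPU_XCPT)
 *     -> VMX_EXIT_INT_INFO_TYPE_HW_XCPT.
 *   - INT3 / 0xcc (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)
 *     -> VMX_EXIT_INT_INFO_TYPE_SW_XCPT.
 *   - INT 0x80 (IEM_XCPT_FLAGS_T_SOFT_INT) -> VMX_EXIT_INT_INFO_TYPE_SW_INT.
 *   - ICEBP / 0xf1 (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)
 *     -> VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT.
 *   - External interrupt (IEM_XCPT_FLAGS_T_EXT_INT) -> VMX_EXIT_INT_INFO_TYPE_EXT_INT.
 */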
959
960
961/**
962 * Sets the VM-instruction error VMCS field.
963 *
964 * @param pVCpu The cross context virtual CPU structure.
965 * @param enmInsErr The VM-instruction error.
966 */
967DECL_FORCE_INLINE(void) iemVmxVmcsSetVmInstrErr(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
968{
969 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
970 pVmcs->u32RoVmInstrError = enmInsErr;
971}
972
973
974/**
975 * Sets the VM-exit qualification VMCS field.
976 *
977 * @param pVCpu The cross context virtual CPU structure.
978 * @param uExitQual The VM-exit qualification.
979 */
980DECL_FORCE_INLINE(void) iemVmxVmcsSetExitQual(PVMCPU pVCpu, uint64_t uExitQual)
981{
982 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
983 pVmcs->u64RoExitQual.u = uExitQual;
984}
985
986
987/**
988 * Sets the VM-exit interruption information field.
989 *
990 * @param pVCpu The cross context virtual CPU structure.
991 * @param uExitIntInfo The VM-exit interruption information.
992 */
993DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntInfo(PVMCPU pVCpu, uint32_t uExitIntInfo)
994{
995 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
996 pVmcs->u32RoExitIntInfo = uExitIntInfo;
997}
998
999
1000/**
1001 * Sets the VM-exit interruption error code.
1002 *
1003 * @param pVCpu The cross context virtual CPU structure.
1004 * @param uErrCode The error code.
1005 */
1006DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntErrCode(PVMCPU pVCpu, uint32_t uErrCode)
1007{
1008 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1009 pVmcs->u32RoExitIntErrCode = uErrCode;
1010}
1011
1012
1013/**
1014 * Sets the IDT-vectoring information field.
1015 *
1016 * @param pVCpu The cross context virtual CPU structure.
1017 * @param uIdtVectorInfo The IDT-vectoring information.
1018 */
1019DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringInfo(PVMCPU pVCpu, uint32_t uIdtVectorInfo)
1020{
1021 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1022 pVmcs->u32RoIdtVectoringInfo = uIdtVectorInfo;
1023}
1024
1025
1026/**
1027 * Sets the IDT-vectoring error code field.
1028 *
1029 * @param pVCpu The cross context virtual CPU structure.
1030 * @param uErrCode The error code.
1031 */
1032DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringErrCode(PVMCPU pVCpu, uint32_t uErrCode)
1033{
1034 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1035 pVmcs->u32RoIdtVectoringErrCode = uErrCode;
1036}
1037
1038
1039/**
1040 * Sets the VM-exit guest-linear address VMCS field.
1041 *
1042 * @param pVCpu The cross context virtual CPU structure.
1043 * @param uGuestLinearAddr The VM-exit guest-linear address.
1044 */
1045DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestLinearAddr(PVMCPU pVCpu, uint64_t uGuestLinearAddr)
1046{
1047 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1048 pVmcs->u64RoGuestLinearAddr.u = uGuestLinearAddr;
1049}
1050
1051
1052/**
1053 * Sets the VM-exit guest-physical address VMCS field.
1054 *
1055 * @param pVCpu The cross context virtual CPU structure.
1056 * @param uGuestPhysAddr The VM-exit guest-physical address.
1057 */
1058DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestPhysAddr(PVMCPU pVCpu, uint64_t uGuestPhysAddr)
1059{
1060 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1061 pVmcs->u64RoGuestPhysAddr.u = uGuestPhysAddr;
1062}
1063
1064
1065/**
1066 * Sets the VM-exit instruction length VMCS field.
1067 *
1068 * @param pVCpu The cross context virtual CPU structure.
1069 * @param cbInstr The VM-exit instruction length in bytes.
1070 *
1071 * @remarks Callers may clear this field to 0. Hence, this function does not check
1072 * the validity of the instruction length.
1073 */
1074DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrLen(PVMCPU pVCpu, uint32_t cbInstr)
1075{
1076 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1077 pVmcs->u32RoExitInstrLen = cbInstr;
1078}
1079
1080
1081/**
1082 * Sets the VM-exit instruction info. VMCS field.
1083 *
1084 * @param pVCpu The cross context virtual CPU structure.
1085 * @param uExitInstrInfo The VM-exit instruction information.
1086 */
1087DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitInstrInfo)
1088{
1089 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1090 pVmcs->u32RoExitInstrInfo = uExitInstrInfo;
1091}
1092
1093
1094/**
1095 * Implements VMSucceed for VMX instruction success.
1096 *
1097 * @param pVCpu The cross context virtual CPU structure.
1098 */
1099DECL_FORCE_INLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
1100{
1101 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1102}
1103
1104
1105/**
1106 * Implements VMFailInvalid for VMX instruction failure.
1107 *
1108 * @param pVCpu The cross context virtual CPU structure.
1109 */
1110DECL_FORCE_INLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
1111{
1112 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1113 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_CF;
1114}
1115
1116
1117/**
1118 * Implements VMFailValid for VMX instruction failure.
1119 *
1120 * @param pVCpu The cross context virtual CPU structure.
1121 * @param enmInsErr The VM instruction error.
1122 */
1123DECL_FORCE_INLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1124{
1125 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1126 {
1127 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1128 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_ZF;
1129 iemVmxVmcsSetVmInstrErr(pVCpu, enmInsErr);
1130 }
1131}
1132
1133
1134/**
1135 * Implements VMFail for VMX instruction failure.
1136 *
1137 * @param pVCpu The cross context virtual CPU structure.
1138 * @param enmInsErr The VM instruction error.
1139 */
1140DECL_FORCE_INLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1141{
1142 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1143 iemVmxVmFailValid(pVCpu, enmInsErr);
1144 else
1145 iemVmxVmFailInvalid(pVCpu);
1146}
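/*
 * Summary of the RFLAGS conventions implemented above (per the Intel SDM, added
 * for clarity): VMsucceed clears CF/PF/AF/ZF/SF/OF; VMfailInvalid sets CF and
 * clears the rest (used when no current VMCS is available to hold an error);
 * VMfailValid sets ZF, clears the rest and records the VMXINSTRERR value in the
 * VM-instruction error field of the current VMCS.  For example, VMPTRLD with an
 * invalid physical address while a current VMCS is loaded takes the
 * iemVmxVmFailValid() path and leaves ZF set for the nested hypervisor to test.
 */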
1147
1148
1149/**
1150 * Checks if the given auto-load/store MSR area count is valid for the
1151 * implementation.
1152 *
1153 * @returns @c true if it's within the valid limit, @c false otherwise.
1154 * @param pVCpu The cross context virtual CPU structure.
1155 * @param uMsrCount The MSR area count to check.
1156 */
1157DECL_FORCE_INLINE(bool) iemVmxIsAutoMsrCountValid(PVMCPU pVCpu, uint32_t uMsrCount)
1158{
1159 uint64_t const u64VmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
1160 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(u64VmxMiscMsr);
1161 Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
1162 if (uMsrCount <= cMaxSupportedMsrs)
1163 return true;
1164 return false;
1165}
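/*
 * Note (added for clarity; SDM-derived, the exact macro expansion is assumed):
 * VMX_MISC_MAX_MSRS is expected to decode bits 27:25 (N) of IA32_VMX_MISC and
 * yield 512 * (N + 1), so N = 0 gives the architectural minimum of 512
 * auto-load/store MSR entries; the assertion above additionally requires the
 * result to fit within VMX_V_AUTOMSR_AREA_SIZE.
 */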
1166
1167
1168/**
1169 * Flushes the current VMCS contents back to guest memory.
1170 *
1171 * @returns VBox status code.
1172 * @param pVCpu The cross context virtual CPU structure.
1173 */
1174DECL_FORCE_INLINE(int) iemVmxCommitCurrentVmcsToMemory(PVMCPU pVCpu)
1175{
1176 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1177 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
1178 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), sizeof(VMXVVMCS));
1179 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
1180 return rc;
1181}
1182
1183
1184/**
1185 * Implements VMSucceed for the VMREAD instruction and increments the guest RIP.
1186 *
1187 * @param pVCpu The cross context virtual CPU structure.
1188 */
1189DECL_FORCE_INLINE(void) iemVmxVmreadSuccess(PVMCPU pVCpu, uint8_t cbInstr)
1190{
1191 iemVmxVmSucceed(pVCpu);
1192 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1193}
1194
1195
1196/**
1197 * Gets the instruction diagnostic for segment base checks during VM-entry of a
1198 * nested-guest.
1199 *
1200 * @param iSegReg The segment index (X86_SREG_XXX).
1201 */
1202IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBase(unsigned iSegReg)
1203{
1204 switch (iSegReg)
1205 {
1206 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseCs;
1207 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseDs;
1208 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseEs;
1209 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseFs;
1210 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseGs;
1211 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseSs;
1212 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_1);
1213 }
1214}
1215
1216
1217/**
1218 * Gets the instruction diagnostic for segment base checks during VM-entry of a
1219 * nested-guest that is in Virtual-8086 mode.
1220 *
1221 * @param iSegReg The segment index (X86_SREG_XXX).
1222 */
1223IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBaseV86(unsigned iSegReg)
1224{
1225 switch (iSegReg)
1226 {
1227 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseV86Cs;
1228 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ds;
1229 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseV86Es;
1230 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseV86Fs;
1231 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseV86Gs;
1232 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ss;
1233 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
1234 }
1235}
1236
1237
1238/**
1239 * Gets the instruction diagnostic for segment limit checks during VM-entry of a
1240 * nested-guest that is in Virtual-8086 mode.
1241 *
1242 * @param iSegReg The segment index (X86_SREG_XXX).
1243 */
1244IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegLimitV86(unsigned iSegReg)
1245{
1246 switch (iSegReg)
1247 {
1248 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegLimitV86Cs;
1249 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ds;
1250 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegLimitV86Es;
1251 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegLimitV86Fs;
1252 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegLimitV86Gs;
1253 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ss;
1254 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_3);
1255 }
1256}
1257
1258
1259/**
1260 * Gets the instruction diagnostic for segment attribute checks during VM-entry of a
1261 * nested-guest that is in Virtual-8086 mode.
1262 *
1263 * @param iSegReg The segment index (X86_SREG_XXX).
1264 */
1265IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrV86(unsigned iSegReg)
1266{
1267 switch (iSegReg)
1268 {
1269 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrV86Cs;
1270 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ds;
1271 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrV86Es;
1272 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrV86Fs;
1273 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrV86Gs;
1274 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ss;
1275 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_4);
1276 }
1277}
1278
1279
1280/**
1281 * Gets the instruction diagnostic for segment attributes reserved bits failure
1282 * during VM-entry of a nested-guest.
1283 *
1284 * @param iSegReg The segment index (X86_SREG_XXX).
1285 */
1286IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrRsvd(unsigned iSegReg)
1287{
1288 switch (iSegReg)
1289 {
1290 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdCs;
1291 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdDs;
1292 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrRsvdEs;
1293 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdFs;
1294 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdGs;
1295 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdSs;
1296 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_5);
1297 }
1298}
1299
1300
1301/**
1302 * Gets the instruction diagnostic for segment attributes descriptor-type
1303 * (code/segment or system) failure during VM-entry of a nested-guest.
1304 *
1305 * @param iSegReg The segment index (X86_SREG_XXX).
1306 */
1307IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDescType(unsigned iSegReg)
1308{
1309 switch (iSegReg)
1310 {
1311 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeCs;
1312 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeDs;
1313 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeEs;
1314 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeFs;
1315 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeGs;
1316 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeSs;
1317 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_6);
1318 }
1319}
1320
1321
1322/**
1323 * Gets the instruction diagnostic for segment attribute present-bit (P) failure
1324 * during VM-entry of a nested-guest.
1325 *
1326 * @param iSegReg The segment index (X86_SREG_XXX).
1327 */
1328IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrPresent(unsigned iSegReg)
1329{
1330 switch (iSegReg)
1331 {
1332 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrPresentCs;
1333 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrPresentDs;
1334 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrPresentEs;
1335 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrPresentFs;
1336 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrPresentGs;
1337 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrPresentSs;
1338 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_7);
1339 }
1340}
1341
1342
1343/**
1344 * Gets the instruction diagnostic for segment attribute granularity failure during
1345 * VM-entry of a nested-guest.
1346 *
1347 * @param iSegReg The segment index (X86_SREG_XXX).
1348 */
1349IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrGran(unsigned iSegReg)
1350{
1351 switch (iSegReg)
1352 {
1353 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrGranCs;
1354 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrGranDs;
1355 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrGranEs;
1356 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrGranFs;
1357 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrGranGs;
1358 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrGranSs;
1359 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_8);
1360 }
1361}
1362
1363/**
1364 * Gets the instruction diagnostic for segment attribute DPL/RPL failure during
1365 * VM-entry of a nested-guest.
1366 *
1367 * @param iSegReg The segment index (X86_SREG_XXX).
1368 */
1369IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDplRpl(unsigned iSegReg)
1370{
1371 switch (iSegReg)
1372 {
1373 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplCs;
1374 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplDs;
1375 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDplRplEs;
1376 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplFs;
1377 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplGs;
1378 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplSs;
1379 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_9);
1380 }
1381}
1382
1383
1384/**
1385 * Gets the instruction diagnostic for segment attribute type accessed failure
1386 * during VM-entry of a nested-guest.
1387 *
1388 * @param iSegReg The segment index (X86_SREG_XXX).
1389 */
1390IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrTypeAcc(unsigned iSegReg)
1391{
1392 switch (iSegReg)
1393 {
1394 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccCs;
1395 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccDs;
1396 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccEs;
1397 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccFs;
1398 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccGs;
1399 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccSs;
1400 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_10);
1401 }
1402}
1403
1404
1405/**
1406 * Gets the instruction diagnostic for guest CR3 referenced PDPTE reserved bits
1407 * failure during VM-entry of a nested-guest.
1408 *
1409 * @param iPdpte The PDPTE entry index.
1410 */
1411IEM_STATIC VMXVDIAG iemVmxGetDiagVmentryPdpteRsvd(unsigned iPdpte)
1412{
1413 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
1414 switch (iPdpte)
1415 {
1416 case 0: return kVmxVDiag_Vmentry_GuestPdpte0Rsvd;
1417 case 1: return kVmxVDiag_Vmentry_GuestPdpte1Rsvd;
1418 case 2: return kVmxVDiag_Vmentry_GuestPdpte2Rsvd;
1419 case 3: return kVmxVDiag_Vmentry_GuestPdpte3Rsvd;
1420 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_11);
1421 }
1422}
1423
1424
1425/**
1426 * Gets the instruction diagnostic for host CR3 referenced PDPTE reserved bits
1427 * failure during VM-exit of a nested-guest.
1428 *
1429 * @param iPdpte The PDPTE entry index.
1430 */
1431IEM_STATIC VMXVDIAG iemVmxGetDiagVmexitPdpteRsvd(unsigned iPdpte)
1432{
1433 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
1434 switch (iPdpte)
1435 {
1436 case 0: return kVmxVDiag_Vmexit_HostPdpte0Rsvd;
1437 case 1: return kVmxVDiag_Vmexit_HostPdpte1Rsvd;
1438 case 2: return kVmxVDiag_Vmexit_HostPdpte2Rsvd;
1439 case 3: return kVmxVDiag_Vmexit_HostPdpte3Rsvd;
1440 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_12);
1441 }
1442}
1443
1444
1445/**
1446 * Applies the guest/host mask and the read-shadow to the nested-guest CR0/CR4, returning
1447 * the value as it would be observed by a CR0/CR4 read in VMX non-root operation.
1448 *
1449 * @returns The masked CR0/CR4.
1450 * @param pVCpu The cross context virtual CPU structure.
1451 * @param iCrReg The control register (either CR0 or CR4).
1452 * @param uGuestCrX The current guest CR0 or guest CR4.
1453 */
1454IEM_STATIC uint64_t iemVmxMaskCr0CR4(PVMCPU pVCpu, uint8_t iCrReg, uint64_t uGuestCrX)
1455{
1456 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
1457 Assert(iCrReg == 0 || iCrReg == 4);
1458
1459 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1460 Assert(pVmcs);
1461
1462 /*
1463 * For each CR0 or CR4 bit owned by the host, the corresponding bit is loaded from the
1464 * CR0 read shadow or CR4 read shadow. For each CR0 or CR4 bit that is not owned by the
1465 * host, the corresponding bit from the guest CR0 or guest CR4 is loaded.
1466 *
1467 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
1468 */
1469 uint64_t fGstHostMask;
1470 uint64_t fReadShadow;
1471 if (iCrReg == 0)
1472 {
1473 fGstHostMask = pVmcs->u64Cr0Mask.u;
1474 fReadShadow = pVmcs->u64Cr0ReadShadow.u;
1475 }
1476 else
1477 {
1478 fGstHostMask = pVmcs->u64Cr4Mask.u;
1479 fReadShadow = pVmcs->u64Cr4ReadShadow.u;
1480 }
1481
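 /*
  * For example (illustrative values): with a guest/host mask of 0x80000021 (PG, NE and PE
  * owned by the host) and a read shadow of 0x80000011, bits 31, 5 and 0 of the result are
  * taken from the read shadow while all remaining bits come from the current guest value.
  */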
1482 uint64_t const fMaskedCrX = (fReadShadow & fGstHostMask) | (uGuestCrX & ~fGstHostMask);
1483 return fMaskedCrX;
1484}
1485
1486
1487/**
1488 * Saves the guest control registers, debug registers and some MSRs as part of
1489 * VM-exit.
1490 *
1491 * @param pVCpu The cross context virtual CPU structure.
1492 */
1493IEM_STATIC void iemVmxVmexitSaveGuestControlRegsMsrs(PVMCPU pVCpu)
1494{
1495 /*
1496 * Saves the guest control registers, debug registers and some MSRs.
1497 * See Intel spec. 27.3.1 "Saving Control Registers, Debug Registers and MSRs".
1498 */
1499 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1500
1501 /* Save control registers. */
1502 pVmcs->u64GuestCr0.u = pVCpu->cpum.GstCtx.cr0;
1503 pVmcs->u64GuestCr3.u = pVCpu->cpum.GstCtx.cr3;
1504 pVmcs->u64GuestCr4.u = pVCpu->cpum.GstCtx.cr4;
1505
1506 /* Save SYSENTER CS, ESP, EIP. */
1507 pVmcs->u32GuestSysenterCS = pVCpu->cpum.GstCtx.SysEnter.cs;
1508 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1509 {
1510 pVmcs->u64GuestSysenterEsp.u = pVCpu->cpum.GstCtx.SysEnter.esp;
1511 pVmcs->u64GuestSysenterEip.u = pVCpu->cpum.GstCtx.SysEnter.eip;
1512 }
1513 else
1514 {
1515 pVmcs->u64GuestSysenterEsp.s.Lo = pVCpu->cpum.GstCtx.SysEnter.esp;
1516 pVmcs->u64GuestSysenterEip.s.Lo = pVCpu->cpum.GstCtx.SysEnter.eip;
1517 }
1518
1519 /* Save debug registers (DR7 and IA32_DEBUGCTL MSR). */
1520 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_DEBUG)
1521 {
1522 pVmcs->u64GuestDr7.u = pVCpu->cpum.GstCtx.dr[7];
1523 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
1524 }
1525
1526 /* Save PAT MSR. */
1527 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PAT_MSR)
1528 pVmcs->u64GuestPatMsr.u = pVCpu->cpum.GstCtx.msrPAT;
1529
1530 /* Save EFER MSR. */
1531 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_EFER_MSR)
1532 pVmcs->u64GuestEferMsr.u = pVCpu->cpum.GstCtx.msrEFER;
1533
1534 /* We don't support clearing IA32_BNDCFGS MSR yet. */
1535 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR));
1536
1537 /* Nothing to do for SMBASE register - We don't support SMM yet. */
1538}
1539
1540
1541/**
1542 * Saves the guest force-flags in preparation for entering the nested-guest.
1543 *
1544 * @param pVCpu The cross context virtual CPU structure.
1545 */
1546IEM_STATIC void iemVmxVmentrySaveForceFlags(PVMCPU pVCpu)
1547{
1548 /* We shouldn't be called multiple times during VM-entry. */
1549 Assert(pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions == 0);
1550
1551 /* MTF should not be set outside VMX non-root mode. */
1552 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
1553
1554 /*
1555 * Preserve the required force-flags.
1556 *
1557 * We cache and clear force-flags that would affect the execution of the
1558 * nested-guest. Cached flags are then restored while returning to the guest
1559 * if necessary.
1560 *
1561 * - VMCPU_FF_INHIBIT_INTERRUPTS need not be cached as it only affects
1562 * interrupts until the completion of the current VMLAUNCH/VMRESUME
1563 * instruction. Interrupt inhibition for any nested-guest instruction
1564 * is supplied by the guest-interruptibility state VMCS field and will
1565 * be set up as part of loading the guest state.
1566 *
1567 * - VMCPU_FF_BLOCK_NMIS needs to be cached as VM-exits caused before
1568 * successful VM-entry (due to invalid guest-state) need to continue
1569 * blocking NMIs if it was in effect before VM-entry.
1570 *
1571 * - MTF need not be preserved as it's used only in VMX non-root mode and
1572 * is supplied through the VM-execution controls.
1573 *
1574 * The remaining FFs (e.g. timers, APIC updates) can stay in place so that
1575 * we will be able to generate interrupts that may cause VM-exits for
1576 * the nested-guest.
1577 */
1578 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
1579}
1580
1581
1582/**
1583 * Restores the guest force-flags in preparation for exiting the nested-guest.
1584 *
1585 * @param pVCpu The cross context virtual CPU structure.
1586 */
1587IEM_STATIC void iemVmxVmexitRestoreForceFlags(PVMCPU pVCpu)
1588{
1589 if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
1590 {
1591 VMCPU_FF_SET_MASK(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions);
1592 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0;
1593 }
1594}
1595
1596
1597/**
1598 * Performs a VMX transition, updating PGM, IEM and CPUM.
1599 *
1600 * @param pVCpu The cross context virtual CPU structure.
1601 */
1602IEM_STATIC int iemVmxWorldSwitch(PVMCPU pVCpu)
1603{
1604 /*
1605 * Inform PGM about paging mode changes.
1606 * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet,
1607 * see comment in iemMemPageTranslateAndCheckAccess().
1608 */
1609 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
1610# ifdef IN_RING3
1611 Assert(rc != VINF_PGM_CHANGE_MODE);
1612# endif
1613 AssertRCReturn(rc, rc);
1614
1615 /* Inform CPUM (recompiler), can later be removed. */
1616 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1617
1618 /*
1619 * Flush the TLB with new CR3. This is required in case the PGM mode change
1620 * above doesn't actually change anything.
1621 */
1622 if (rc == VINF_SUCCESS)
1623 {
1624 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true);
1625 AssertRCReturn(rc, rc);
1626 }
1627
1628 /* Re-initialize IEM cache/state after the drastic mode switch. */
1629 iemReInitExec(pVCpu);
1630 return rc;
1631}
1632
1633
1634/**
1635 * Saves guest segment registers, GDTR, IDTR, LDTR, TR as part of VM-exit.
1636 *
1637 * @param pVCpu The cross context virtual CPU structure.
1638 */
1639IEM_STATIC void iemVmxVmexitSaveGuestSegRegs(PVMCPU pVCpu)
1640{
1641 /*
1642 * Save guest segment registers, GDTR, IDTR, LDTR, TR.
1643 * See Intel spec 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
1644 */
1645 /* CS, SS, ES, DS, FS, GS. */
1646 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1647 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
1648 {
1649 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1650 if (!pSelReg->Attr.n.u1Unusable)
1651 iemVmxVmcsSetGuestSegReg(pVmcs, iSegReg, pSelReg);
1652 else
1653 {
1654 /*
1655 * For unusable segments the attributes are undefined except for CS and SS.
1656 * For the rest we don't bother preserving anything but the unusable bit.
1657 */
1658 switch (iSegReg)
1659 {
1660 case X86_SREG_CS:
1661 pVmcs->GuestCs = pSelReg->Sel;
1662 pVmcs->u64GuestCsBase.u = pSelReg->u64Base;
1663 pVmcs->u32GuestCsLimit = pSelReg->u32Limit;
1664 pVmcs->u32GuestCsAttr = pSelReg->Attr.u & ( X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
1665 | X86DESCATTR_UNUSABLE);
1666 break;
1667
1668 case X86_SREG_SS:
1669 pVmcs->GuestSs = pSelReg->Sel;
1670 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1671 pVmcs->u64GuestSsBase.u &= UINT32_C(0xffffffff);
1672 pVmcs->u32GuestSsAttr = pSelReg->Attr.u & (X86DESCATTR_DPL | X86DESCATTR_UNUSABLE);
1673 break;
1674
1675 case X86_SREG_DS:
1676 pVmcs->GuestDs = pSelReg->Sel;
1677 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1678 pVmcs->u64GuestDsBase.u &= UINT32_C(0xffffffff);
1679 pVmcs->u32GuestDsAttr = X86DESCATTR_UNUSABLE;
1680 break;
1681
1682 case X86_SREG_ES:
1683 pVmcs->GuestEs = pSelReg->Sel;
1684 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1685 pVmcs->u64GuestEsBase.u &= UINT32_C(0xffffffff);
1686 pVmcs->u32GuestEsAttr = X86DESCATTR_UNUSABLE;
1687 break;
1688
1689 case X86_SREG_FS:
1690 pVmcs->GuestFs = pSelReg->Sel;
1691 pVmcs->u64GuestFsBase.u = pSelReg->u64Base;
1692 pVmcs->u32GuestFsAttr = X86DESCATTR_UNUSABLE;
1693 break;
1694
1695 case X86_SREG_GS:
1696 pVmcs->GuestGs = pSelReg->Sel;
1697 pVmcs->u64GuestGsBase.u = pSelReg->u64Base;
1698 pVmcs->u32GuestGsAttr = X86DESCATTR_UNUSABLE;
1699 break;
1700 }
1701 }
1702 }
1703
1704 /* Segment attribute bits 31:17 and 11:8 MBZ. */
1705 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
1706 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_UNUSABLE;
1707 /* LDTR. */
1708 {
1709 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.ldtr;
1710 pVmcs->GuestLdtr = pSelReg->Sel;
1711 pVmcs->u64GuestLdtrBase.u = pSelReg->u64Base;
1712 Assert(X86_IS_CANONICAL(pSelReg->u64Base));
1713 pVmcs->u32GuestLdtrLimit = pSelReg->u32Limit;
1714 pVmcs->u32GuestLdtrAttr = pSelReg->Attr.u & fValidAttrMask;
1715 }
1716
1717 /* TR. */
1718 {
1719 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.tr;
1720 pVmcs->GuestTr = pSelReg->Sel;
1721 pVmcs->u64GuestTrBase.u = pSelReg->u64Base;
1722 pVmcs->u32GuestTrLimit = pSelReg->u32Limit;
1723 pVmcs->u32GuestTrAttr = pSelReg->Attr.u & fValidAttrMask;
1724 }
1725
1726 /* GDTR. */
1727 pVmcs->u64GuestGdtrBase.u = pVCpu->cpum.GstCtx.gdtr.pGdt;
1728 pVmcs->u32GuestGdtrLimit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
1729
1730 /* IDTR. */
1731 pVmcs->u64GuestIdtrBase.u = pVCpu->cpum.GstCtx.idtr.pIdt;
1732 pVmcs->u32GuestIdtrLimit = pVCpu->cpum.GstCtx.idtr.cbIdt;
1733}
1734
1735
1736/**
1737 * Saves guest non-register state as part of VM-exit.
1738 *
1739 * @param pVCpu The cross context virtual CPU structure.
1740 * @param uExitReason The VM-exit reason.
1741 */
1742IEM_STATIC void iemVmxVmexitSaveGuestNonRegState(PVMCPU pVCpu, uint32_t uExitReason)
1743{
1744 /*
1745 * Save guest non-register state.
1746 * See Intel spec. 27.3.4 "Saving Non-Register State".
1747 */
1748 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1749
1750 /* Activity-state: VM-exits occur before changing the activity state, nothing further to do */
1751
1752 /* Interruptibility-state. */
1753 pVmcs->u32GuestIntrState = 0;
1754 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1755 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1756
1757 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1758 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
1759 {
1760 /** @todo NSTVMX: We can't distinguish between blocking-by-MovSS and blocking-by-STI
1761 * currently. */
1762 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1763 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1764 }
1765 /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */
1766
1767 /* Pending debug exceptions. */
1768 if ( uExitReason != VMX_EXIT_INIT_SIGNAL
1769 && uExitReason != VMX_EXIT_SMI
1770 && uExitReason != VMX_EXIT_ERR_MACHINE_CHECK
1771 && !HMVmxIsVmexitTrapLike(uExitReason))
1772 {
1773 /** @todo NSTVMX: also must exclude VM-exits caused by debug exceptions when
1774 * block-by-MovSS is in effect. */
1775 pVmcs->u64GuestPendingDbgXcpt.u = 0;
1776 }
1777
1778 /* Save VMX-preemption timer value. */
1779 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER)
1780 {
1781 uint32_t uPreemptTimer;
1782 if (uExitReason == VMX_EXIT_PREEMPT_TIMER)
1783 uPreemptTimer = 0;
1784 else
1785 {
1786 /*
1787 * Assume the following (illustrative values):
1788 * VmcsPreemptTimer = 2
1789 * VmcsPreemptTimer * RT_BIT(PreemptTimerShift) = 20000 (i.e. the saved timer value decrements by 1 every 20000 TSC ticks)
1790 * VmentryTick = 50000 (TSC at time of VM-entry)
1791 *
1792 * CurTick Delta PreemptTimerVal
1793 * ----------------------------------
1794 * 60000 10000 2
1795 * 80000 30000 1
1796 * 90000 40000 0 -> VM-exit.
1797 *
1798 * If Delta >= VmcsPreemptTimer * RT_BIT(PreemptTimerShift), cause a VMX-preemption timer VM-exit.
1799 * The saved VMX-preemption timer value is calculated as follows:
1800 * PreemptTimerVal = VmcsPreemptTimer - (Delta / (VmcsPreemptTimer * RT_BIT(PreemptTimerShift)))
1801 * E.g. (using integer division):
1802 * Delta = 10000
1803 * Tmp = 10000 / 20000 = 0
1804 * NewPt = 2 - 0 = 2
1805 * Delta = 30000
1806 * Tmp = 30000 / 20000 = 1
1807 * NewPt = 2 - 1 = 1
1808 * Delta = 40000
1809 * Tmp = 40000 / 20000 = 2
1810 * NewPt = 2 - 2 = 0
1811 */
1812 uint64_t const uCurTick = TMCpuTickGetNoCheck(pVCpu);
1813 uint64_t const uVmentryTick = pVCpu->cpum.GstCtx.hwvirt.vmx.uVmentryTick;
1814 uint64_t const uDelta = uCurTick - uVmentryTick;
1815 uint32_t const uVmcsPreemptVal = pVmcs->u32PreemptTimer;
1816 uPreemptTimer = uVmcsPreemptVal - ASMDivU64ByU32RetU32(uDelta, uVmcsPreemptVal * RT_BIT(VMX_V_PREEMPT_TIMER_SHIFT));
1817 }
1818
1819 pVmcs->u32PreemptTimer = uPreemptTimer;
1820 }
1821
1822
1823 /* PDPTEs. */
1824 /* We don't support EPT yet. */
1825 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
1826 pVmcs->u64GuestPdpte0.u = 0;
1827 pVmcs->u64GuestPdpte1.u = 0;
1828 pVmcs->u64GuestPdpte2.u = 0;
1829 pVmcs->u64GuestPdpte3.u = 0;
1830}
1831
1832
1833/**
1834 * Saves the guest-state as part of VM-exit.
1835 *
1837 * @param pVCpu The cross context virtual CPU structure.
1838 * @param uExitReason The VM-exit reason.
1839 */
1840IEM_STATIC void iemVmxVmexitSaveGuestState(PVMCPU pVCpu, uint32_t uExitReason)
1841{
1842 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1843 Assert(pVmcs);
1844
1845 iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu);
1846 iemVmxVmexitSaveGuestSegRegs(pVCpu);
1847
1848 /** @todo r=ramshankar: The below hack is no longer necessary because we invoke the
1849 * VM-exit after updating RIP. I'm leaving it in-place temporarily in case
1850 * we need to fix missing exit information or callers still setting
1851 * instruction-length field when it is not necessary. */
1852#if 0
1853 /*
1854 * Save guest RIP, RSP and RFLAGS.
1855 * See Intel spec. 27.3.3 "Saving RIP, RSP and RFLAGS".
1856 *
1857 * For trap-like VM-exits we must advance the RIP by the length of the instruction.
1858 * Callers must pass the instruction length in the VM-exit instruction length
1859 * field though it is undefined for such VM-exits. After updating RIP here, we clear
1860 * the VM-exit instruction length field.
1861 *
1862 * See Intel spec. 27.1 "Architectural State Before A VM Exit"
1863 */
1864 if (HMVmxIsTrapLikeVmexit(uExitReason))
1865 {
1866 uint8_t const cbInstr = pVmcs->u32RoExitInstrLen;
1867 AssertMsg(cbInstr >= 1 && cbInstr <= 15, ("uReason=%u cbInstr=%u\n", uExitReason, cbInstr));
1868 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1869 iemVmxVmcsSetExitInstrLen(pVCpu, 0 /* cbInstr */);
1870 }
1871#endif
1872
1873 /* We don't support enclave mode yet. */
1874 pVmcs->u64GuestRip.u = pVCpu->cpum.GstCtx.rip;
1875 pVmcs->u64GuestRsp.u = pVCpu->cpum.GstCtx.rsp;
1876 pVmcs->u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u; /** @todo NSTVMX: Check RFLAGS.RF handling. */
1877
1878 iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason);
1879}
1880
1881
1882/**
1883 * Saves the guest MSRs into the VM-exit auto-store MSRs area as part of VM-exit.
1884 *
1885 * @returns VBox status code.
1886 * @param pVCpu The cross context virtual CPU structure.
1887 * @param uExitReason The VM-exit reason (for diagnostic purposes).
1888 */
1889IEM_STATIC int iemVmxVmexitSaveGuestAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
1890{
1891 /*
1892 * Save guest MSRs.
1893 * See Intel spec. 27.4 "Saving MSRs".
1894 */
1895 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1896 const char *const pszFailure = "VMX-abort";
1897
1898 /*
1899 * The VM-exit MSR-store area address need not be a valid guest-physical address if the
1900 * VM-exit MSR-store count is 0. If this is the case, bail early without reading it.
1901 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
1902 */
1903 uint32_t const cMsrs = pVmcs->u32ExitMsrStoreCount;
1904 if (!cMsrs)
1905 return VINF_SUCCESS;
1906
1907 /*
1908 * Verify the MSR auto-store count. Physical CPUs can behave unpredictably if the maximum
1909 * supported count is exceeded, possibly raising #MC exceptions during the VMX transition. Our
1910 * implementation causes a VMX-abort followed by a triple-fault.
1911 */
1912 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
1913 if (fIsMsrCountValid)
1914 { /* likely */ }
1915 else
1916 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount);
1917
1918 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
1919 Assert(pMsr);
1920 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
1921 {
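 /* Per Intel spec. 27.4 "Saving MSRs", storing the IA32_SMBASE MSR or any MSR in the x2APIC
    range (indices 800H-8FFH) is not allowed; such entries are treated as failures below. */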
1922 if ( !pMsr->u32Reserved
1923 && pMsr->u32Msr != MSR_IA32_SMBASE
1924 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
1925 {
1926 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pMsr->u32Msr, &pMsr->u64Value);
1927 if (rcStrict == VINF_SUCCESS)
1928 continue;
1929
1930 /*
1931 * If we're in ring-0, we cannot handle returns to ring-3 at this point and still continue the VM-exit.
1932 * If the guest hypervisor stores MSRs that require ring-3 handling, we cause a VMX-abort,
1933 * recording the MSR index in the auxiliary info. field and indicating it further with our
1934 * own, specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0
1935 * if possible, or come up with a better, generic solution.
1936 */
1937 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
1938 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_READ
1939 ? kVmxVDiag_Vmexit_MsrStoreRing3
1940 : kVmxVDiag_Vmexit_MsrStore;
1941 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
1942 }
1943 else
1944 {
1945 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
1946 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreRsvd);
1947 }
1948 }
1949
1950 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrStore.u;
1951 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysAutoMsrArea,
1952 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea), VMX_V_AUTOMSR_AREA_SIZE);
1953 if (RT_SUCCESS(rc))
1954 { /* likely */ }
1955 else
1956 {
1957 AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
1958 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrWritePhys);
1959 }
1960
1961 NOREF(uExitReason);
1962 NOREF(pszFailure);
1963 return VINF_SUCCESS;
1964}
1965
1966
1967/**
1968 * Performs a VMX abort (due to a fatal error during VM-exit).
1969 *
1970 * @returns Strict VBox status code.
1971 * @param pVCpu The cross context virtual CPU structure.
1972 * @param enmAbort The VMX abort reason.
1973 */
1974IEM_STATIC VBOXSTRICTRC iemVmxAbort(PVMCPU pVCpu, VMXABORT enmAbort)
1975{
1976 /*
1977 * Perform the VMX abort.
1978 * See Intel spec. 27.7 "VMX Aborts".
1979 */
1980 LogFunc(("enmAbort=%u (%s) -> RESET\n", enmAbort, HMVmxGetAbortDesc(enmAbort)));
1981
1982 /* We don't support SMX yet. */
1983 pVCpu->cpum.GstCtx.hwvirt.vmx.enmAbort = enmAbort;
1984 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1985 {
1986 RTGCPHYS const GCPhysVmcs = IEM_VMX_GET_CURRENT_VMCS(pVCpu);
1987 uint32_t const offVmxAbort = RT_UOFFSETOF(VMXVVMCS, u32VmxAbortId);
1988 PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + offVmxAbort, &enmAbort, sizeof(enmAbort));
1989 }
1990
1991 return VINF_EM_TRIPLE_FAULT;
1992}
1993
1994
1995/**
1996 * Loads host control registers, debug registers and MSRs as part of VM-exit.
1997 *
1998 * @param pVCpu The cross context virtual CPU structure.
1999 */
2000IEM_STATIC void iemVmxVmexitLoadHostControlRegsMsrs(PVMCPU pVCpu)
2001{
2002 /*
2003 * Load host control registers, debug registers and MSRs.
2004 * See Intel spec. 27.5.1 "Loading Host Control Registers, Debug Registers, MSRs".
2005 */
2006 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2007 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2008
2009 /* CR0. */
2010 {
2011 /* Bits 63:32, 28:19, 17, 15:6, ET, CD, NW and CR0 MB1 bits are not modified. */
2012 uint64_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
2013 uint64_t const fCr0IgnMask = UINT64_C(0xffffffff1ffaffc0) | X86_CR0_ET | X86_CR0_CD | X86_CR0_NW | uCr0Fixed0;
2014 uint64_t const uHostCr0 = pVmcs->u64HostCr0.u;
2015 uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
2016 uint64_t const uValidCr0 = (uHostCr0 & ~fCr0IgnMask) | (uGuestCr0 & fCr0IgnMask);
2017 CPUMSetGuestCR0(pVCpu, uValidCr0);
2018 }
2019
2020 /* CR4. */
2021 {
2022 /* CR4 MB1 bits are not modified. */
2023 uint64_t const fCr4IgnMask = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
2024 uint64_t const uHostCr4 = pVmcs->u64HostCr4.u;
2025 uint64_t const uGuestCr4 = pVCpu->cpum.GstCtx.cr4;
2026 uint64_t uValidCr4 = (uHostCr4 & ~fCr4IgnMask) | (uGuestCr4 & fCr4IgnMask);
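 /* CR4.PAE is forced set when the host is in long mode and CR4.PCIDE is forced clear when it
    is not (see Intel spec. 27.5.1 "Loading Host Control Registers, Debug Registers, MSRs"). */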
2027 if (fHostInLongMode)
2028 uValidCr4 |= X86_CR4_PAE;
2029 else
2030 uValidCr4 &= ~X86_CR4_PCIDE;
2031 CPUMSetGuestCR4(pVCpu, uValidCr4);
2032 }
2033
2034 /* CR3 (host value validated while checking host-state during VM-entry). */
2035 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64HostCr3.u;
2036
2037 /* DR7. */
2038 pVCpu->cpum.GstCtx.dr[7] = X86_DR7_INIT_VAL;
2039
2040 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
2041
2042 /* Load SYSENTER CS, ESP, EIP (host values validated while checking host-state during VM-entry). */
2043 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64HostSysenterEip.u;
2044 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64HostSysenterEsp.u;
2045 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32HostSysenterCs;
2046
2047 /* FS, GS bases are loaded later while we load host segment registers. */
2048
2049 /* EFER MSR (host value validated while checking host-state during VM-entry). */
2050 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
2051 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64HostEferMsr.u;
2052 else if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2053 {
2054 if (fHostInLongMode)
2055 pVCpu->cpum.GstCtx.msrEFER |= (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
2056 else
2057 pVCpu->cpum.GstCtx.msrEFER &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
2058 }
2059
2060 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
2061
2062 /* PAT MSR (host value is validated while checking host-state during VM-entry). */
2063 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
2064 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64HostPatMsr.u;
2065
2066 /* We don't support IA32_BNDCFGS MSR yet. */
2067}
2068
2069
2070/**
2071 * Loads host segment registers, GDTR, IDTR, LDTR and TR as part of VM-exit.
2072 *
2073 * @param pVCpu The cross context virtual CPU structure.
2074 */
2075IEM_STATIC void iemVmxVmexitLoadHostSegRegs(PVMCPU pVCpu)
2076{
2077 /*
2078 * Load host segment registers, GDTR, IDTR, LDTR and TR.
2079 * See Intel spec. 27.5.2 "Loading Host Segment and Descriptor-Table Registers".
2080 *
2081 * Warning! Be careful to not touch fields that are reserved by VT-x,
2082 * e.g. segment limit high bits stored in segment attributes (in bits 11:8).
2083 */
2084 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2085 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2086
2087 /* CS, SS, ES, DS, FS, GS. */
2088 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
2089 {
2090 RTSEL const HostSel = iemVmxVmcsGetHostSelReg(pVmcs, iSegReg);
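 /* A host selector of zero marks the segment as unusable on VM-exit; this cannot happen for
    CS and TR as their host selector fields are checked to be non-zero during VM-entry. */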
2091 bool const fUnusable = RT_BOOL(HostSel == 0);
2092
2093 /* Selector. */
2094 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel = HostSel;
2095 pVCpu->cpum.GstCtx.aSRegs[iSegReg].ValidSel = HostSel;
2096 pVCpu->cpum.GstCtx.aSRegs[iSegReg].fFlags = CPUMSELREG_FLAGS_VALID;
2097
2098 /* Limit. */
2099 pVCpu->cpum.GstCtx.aSRegs[iSegReg].u32Limit = 0xffffffff;
2100
2101 /* Base and Attributes. */
2102 switch (iSegReg)
2103 {
2104 case X86_SREG_CS:
2105 {
2106 pVCpu->cpum.GstCtx.cs.u64Base = 0;
2107 pVCpu->cpum.GstCtx.cs.Attr.n.u4Type = X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED;
2108 pVCpu->cpum.GstCtx.cs.Attr.n.u1DescType = 1;
2109 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = 0;
2110 pVCpu->cpum.GstCtx.cs.Attr.n.u1Present = 1;
2111 pVCpu->cpum.GstCtx.cs.Attr.n.u1Long = fHostInLongMode;
2112 pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig = !fHostInLongMode;
2113 pVCpu->cpum.GstCtx.cs.Attr.n.u1Granularity = 1;
2114 Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Unusable);
2115 Assert(!fUnusable);
2116 break;
2117 }
2118
2119 case X86_SREG_SS:
2120 case X86_SREG_ES:
2121 case X86_SREG_DS:
2122 {
2123 pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base = 0;
2124 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2125 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DescType = 1;
2126 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u2Dpl = 0;
2127 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Present = 1;
2128 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DefBig = 1;
2129 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Granularity = 1;
2130 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable = fUnusable;
2131 break;
2132 }
2133
2134 case X86_SREG_FS:
2135 {
2136 Assert(X86_IS_CANONICAL(pVmcs->u64HostFsBase.u));
2137 pVCpu->cpum.GstCtx.fs.u64Base = !fUnusable ? pVmcs->u64HostFsBase.u : 0;
2138 pVCpu->cpum.GstCtx.fs.Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2139 pVCpu->cpum.GstCtx.fs.Attr.n.u1DescType = 1;
2140 pVCpu->cpum.GstCtx.fs.Attr.n.u2Dpl = 0;
2141 pVCpu->cpum.GstCtx.fs.Attr.n.u1Present = 1;
2142 pVCpu->cpum.GstCtx.fs.Attr.n.u1DefBig = 1;
2143 pVCpu->cpum.GstCtx.fs.Attr.n.u1Granularity = 1;
2144 pVCpu->cpum.GstCtx.fs.Attr.n.u1Unusable = fUnusable;
2145 break;
2146 }
2147
2148 case X86_SREG_GS:
2149 {
2150 Assert(X86_IS_CANONICAL(pVmcs->u64HostGsBase.u));
2151 pVCpu->cpum.GstCtx.gs.u64Base = !fUnusable ? pVmcs->u64HostGsBase.u : 0;
2152 pVCpu->cpum.GstCtx.gs.Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2153 pVCpu->cpum.GstCtx.gs.Attr.n.u1DescType = 1;
2154 pVCpu->cpum.GstCtx.gs.Attr.n.u2Dpl = 0;
2155 pVCpu->cpum.GstCtx.gs.Attr.n.u1Present = 1;
2156 pVCpu->cpum.GstCtx.gs.Attr.n.u1DefBig = 1;
2157 pVCpu->cpum.GstCtx.gs.Attr.n.u1Granularity = 1;
2158 pVCpu->cpum.GstCtx.gs.Attr.n.u1Unusable = fUnusable;
2159 break;
2160 }
2161 }
2162 }
2163
2164 /* TR. */
2165 Assert(X86_IS_CANONICAL(pVmcs->u64HostTrBase.u));
2166 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1Unusable);
2167 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->HostTr;
2168 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->HostTr;
2169 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
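 /* The TR limit is set to 67H, the minimum limit of a 32-bit TSS (see Intel spec. 27.5.2). */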
2170 pVCpu->cpum.GstCtx.tr.u32Limit = X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN;
2171 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64HostTrBase.u;
2172 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2173 pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType = 0;
2174 pVCpu->cpum.GstCtx.tr.Attr.n.u2Dpl = 0;
2175 pVCpu->cpum.GstCtx.tr.Attr.n.u1Present = 1;
2176 pVCpu->cpum.GstCtx.tr.Attr.n.u1DefBig = 0;
2177 pVCpu->cpum.GstCtx.tr.Attr.n.u1Granularity = 0;
2178
2179 /* LDTR. */
2180 pVCpu->cpum.GstCtx.ldtr.Sel = 0;
2181 pVCpu->cpum.GstCtx.ldtr.ValidSel = 0;
2182 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2183 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
2184 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
2185 pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Unusable = 1;
2186
2187 /* GDTR. */
2188 Assert(X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u));
2189 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64HostGdtrBase.u;
2190 pVCpu->cpum.GstCtx.gdtr.cbGdt = 0xffff; /* Limit is set to FFFFH (see Intel spec. 27.5.2). */
2191
2192 /* IDTR. */
2193 Assert(X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u));
2194 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64HostIdtrBase.u;
2195 pVCpu->cpum.GstCtx.idtr.cbIdt = 0xffff; /* Limit is set to FFFFH (see Intel spec. 27.5.2). */
2196}
2197
2198
2199/**
2200 * Checks host PDPTEs as part of VM-exit.
2201 *
2202 * @param pVCpu The cross context virtual CPU structure.
2203 * @param uExitReason The VM-exit reason (for logging purposes).
2204 */
2205IEM_STATIC int iemVmxVmexitCheckHostPdptes(PVMCPU pVCpu, uint32_t uExitReason)
2206{
2207 /*
2208 * Check host PDPTEs.
2209 * See Intel spec. 27.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
2210 */
2211 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2212 const char *const pszFailure = "VMX-abort";
2213 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2214
2215 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
2216 && !fHostInLongMode)
2217 {
2218 uint64_t const uHostCr3 = pVCpu->cpum.GstCtx.cr3 & X86_CR3_PAE_PAGE_MASK;
2219 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
2220 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uHostCr3, sizeof(aPdptes));
2221 if (RT_SUCCESS(rc))
2222 {
2223 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
2224 {
2225 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
2226 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
2227 { /* likely */ }
2228 else
2229 {
2230 VMXVDIAG const enmDiag = iemVmxGetDiagVmexitPdpteRsvd(iPdpte);
2231 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2232 }
2233 }
2234 }
2235 else
2236 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_HostPdpteCr3ReadPhys);
2237 }
2238
2239 NOREF(pszFailure);
2240 NOREF(uExitReason);
2241 return VINF_SUCCESS;
2242}
2243
2244
2245/**
2246 * Loads the host MSRs from the VM-exit auto-load MSRs area as part of VM-exit.
2247 *
2248 * @returns VBox status code.
2249 * @param pVCpu The cross context virtual CPU structure.
2250 * @param uExitReason The VM-exit reason (for logging purposes).
2251 */
2252IEM_STATIC int iemVmxVmexitLoadHostAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
2253{
2254 /*
2255 * Load host MSRs.
2256 * See Intel spec. 27.6 "Loading MSRs".
2257 */
2258 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2259 const char *const pszFailure = "VMX-abort";
2260
2261 /*
2262 * The VM-exit MSR-load area address need not be a valid guest-physical address if the
2263 * VM-exit MSR load count is 0. If this is the case, bail early without reading it.
2264 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
2265 */
2266 uint32_t const cMsrs = pVmcs->u32ExitMsrLoadCount;
2267 if (!cMsrs)
2268 return VINF_SUCCESS;
2269
2270 /*
2271 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the maximum
2272 * supported count is exceeded, possibly raising #MC exceptions during the VMX transition. Our
2273 * implementation causes a VMX-abort followed by a triple-fault.
2274 */
2275 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
2276 if (fIsMsrCountValid)
2277 { /* likely */ }
2278 else
2279 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadCount);
2280
2281 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrLoad.u;
2282 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
2283 GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
2284 if (RT_SUCCESS(rc))
2285 {
2286 PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
2287 Assert(pMsr);
2288 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
2289 {
2290 if ( !pMsr->u32Reserved
2291 && pMsr->u32Msr != MSR_K8_FS_BASE
2292 && pMsr->u32Msr != MSR_K8_GS_BASE
2293 && pMsr->u32Msr != MSR_K6_EFER
2294 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
2295 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
2296 {
2297 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
2298 if (rcStrict == VINF_SUCCESS)
2299 continue;
2300
2301 /*
2302 * If we're in ring-0, we cannot handle returns to ring-3 at this point and still continue the VM-exit.
2303 * If the guest hypervisor loads MSRs that require ring-3 handling, we cause a VMX-abort,
2304 * recording the MSR index in the auxiliary info. field and indicating it further with our
2305 * own, specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0
2306 * if possible, or come up with a better, generic solution.
2307 */
2308 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2309 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
2310 ? kVmxVDiag_Vmexit_MsrLoadRing3
2311 : kVmxVDiag_Vmexit_MsrLoad;
2312 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2313 }
2314 else
2315 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadRsvd);
2316 }
2317 }
2318 else
2319 {
2320 AssertMsgFailed(("VM-exit: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
2321 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadPtrReadPhys);
2322 }
2323
2324 NOREF(uExitReason);
2325 NOREF(pszFailure);
2326 return VINF_SUCCESS;
2327}
2328
2329
2330/**
2331 * Loads the host state as part of VM-exit.
2332 *
2333 * @returns Strict VBox status code.
2334 * @param pVCpu The cross context virtual CPU structure.
2335 * @param uExitReason The VM-exit reason (for logging purposes).
2336 */
2337IEM_STATIC VBOXSTRICTRC iemVmxVmexitLoadHostState(PVMCPU pVCpu, uint32_t uExitReason)
2338{
2339 /*
2340 * Load host state.
2341 * See Intel spec. 27.5 "Loading Host State".
2342 */
2343 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2344 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2345
2346 /* We cannot return from a long-mode guest to a host that is not in long mode. */
2347 if ( CPUMIsGuestInLongMode(pVCpu)
2348 && !fHostInLongMode)
2349 {
2350 Log(("VM-exit from long-mode guest to host not in long-mode -> VMX-Abort\n"));
2351 return iemVmxAbort(pVCpu, VMXABORT_HOST_NOT_IN_LONG_MODE);
2352 }
2353
2354 iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);
2355 iemVmxVmexitLoadHostSegRegs(pVCpu);
2356
2357 /*
2358 * Load host RIP, RSP and RFLAGS.
2359 * See Intel spec. 27.5.3 "Loading Host RIP, RSP and RFLAGS"
2360 */
2361 pVCpu->cpum.GstCtx.rip = pVmcs->u64HostRip.u;
2362 pVCpu->cpum.GstCtx.rsp = pVmcs->u64HostRsp.u;
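 /* RFLAGS is loaded with all bits cleared except bit 1, which is always set. */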
2363 pVCpu->cpum.GstCtx.rflags.u = X86_EFL_1;
2364
2365 /* Clear address range monitoring. */
2366 EMMonitorWaitClear(pVCpu);
2367
2368 /* Perform the VMX transition (PGM updates). */
2369 VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
2370 if (rcStrict == VINF_SUCCESS)
2371 {
2372 /* Check host PDPTEs (only when we've fully switched page tables). */
2373 /** @todo r=ramshankar: I don't know if PGM does this for us already or not... */
2374 int rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);
2375 if (RT_FAILURE(rc))
2376 {
2377 Log(("VM-exit failed while restoring host PDPTEs -> VMX-Abort\n"));
2378 return iemVmxAbort(pVCpu, VMXBOART_HOST_PDPTE);
2379 }
2380 }
2381 else if (RT_SUCCESS(rcStrict))
2382 {
2383 Log3(("VM-exit: iemVmxWorldSwitch returns %Rrc (uExitReason=%u) -> Setting passup status\n", VBOXSTRICTRC_VAL(rcStrict),
2384 uExitReason));
2385 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2386 }
2387 else
2388 {
2389 Log3(("VM-exit: iemVmxWorldSwitch failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
2390 return VBOXSTRICTRC_VAL(rcStrict);
2391 }
2392
2393 Assert(rcStrict == VINF_SUCCESS);
2394
2395 /* Load MSRs from the VM-exit auto-load MSR area. */
2396 int rc = iemVmxVmexitLoadHostAutoMsrs(pVCpu, uExitReason);
2397 if (RT_FAILURE(rc))
2398 {
2399 Log(("VM-exit failed while loading host MSRs -> VMX-Abort\n"));
2400 return iemVmxAbort(pVCpu, VMXABORT_LOAD_HOST_MSR);
2401 }
2402
2403 return rcStrict;
2404}
2405
2406
2407/**
2408 * Gets VM-exit instruction information along with any displacement for an
2409 * instruction VM-exit.
2410 *
2411 * @returns The VM-exit instruction information.
2412 * @param pVCpu The cross context virtual CPU structure.
2413 * @param uExitReason The VM-exit reason.
2414 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_XXX).
2415 * @param pGCPtrDisp Where to store the displacement field. Optional, can be
2416 * NULL.
2417 */
2418IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, PRTGCPTR pGCPtrDisp)
2419{
2420 RTGCPTR GCPtrDisp;
2421 VMXEXITINSTRINFO ExitInstrInfo;
2422 ExitInstrInfo.u = 0;
2423
2424 /*
2425 * Get and parse the ModR/M byte from our decoded opcodes.
2426 */
2427 uint8_t bRm;
2428 uint8_t const offModRm = pVCpu->iem.s.offModRm;
2429 IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
2430 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2431 {
2432 /*
2433 * ModR/M indicates register addressing.
2434 *
2435 * The primary/secondary register operands are reported in the iReg1 or iReg2
2436 * fields depending on whether it is a read/write form.
2437 */
2438 uint8_t idxReg1;
2439 uint8_t idxReg2;
2440 if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
2441 {
2442 idxReg1 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
2443 idxReg2 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
2444 }
2445 else
2446 {
2447 idxReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
2448 idxReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
2449 }
2450 ExitInstrInfo.All.u2Scaling = 0;
2451 ExitInstrInfo.All.iReg1 = idxReg1;
2452 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
2453 ExitInstrInfo.All.fIsRegOperand = 1;
2454 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
2455 ExitInstrInfo.All.iSegReg = 0;
2456 ExitInstrInfo.All.iIdxReg = 0;
2457 ExitInstrInfo.All.fIdxRegInvalid = 1;
2458 ExitInstrInfo.All.iBaseReg = 0;
2459 ExitInstrInfo.All.fBaseRegInvalid = 1;
2460 ExitInstrInfo.All.iReg2 = idxReg2;
2461
2462 /* Displacement not applicable for register addressing. */
2463 GCPtrDisp = 0;
2464 }
2465 else
2466 {
2467 /*
2468 * ModR/M indicates memory addressing.
2469 */
2470 uint8_t uScale = 0;
2471 bool fBaseRegValid = false;
2472 bool fIdxRegValid = false;
2473 uint8_t iBaseReg = 0;
2474 uint8_t iIdxReg = 0;
2475 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
2476 {
2477 /*
2478 * Parse the ModR/M, displacement for 16-bit addressing mode.
2479 * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
2480 */
2481 uint16_t u16Disp = 0;
2482 uint8_t const offDisp = offModRm + sizeof(bRm);
2483 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
2484 {
2485 /* Displacement without any registers. */
2486 IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
2487 }
2488 else
2489 {
2490 /* Register (index and base). */
2491 switch (bRm & X86_MODRM_RM_MASK)
2492 {
2493 case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
2494 case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
2495 case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
2496 case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
2497 case 4: fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
2498 case 5: fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
2499 case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
2500 case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
2501 }
2502
2503 /* Register + displacement. */
2504 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2505 {
2506 case 0: break;
2507 case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
2508 case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp); break;
2509 default:
2510 {
2511 /* Register addressing, handled at the beginning. */
2512 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
2513 break;
2514 }
2515 }
2516 }
2517
2518 Assert(!uScale); /* There's no scaling/SIB byte for 16-bit addressing. */
2519 GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
2520 }
2521 else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
2522 {
2523 /*
2524 * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
2525 * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
2526 */
2527 uint32_t u32Disp = 0;
2528 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
2529 {
2530 /* Displacement without any registers. */
2531 uint8_t const offDisp = offModRm + sizeof(bRm);
2532 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
2533 }
2534 else
2535 {
2536 /* Register (and perhaps scale, index and base). */
2537 uint8_t offDisp = offModRm + sizeof(bRm);
2538 iBaseReg = (bRm & X86_MODRM_RM_MASK);
2539 if (iBaseReg == 4)
2540 {
2541 /* An SIB byte follows the ModR/M byte, parse it. */
2542 uint8_t bSib;
2543 uint8_t const offSib = offModRm + sizeof(bRm);
2544 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
2545
2546 /* A displacement may follow SIB, update its offset. */
2547 offDisp += sizeof(bSib);
2548
2549 /* Get the scale. */
2550 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
2551
2552 /* Get the index register. */
2553 iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
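 /* An index field of 4 means no index register is used (ESP cannot be an index). */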
2554 fIdxRegValid = RT_BOOL(iIdxReg != 4);
2555
2556 /* Get the base register. */
2557 iBaseReg = bSib & X86_SIB_BASE_MASK;
2558 fBaseRegValid = true;
2559 if (iBaseReg == 5)
2560 {
2561 if ((bRm & X86_MODRM_MOD_MASK) == 0)
2562 {
2563 /* Mod is 0 implies a 32-bit displacement with no base. */
2564 fBaseRegValid = false;
2565 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
2566 }
2567 else
2568 {
2569 /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
2570 iBaseReg = X86_GREG_xBP;
2571 }
2572 }
2573 }
2574
2575 /* Register + displacement. */
2576 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2577 {
2578 case 0: /* Handled above */ break;
2579 case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
2580 case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp); break;
2581 default:
2582 {
2583 /* Register addressing, handled at the beginning. */
2584 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
2585 break;
2586 }
2587 }
2588 }
2589
2590 GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
2591 }
2592 else
2593 {
2594 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
2595
2596 /*
2597 * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
2598 * See Intel instruction spec. 2.2 "IA-32e Mode".
2599 */
2600 uint64_t u64Disp = 0;
2601 bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
2602 if (fRipRelativeAddr)
2603 {
2604 /*
2605 * RIP-relative addressing mode.
2606 *
2607 * The displacement is 32-bit signed implying an offset range of +/-2G.
2608 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
2609 */
2610 uint8_t const offDisp = offModRm + sizeof(bRm);
2611 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
2612 }
2613 else
2614 {
2615 uint8_t offDisp = offModRm + sizeof(bRm);
2616
2617 /*
2618 * Register (and perhaps scale, index and base).
2619 *
2620 * REX.B extends the most-significant bit of the base register. However, REX.B
2621 * is ignored while determining whether an SIB follows the opcode. Hence, we
2622 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
2623 *
2624 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
2625 */
2626 iBaseReg = (bRm & X86_MODRM_RM_MASK);
2627 if (iBaseReg == 4)
2628 {
2629 /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
2630 uint8_t bSib;
2631 uint8_t const offSib = offModRm + sizeof(bRm);
2632 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
2633
2634 /* Displacement may follow SIB, update its offset. */
2635 offDisp += sizeof(bSib);
2636
2637 /* Get the scale. */
2638 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
2639
2640 /* Get the index. */
2641 iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
2642 fIdxRegValid = RT_BOOL(iIdxReg != 4); /* R12 -can- be used as an index register. */
2643
2644 /* Get the base. */
2645 iBaseReg = (bSib & X86_SIB_BASE_MASK);
2646 fBaseRegValid = true;
2647 if (iBaseReg == 5)
2648 {
2649 if ((bRm & X86_MODRM_MOD_MASK) == 0)
2650 {
2651 /* Mod is 0 implies a signed 32-bit displacement with no base. */
2652 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
2653 }
2654 else
2655 {
2656 /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
2657 iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
2658 }
2659 }
2660 }
2661 iBaseReg |= pVCpu->iem.s.uRexB;
2662
2663 /* Register + displacement. */
2664 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2665 {
2666 case 0: /* Handled above */ break;
2667 case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp); break;
2668 case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
2669 default:
2670 {
2671 /* Register addressing, handled at the beginning. */
2672 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
2673 break;
2674 }
2675 }
2676 }
2677
2678 GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
2679 }
2680
2681 /*
2682 * The primary or secondary register operand is reported in iReg2 depending
2683 * on whether the primary operand is in read/write form.
2684 */
2685 uint8_t idxReg2;
2686 if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
2687 {
2688 idxReg2 = bRm & X86_MODRM_RM_MASK;
2689 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
2690 idxReg2 |= pVCpu->iem.s.uRexB;
2691 }
2692 else
2693 {
2694 idxReg2 = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
2695 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
2696 idxReg2 |= pVCpu->iem.s.uRexReg;
2697 }
2698 ExitInstrInfo.All.u2Scaling = uScale;
2699 ExitInstrInfo.All.iReg1 = 0; /* Not applicable for memory addressing. */
2700 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
2701 ExitInstrInfo.All.fIsRegOperand = 0;
2702 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
2703 ExitInstrInfo.All.iSegReg = pVCpu->iem.s.iEffSeg;
2704 ExitInstrInfo.All.iIdxReg = iIdxReg;
2705 ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
2706 ExitInstrInfo.All.iBaseReg = iBaseReg;
2707 ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
2708 ExitInstrInfo.All.iReg2 = idxReg2;
2709 }
2710
2711 /*
2712 * Handle exceptions to the norm for certain instructions.
2713 * (e.g. some instructions convey an instruction identity in place of iReg2).
2714 */
2715 switch (uExitReason)
2716 {
2717 case VMX_EXIT_GDTR_IDTR_ACCESS:
2718 {
2719 Assert(VMXINSTRID_IS_VALID(uInstrId));
2720 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
2721 ExitInstrInfo.GdtIdt.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
2722 ExitInstrInfo.GdtIdt.u2Undef0 = 0;
2723 break;
2724 }
2725
2726 case VMX_EXIT_LDTR_TR_ACCESS:
2727 {
2728 Assert(VMXINSTRID_IS_VALID(uInstrId));
2729 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
2730 ExitInstrInfo.LdtTr.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
2731 ExitInstrInfo.LdtTr.u2Undef0 = 0;
2732 break;
2733 }
2734
2735 case VMX_EXIT_RDRAND:
2736 case VMX_EXIT_RDSEED:
2737 {
2738 Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
2739 break;
2740 }
2741 }
2742
2743 /* Update displacement and return the constructed VM-exit instruction information field. */
2744 if (pGCPtrDisp)
2745 *pGCPtrDisp = GCPtrDisp;
2746
2747 return ExitInstrInfo.u;
2748}
2749
2750
2751/**
2752 * VMX VM-exit handler.
2753 *
2754 * @returns Strict VBox status code.
2755 * @retval VINF_VMX_VMEXIT when the VM-exit is successful.
2756 * @retval VINF_EM_TRIPLE_FAULT when VM-exit is unsuccessful and leads to a
2757 * triple-fault.
2758 *
2759 * @param pVCpu The cross context virtual CPU structure.
2760 * @param uExitReason The VM-exit reason.
2761 *
2762 * @remarks Make sure VM-exit qualification is updated before calling this
2763 * function!
2764 */
2765IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason)
2766{
2767 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2768 Assert(pVmcs);
2769
2770 pVmcs->u32RoExitReason = uExitReason;
2771
2772 /** @todo NSTVMX: IEMGetCurrentXcpt will be VM-exit interruption info. */
2773 /** @todo NSTVMX: The source event should be recorded in IDT-vectoring info
2774 * during injection. */
2775
2776 /*
2777 * Save the guest state back into the VMCS.
2778 * We only need to save the state when the VM-entry was successful.
2779 */
2780 bool const fVmentryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
2781 if (!fVmentryFailed)
2782 {
2783 iemVmxVmexitSaveGuestState(pVCpu, uExitReason);
2784 int rc = iemVmxVmexitSaveGuestAutoMsrs(pVCpu, uExitReason);
2785 if (RT_SUCCESS(rc))
2786 { /* likely */ }
2787 else
2788 return iemVmxAbort(pVCpu, VMXABORT_SAVE_GUEST_MSRS);
2789 }
2790 else
2791 {
2792 /* Restore force-flags that may or may not have been cleared as part of the failed VM-entry. */
2793 iemVmxVmexitRestoreForceFlags(pVCpu);
2794 }
2795
2796 /*
2797 * The high bits of the VM-exit reason are only relevant when the VM-exit occurs in
2798 * enclave mode/SMM which we don't support yet. If we ever add support for it, we can
2799 * pass just the lower bits, till then an assert should suffice.
2800 */
2801 Assert(!RT_HI_U16(uExitReason));
2802
2803 VBOXSTRICTRC rcStrict = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
2804 if (RT_FAILURE(rcStrict))
2805 LogFunc(("Loading host-state failed. uExitReason=%u rc=%Rrc\n", uExitReason, VBOXSTRICTRC_VAL(rcStrict)));
2806
2807 /* We're no longer in nested-guest execution mode. */
2808 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
2809
2810 Assert(rcStrict == VINF_SUCCESS);
2811 return VINF_VMX_VMEXIT;
2812}
2813
2814
2815/**
2816 * VMX VM-exit handler for VM-exits due to instruction execution.
2817 *
2818 * This is intended for instructions where the caller provides all the relevant
2819 * VM-exit information.
2820 *
2821 * @returns Strict VBox status code.
2822 * @param pVCpu The cross context virtual CPU structure.
2823 * @param pExitInfo Pointer to the VM-exit instruction information struct.
2824 */
2825DECLINLINE(VBOXSTRICTRC) iemVmxVmexitInstrWithInfo(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
2826{
2827 /*
2828 * For instructions where any of the following fields are not applicable:
2829 * - VM-exit instruction info. is undefined.
2830 * - VM-exit qualification must be cleared.
2831 * - VM-exit guest-linear address is undefined.
2832 * - VM-exit guest-physical address is undefined.
2833 *
2834 * The VM-exit instruction length is mandatory for all VM-exits that are caused by
2835 * instruction execution. For VM-exits that are not due to instruction execution this
2836 * field is undefined.
2837 *
2838 * In our implementation in IEM, all undefined fields are generally cleared. However,
2839 * if the caller supplies information (from say the physical CPU directly) it is
2840 * then possible that the undefined fields are not cleared.
2841 *
2842 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2843 * See Intel spec. 27.2.4 "Information for VM Exits Due to Instruction Execution".
2844 */
2845 Assert(pExitInfo);
2846 AssertMsg(pExitInfo->uReason <= VMX_EXIT_MAX, ("uReason=%u\n", pExitInfo->uReason));
2847 AssertMsg(pExitInfo->cbInstr >= 1 && pExitInfo->cbInstr <= 15,
2848 ("uReason=%u cbInstr=%u\n", pExitInfo->uReason, pExitInfo->cbInstr));
2849
2850 /* Update all the relevant fields from the VM-exit instruction information struct. */
2851 iemVmxVmcsSetExitInstrInfo(pVCpu, pExitInfo->InstrInfo.u);
2852 iemVmxVmcsSetExitQual(pVCpu, pExitInfo->u64Qual);
2853 iemVmxVmcsSetExitGuestLinearAddr(pVCpu, pExitInfo->u64GuestLinearAddr);
2854 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, pExitInfo->u64GuestPhysAddr);
2855 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
2856
2857 /* Perform the VM-exit. */
2858 return iemVmxVmexit(pVCpu, pExitInfo->uReason);
2859}
2860
2861
2862/**
2863 * VMX VM-exit handler for VM-exits due to instruction execution.
2864 *
2865 * This is intended for instructions that only provide the VM-exit instruction
2866 * length.
2867 *
2868 * @param pVCpu The cross context virtual CPU structure.
2869 * @param uExitReason The VM-exit reason.
2870 * @param cbInstr The instruction length in bytes.
2871 */
2872IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstr(PVMCPU pVCpu, uint32_t uExitReason, uint8_t cbInstr)
2873{
2874 VMXVEXITINFO ExitInfo;
2875 RT_ZERO(ExitInfo);
2876 ExitInfo.uReason = uExitReason;
2877 ExitInfo.cbInstr = cbInstr;
2878
2879#ifdef VBOX_STRICT
2880 /* To prevent us from shooting ourselves in the foot. Maybe remove later. */
2881 switch (uExitReason)
2882 {
2883 case VMX_EXIT_INVEPT:
2884 case VMX_EXIT_INVPCID:
2885 case VMX_EXIT_LDTR_TR_ACCESS:
2886 case VMX_EXIT_GDTR_IDTR_ACCESS:
2887 case VMX_EXIT_VMCLEAR:
2888 case VMX_EXIT_VMPTRLD:
2889 case VMX_EXIT_VMPTRST:
2890 case VMX_EXIT_VMREAD:
2891 case VMX_EXIT_VMWRITE:
2892 case VMX_EXIT_VMXON:
2893 case VMX_EXIT_XRSTORS:
2894 case VMX_EXIT_XSAVES:
2895 case VMX_EXIT_RDRAND:
2896 case VMX_EXIT_RDSEED:
2897 case VMX_EXIT_IO_INSTR:
2898 AssertMsgFailedReturn(("Use iemVmxVmexitInstrNeedsInfo for uExitReason=%u\n", uExitReason), VERR_IEM_IPE_5);
2899 break;
2900 }
2901#endif
2902
2903 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2904}
2905
2906
2907/**
2908 * VMX VM-exit handler for VM-exits due to instruction execution.
2909 *
2910 * This is intended for instructions that have a ModR/M byte and update the VM-exit
2911 * instruction information and VM-exit qualification fields.
2912 *
2913 * @param pVCpu The cross context virtual CPU structure.
2914 * @param uExitReason The VM-exit reason.
2915 * @param uInstrId The instruction identity (VMXINSTRID_XXX).
2916 * @param cbInstr The instruction length in bytes.
2917 *
2918 * @remarks Do not use this for INS/OUTS instructions.
2919 */
2920IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrNeedsInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr)
2921{
2922 VMXVEXITINFO ExitInfo;
2923 RT_ZERO(ExitInfo);
2924 ExitInfo.uReason = uExitReason;
2925 ExitInfo.cbInstr = cbInstr;
2926
2927 /*
2928 * Update the VM-exit qualification field with displacement bytes.
2929 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2930 */
2931 switch (uExitReason)
2932 {
2933 case VMX_EXIT_INVEPT:
2934 case VMX_EXIT_INVPCID:
2935 case VMX_EXIT_LDTR_TR_ACCESS:
2936 case VMX_EXIT_GDTR_IDTR_ACCESS:
2937 case VMX_EXIT_VMCLEAR:
2938 case VMX_EXIT_VMPTRLD:
2939 case VMX_EXIT_VMPTRST:
2940 case VMX_EXIT_VMREAD:
2941 case VMX_EXIT_VMWRITE:
2942 case VMX_EXIT_VMXON:
2943 case VMX_EXIT_XRSTORS:
2944 case VMX_EXIT_XSAVES:
2945 case VMX_EXIT_RDRAND:
2946 case VMX_EXIT_RDSEED:
2947 {
2948 /* Construct the VM-exit instruction information. */
2949 RTGCPTR GCPtrDisp;
2950 uint32_t const uInstrInfo = iemVmxGetExitInstrInfo(pVCpu, uExitReason, uInstrId, &GCPtrDisp);
2951
2952 /* Update the VM-exit instruction information. */
2953 ExitInfo.InstrInfo.u = uInstrInfo;
2954
2955 /* Update the VM-exit qualification. */
2956 ExitInfo.u64Qual = GCPtrDisp;
2957 break;
2958 }
2959
2960 default:
2961 AssertMsgFailedReturn(("Use instruction-specific handler\n"), VERR_IEM_IPE_5);
2962 break;
2963 }
2964
2965 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2966}
2967
2968
2969/**
2970 * Checks whether an I/O instruction for the given port is intercepted (causes a
2971 * VM-exit) or not.
2972 *
2973 * @returns @c true if the instruction is intercepted, @c false otherwise.
2974 * @param pVCpu The cross context virtual CPU structure.
2975 * @param u16Port The I/O port being accessed by the instruction.
2976 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
2977 */
2978IEM_STATIC bool iemVmxIsIoInterceptSet(PVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
2979{
2980 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2981 Assert(pVmcs);
2982
2983 /*
2984 * Check whether the I/O instruction must cause a VM-exit or not.
2985 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2986 */
2987 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_UNCOND_IO_EXIT)
2988 return true;
2989
2990 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
2991 {
2992 uint8_t const *pbIoBitmapA = (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvIoBitmap);
2993 uint8_t const *pbIoBitmapB = (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvIoBitmap) + VMX_V_IO_BITMAP_A_SIZE;
2994 Assert(pbIoBitmapA);
2995 Assert(pbIoBitmapB);
2996 return HMVmxGetIoBitmapPermission(pbIoBitmapA, pbIoBitmapB, u16Port, cbAccess);
2997 }
2998
2999 return false;
3000}
3001
3002
3003/**
3004 * VMX VM-exit handler for VM-exits due to INVLPG.
3005 *
3006 * @param pVCpu The cross context virtual CPU structure.
3007 * @param GCPtrPage The guest-linear address of the page being invalidated.
3008 * @param cbInstr The instruction length in bytes.
3009 */
3010IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrInvlpg(PVMCPU pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr)
3011{
3012 VMXVEXITINFO ExitInfo;
3013 RT_ZERO(ExitInfo);
3014 ExitInfo.uReason = VMX_EXIT_INVLPG;
3015 ExitInfo.cbInstr = cbInstr;
3016 ExitInfo.u64Qual = GCPtrPage;
3017 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(ExitInfo.u64Qual));
3018
3019 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3020}
3021
3022
3023/**
3024 * VMX VM-exit handler for VM-exits due to LMSW.
3025 *
3026 * @returns Strict VBox status code.
3027 * @param pVCpu The cross context virtual CPU structure.
3028 * @param uGuestCr0 The current guest CR0.
3029 * @param pu16NewMsw The machine-status word specified in LMSW's source
3030 * operand. This will be updated depending on the VMX
3031 * guest/host CR0 mask if LMSW is not intercepted.
3032 * @param GCPtrEffDst The guest-linear address of the source operand in case
3033 * of a memory operand. For register operand, pass
3034 * NIL_RTGCPTR.
3035 * @param cbInstr The instruction length in bytes.
3036 */
3037IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrLmsw(PVMCPU pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw, RTGCPTR GCPtrEffDst,
3038 uint8_t cbInstr)
3039{
3040 /*
3041 * LMSW VM-exits are subject to the CR0 guest/host mask and the CR0 read shadow.
3042 *
3043 * See Intel spec. 24.6.6 "Guest/Host Masks and Read Shadows for CR0 and CR4".
3044 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3045 */
3046 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3047 Assert(pVmcs);
3048 Assert(pu16NewMsw);
3049
3050 bool fIntercept = false;
3051 uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
3052 uint32_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u;
3053
3054 /*
3055 * LMSW can never clear CR0.PE but it may set it. Hence, we handle the
3056 * CR0.PE case first, before the rest of the bits in the MSW.
3057 *
3058 * If CR0.PE is owned by the host and CR0.PE differs between the
3059 * MSW (source operand) and the read-shadow, we must cause a VM-exit.
3060 */
3061 if ( (fGstHostMask & X86_CR0_PE)
3062 && (*pu16NewMsw & X86_CR0_PE)
3063 && !(fReadShadow & X86_CR0_PE))
3064 fIntercept = true;
3065
3066 /*
3067 * If CR0.MP, CR0.EM or CR0.TS is owned by the host, and the corresponding
3068 * bits differ between the MSW (source operand) and the read-shadow, we must
3069 * cause a VM-exit.
3070 */
3071 uint32_t fGstHostLmswMask = fGstHostMask & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3072 if ((fReadShadow & fGstHostLmswMask) != (*pu16NewMsw & fGstHostLmswMask))
3073 fIntercept = true;
3074
3075 if (fIntercept)
3076 {
3077 Log2(("lmsw: Guest intercept -> VM-exit\n"));
3078
3079 VMXVEXITINFO ExitInfo;
3080 RT_ZERO(ExitInfo);
3081 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3082 ExitInfo.cbInstr = cbInstr;
3083
3084 bool const fMemOperand = RT_BOOL(GCPtrEffDst != NIL_RTGCPTR);
3085 if (fMemOperand)
3086 {
3087 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(GCPtrEffDst));
3088 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
3089 }
3090
3091 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
3092 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_LMSW)
3093 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_OP, fMemOperand)
3094 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_DATA, *pu16NewMsw);
3095
3096 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3097 }
3098
3099 /*
3100 * If LMSW did not cause a VM-exit, any CR0 bits in the range 0:3 that are set in the
3101 * CR0 guest/host mask must be left unmodified.
3102 *
3103 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3104 */
3105 fGstHostLmswMask = fGstHostMask & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3106 *pu16NewMsw = (uGuestCr0 & fGstHostLmswMask) | (*pu16NewMsw & ~fGstHostLmswMask);
3107
3108 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3109}
3110
3111
3112/**
3113 * VMX VM-exit handler for VM-exits due to CLTS.
3114 *
3115 * @returns Strict VBox status code.
3116 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the CLTS instruction did not cause a
3117 * VM-exit but must not modify the guest CR0.TS bit.
3118 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the CLTS instruction did not cause a
3119 * VM-exit and modification to the guest CR0.TS bit is allowed (subject to
3120 * CR0 fixed bits in VMX operation).
3121 * @param pVCpu The cross context virtual CPU structure.
3122 * @param cbInstr The instruction length in bytes.
3123 */
3124IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrClts(PVMCPU pVCpu, uint8_t cbInstr)
3125{
3126 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3127 Assert(pVmcs);
3128
3129 uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
3130 uint32_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u;
3131
3132 /*
3133 * If CR0.TS is owned by the host:
3134 * - If CR0.TS is set in the read-shadow, we must cause a VM-exit.
3135 * - If CR0.TS is cleared in the read-shadow, no VM-exit is caused and the
3136 * CLTS instruction completes without clearing CR0.TS.
3137 *
3138 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3139 */
3140 if (fGstHostMask & X86_CR0_TS)
3141 {
3142 if (fReadShadow & X86_CR0_TS)
3143 {
3144 Log2(("clts: Guest intercept -> VM-exit\n"));
3145
3146 VMXVEXITINFO ExitInfo;
3147 RT_ZERO(ExitInfo);
3148 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3149 ExitInfo.cbInstr = cbInstr;
3150
3151 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
3152 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_CLTS);
3153 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3154 }
3155
3156 return VINF_VMX_MODIFIES_BEHAVIOR;
3157 }
3158
3159 /*
3160 * If CR0.TS is not owned by the host, the CLTS instruction operates normally
3161 * and may modify CR0.TS (subject to CR0 fixed bits in VMX operation).
3162 */
3163 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3164}
3165
3166
3167/**
3168 * VMX VM-exit handler for VM-exits due to 'Mov CR0,GReg' and 'Mov CR4,GReg'
3169 * (CR0/CR4 write).
3170 *
3171 * @returns Strict VBox status code.
3172 * @param pVCpu The cross context virtual CPU structure.
3173 * @param iCrReg The control register (either CR0 or CR4).
3175 * @param puNewCrX Pointer to the new CR0/CR4 value. Will be updated
3176 * if no VM-exit is caused.
3177 * @param iGReg The general register from which the CR0/CR4 value is
3178 * being loaded.
3179 * @param cbInstr The instruction length in bytes.
3180 */
3181IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr0Cr4(PVMCPU pVCpu, uint8_t iCrReg, uint64_t *puNewCrX, uint8_t iGReg,
3182 uint8_t cbInstr)
3183{
3184 Assert(puNewCrX);
3185 Assert(iCrReg == 0 || iCrReg == 4);
3186
3187 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3188 Assert(pVmcs);
3189
3190 uint64_t uGuestCrX;
3191 uint64_t fGstHostMask;
3192 uint64_t fReadShadow;
3193 if (iCrReg == 0)
3194 {
3195 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
3196 uGuestCrX = pVCpu->cpum.GstCtx.cr0;
3197 fGstHostMask = pVmcs->u64Cr0Mask.u;
3198 fReadShadow = pVmcs->u64Cr0ReadShadow.u;
3199 }
3200 else
3201 {
3202 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
3203 uGuestCrX = pVCpu->cpum.GstCtx.cr4;
3204 fGstHostMask = pVmcs->u64Cr4Mask.u;
3205 fReadShadow = pVmcs->u64Cr4ReadShadow.u;
3206 }
3207
3208 /*
3209 * For any CR0/CR4 bit owned by the host (in the CR0/CR4 guest/host mask), if the
3210 * corresponding bits differ between the source operand and the read-shadow,
3211 * we must cause a VM-exit.
3212 *
3213 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3214 */
3215 if ((fReadShadow & fGstHostMask) != (*puNewCrX & fGstHostMask))
3216 {
3217 Log2(("mov_Cr_Rd: (CR%u) Guest intercept -> VM-exit\n", iCrReg));
3218
3219 VMXVEXITINFO ExitInfo;
3220 RT_ZERO(ExitInfo);
3221 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3222 ExitInfo.cbInstr = cbInstr;
3223
3224 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, iCrReg)
3225 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3226 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3227 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3228 }
3229
3230 /*
3231 * If the Mov-to-CR0/CR4 did not cause a VM-exit, any bits owned by the host
3232 * must not be modified by the instruction.
3233 *
3234 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3235 */
3236 *puNewCrX = (uGuestCrX & fGstHostMask) | (*puNewCrX & ~fGstHostMask);
3237
3238 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3239}
3240
3241
3242/**
3243 * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR3' (CR3 read).
3244 *
3245 * @returns VBox strict status code.
3246 * @param pVCpu The cross context virtual CPU structure.
3247 * @param iGReg The general register to which the CR3 value is being stored.
3248 * @param cbInstr The instruction length in bytes.
3249 */
3250IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3251{
3252 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3253 Assert(pVmcs);
3254 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
3255
3256 /*
3257 * If the CR3-store exiting control is set, we must cause a VM-exit.
3258 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3259 */
3260 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT)
3261 {
3262 Log2(("mov_Rd_Cr: (CR3) Guest intercept -> VM-exit\n"));
3263
3264 VMXVEXITINFO ExitInfo;
3265 RT_ZERO(ExitInfo);
3266 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3267 ExitInfo.cbInstr = cbInstr;
3268
3269 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
3270 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ)
3271 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3272 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3273 }
3274
3275 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3276}
3277
3278
3279/**
3280 * VMX VM-exit handler for VM-exits due to 'Mov CR3,GReg' (CR3 write).
3281 *
3282 * @returns VBox strict status code.
3283 * @param pVCpu The cross context virtual CPU structure.
3284 * @param uNewCr3 The new CR3 value.
3285 * @param iGReg The general register from which the CR3 value is being
3286 * loaded.
3287 * @param cbInstr The instruction length in bytes.
3288 */
3289IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr3(PVMCPU pVCpu, uint64_t uNewCr3, uint8_t iGReg, uint8_t cbInstr)
3290{
3291 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3292 Assert(pVmcs);
3293
3294 /*
3295 * If the CR3-load exiting control is set and the new CR3 value does not
3296 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
3297 *
3298 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3299 */
3300 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR3_LOAD_EXIT)
3301 {
3302        uint32_t const uCr3TargetCount = pVmcs->u32Cr3TargetCount;
3303        Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);
3304
3305        /* Cause a VM-exit if the new CR3 matches none of the CR3-target values (including when the count is 0). */
3306        bool fIntercept = true;
3307        for (uint32_t idxCr3Target = 0; idxCr3Target < uCr3TargetCount && fIntercept; idxCr3Target++)
3308            fIntercept = uNewCr3 != iemVmxVmcsGetCr3TargetValue(pVmcs, idxCr3Target);
3309        if (fIntercept)
3310        {
3311            Log2(("mov_Cr_Rd: (CR3) Guest intercept -> VM-exit\n"));
3312
3313            VMXVEXITINFO ExitInfo;
3314            RT_ZERO(ExitInfo);
3315            ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3316            ExitInfo.cbInstr = cbInstr;
3317
3318            ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
3319                             | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3320                             | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3321            return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3322        }
3323 }
3324
3325 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3326}
3327
3328
3329/**
3330 * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR8' (CR8 read).
3331 *
3332 * @returns VBox strict status code.
3333 * @param pVCpu The cross context virtual CPU structure.
3334 * @param iGReg The general register to which the CR8 value is being stored.
3335 * @param cbInstr The instruction length in bytes.
3336 */
3337IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr8(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3338{
3339 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3340 Assert(pVmcs);
3341
3342 /*
3343 * If the CR8-store exiting control is set, we must cause a VM-exit.
3344 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3345 */
3346 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR8_STORE_EXIT)
3347 {
3348 Log2(("mov_Rd_Cr: (CR8) Guest intercept -> VM-exit\n"));
3349
3350 VMXVEXITINFO ExitInfo;
3351 RT_ZERO(ExitInfo);
3352 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3353 ExitInfo.cbInstr = cbInstr;
3354
3355 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
3356 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ)
3357 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3358 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3359 }
3360
3361 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3362}
3363
3364
3365/**
3366 * VMX VM-exit handler for VM-exits due to 'Mov CR8,GReg' (CR8 write).
3367 *
3368 * @returns VBox strict status code.
3369 * @param pVCpu The cross context virtual CPU structure.
3370 * @param iGReg The general register from which the CR8 value is being
3371 * loaded.
3372 * @param cbInstr The instruction length in bytes.
3373 */
3374IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr8(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3375{
3376 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3377 Assert(pVmcs);
3378
3379 /*
3380 * If the CR8-load exiting control is set, we must cause a VM-exit.
3381 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3382 */
3383 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR8_LOAD_EXIT)
3384 {
3385 Log2(("mov_Cr_Rd: (CR8) Guest intercept -> VM-exit\n"));
3386
3387 VMXVEXITINFO ExitInfo;
3388 RT_ZERO(ExitInfo);
3389 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3390 ExitInfo.cbInstr = cbInstr;
3391
3392 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
3393 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3394 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3395 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3396 }
3397
3398 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3399}
3400
3401
3402/**
3403 * VMX VM-exit handler for VM-exits due to 'Mov DRx,GReg' (DRx write) and 'Mov
3404 * GReg,DRx' (DRx read).
3405 *
3406 * @returns VBox strict status code.
3407 * @param pVCpu The cross context virtual CPU structure.
3408 * @param uInstrId The instruction identity (VMXINSTRID_MOV_TO_DRX or
3409 * VMXINSTRID_MOV_FROM_DRX).
3410 * @param iDrReg The debug register being accessed.
3411 * @param iGReg The general register to/from which the DRx value is being
3412 * stored/loaded.
3413 * @param cbInstr The instruction length in bytes.
3414 */
3415IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovDrX(PVMCPU pVCpu, VMXINSTRID uInstrId, uint8_t iDrReg, uint8_t iGReg,
3416 uint8_t cbInstr)
3417{
3418 Assert(iDrReg <= 7);
3419 Assert(uInstrId == VMXINSTRID_MOV_TO_DRX || uInstrId == VMXINSTRID_MOV_FROM_DRX);
3420
3421 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3422 Assert(pVmcs);
3423
3424 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT)
3425 {
3426 uint32_t const uDirection = uInstrId == VMXINSTRID_MOV_TO_DRX ? VMX_EXIT_QUAL_DRX_DIRECTION_WRITE
3427 : VMX_EXIT_QUAL_DRX_DIRECTION_READ;
3428 VMXVEXITINFO ExitInfo;
3429 RT_ZERO(ExitInfo);
3430 ExitInfo.uReason = VMX_EXIT_MOV_DRX;
3431 ExitInfo.cbInstr = cbInstr;
3432 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_REGISTER, iDrReg)
3433 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_DIRECTION, uDirection)
3434 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_GENREG, iGReg);
3435 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3436 }
3437
3438 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3439}
3440
3441
3442/**
3443 * VMX VM-exit handler for VM-exits due to I/O instructions (IN and OUT).
3444 *
3445 * @returns VBox strict status code.
3446 * @param pVCpu The cross context virtual CPU structure.
3447 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_IO_IN or
3448 * VMXINSTRID_IO_OUT).
3449 * @param u16Port The I/O port being accessed.
3450 * @param fImm Whether the I/O port was encoded using an immediate operand
3451 * or the implicit DX register.
3452 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3453 * @param cbInstr The instruction length in bytes.
3454 */
3455IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrIo(PVMCPU pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, bool fImm, uint8_t cbAccess,
3456 uint8_t cbInstr)
3457{
3458 Assert(uInstrId == VMXINSTRID_IO_IN || uInstrId == VMXINSTRID_IO_OUT);
3459 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3460
3461 bool const fIntercept = iemVmxIsIoInterceptSet(pVCpu, u16Port, cbAccess);
3462 if (fIntercept)
3463 {
3464 uint32_t const uDirection = uInstrId == VMXINSTRID_IO_IN ? VMX_EXIT_QUAL_IO_DIRECTION_IN
3465 : VMX_EXIT_QUAL_IO_DIRECTION_OUT;
3466 VMXVEXITINFO ExitInfo;
3467 RT_ZERO(ExitInfo);
3468 ExitInfo.uReason = VMX_EXIT_IO_INSTR;
3469 ExitInfo.cbInstr = cbInstr;
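        /* The I/O access width in the exit qualification is encoded as size minus one: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes. */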
3470 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_WIDTH, cbAccess - 1)
3471 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_DIRECTION, uDirection)
3472 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_ENCODING, fImm)
3473 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_PORT, u16Port);
3474 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3475 }
3476
3477 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3478}
3479
3480
3481/**
3482 * VMX VM-exit handler for VM-exits due to string I/O instructions (INS and OUTS).
3483 *
3484 * @returns VBox strict status code.
3485 * @param pVCpu The cross context virtual CPU structure.
3486 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_IO_INS or
3487 * VMXINSTRID_IO_OUTS).
3488 * @param u16Port The I/O port being accessed.
3489 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3490 * @param fRep Whether the instruction has a REP prefix or not.
3491 * @param ExitInstrInfo The VM-exit instruction info. field.
3492 * @param cbInstr The instruction length in bytes.
3493 */
3494IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrStrIo(PVMCPU pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, uint8_t cbAccess, bool fRep,
3495 VMXEXITINSTRINFO ExitInstrInfo, uint8_t cbInstr)
3496{
3497 Assert(uInstrId == VMXINSTRID_IO_INS || uInstrId == VMXINSTRID_IO_OUTS);
3498 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3499 Assert(ExitInstrInfo.StrIo.iSegReg < X86_SREG_COUNT);
3500 Assert(ExitInstrInfo.StrIo.u3AddrSize == 0 || ExitInstrInfo.StrIo.u3AddrSize == 1 || ExitInstrInfo.StrIo.u3AddrSize == 2);
3501 Assert(uInstrId != VMXINSTRID_IO_INS || ExitInstrInfo.StrIo.iSegReg == X86_SREG_ES);
3502
3503 bool const fIntercept = iemVmxIsIoInterceptSet(pVCpu, u16Port, cbAccess);
3504 if (fIntercept)
3505 {
3506 /*
3507 * Figure out the guest-linear address and the direction bit (INS/OUTS).
3508 */
3509 /** @todo r=ramshankar: Is there something in IEM that already does this? */
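        /* The address-size field in the VM-exit instruction info. is encoded as: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit. */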
3510 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
3511 uint8_t const iSegReg = ExitInstrInfo.StrIo.iSegReg;
3512 uint8_t const uAddrSize = ExitInstrInfo.StrIo.u3AddrSize;
3513 uint64_t const uAddrSizeMask = s_auAddrSizeMasks[uAddrSize];
3514
3515 uint32_t uDirection;
3516 uint64_t uGuestLinearAddr;
3517 if (uInstrId == VMXINSTRID_IO_INS)
3518 {
3519 uDirection = VMX_EXIT_QUAL_IO_DIRECTION_IN;
3520 uGuestLinearAddr = pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base + (pVCpu->cpum.GstCtx.rdi & uAddrSizeMask);
3521 }
3522 else
3523 {
3524 uDirection = VMX_EXIT_QUAL_IO_DIRECTION_OUT;
3525 uGuestLinearAddr = pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base + (pVCpu->cpum.GstCtx.rsi & uAddrSizeMask);
3526 }
3527
3528 /*
3529         * If the segment is unusable, the guest-linear address is undefined.
3530         * We clear it for consistency.
3531 *
3532 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
3533 */
3534 if (pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable)
3535 uGuestLinearAddr = 0;
3536
3537 VMXVEXITINFO ExitInfo;
3538 RT_ZERO(ExitInfo);
3539 ExitInfo.uReason = VMX_EXIT_IO_INSTR;
3540 ExitInfo.cbInstr = cbInstr;
3541 ExitInfo.InstrInfo = ExitInstrInfo;
3542 ExitInfo.u64GuestLinearAddr = uGuestLinearAddr;
3543 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_WIDTH, cbAccess - 1)
3544 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_DIRECTION, uDirection)
3545 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_IS_STRING, 1)
3546 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_IS_REP, fRep)
3547 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_ENCODING, VMX_EXIT_QUAL_IO_ENCODING_DX)
3548 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_PORT, u16Port);
3549 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3550 }
3551
3552 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3553}
3554
3555
3556/**
3557 * VMX VM-exit handler for VM-exits due to MWAIT.
3558 *
3559 * @returns VBox strict status code.
3560 * @param pVCpu The cross context virtual CPU structure.
3561 * @param fMonitorHwArmed Whether the address-range monitor hardware is armed.
3562 * @param cbInstr The instruction length in bytes.
3563 */
3564IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMwait(PVMCPU pVCpu, bool fMonitorHwArmed, uint8_t cbInstr)
3565{
3566 VMXVEXITINFO ExitInfo;
3567 RT_ZERO(ExitInfo);
3568 ExitInfo.uReason = VMX_EXIT_MWAIT;
3569 ExitInfo.cbInstr = cbInstr;
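    /* Bit 0 of the exit qualification indicates whether address-range monitoring hardware was armed. See Intel spec. 27.2.1 "Basic VM-Exit Information". */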
3570 ExitInfo.u64Qual = fMonitorHwArmed;
3571 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3572}
3573
3574
3575/**
3576 * VMX VM-exit handler for VM-exits due to PAUSE.
3577 *
3578 * @returns VBox strict status code.
3579 * @param pVCpu The cross context virtual CPU structure.
3580 * @param cbInstr The instruction length in bytes.
3581 */
3582IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrPause(PVMCPU pVCpu, uint8_t cbInstr)
3583{
3584 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3585 Assert(pVmcs);
3586
3587 /*
3588 * The PAUSE VM-exit is controlled by the "PAUSE exiting" control and the
3589 * "PAUSE-loop exiting" control.
3590 *
3591     * The PLE-Gap is the maximum number of TSC ticks allowed between two successive
3592     * executions of PAUSE for them to be considered part of the same pause loop. The
3593     * PLE-Window is the maximum number of TSC ticks the guest may spend executing in a
3594     * pause loop before we must cause a VM-exit.
3595 *
3596 * See Intel spec. 24.6.13 "Controls for PAUSE-Loop Exiting".
3597 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3598 */
3599 bool fIntercept = false;
3600 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_PAUSE_EXIT)
3601 fIntercept = true;
3602 else if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
3603 && pVCpu->iem.s.uCpl == 0)
3604 {
3605 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
3606
3607 /*
3608         * A previous-PAUSE-tick value of 0 identifies the first execution of a
3609         * PAUSE instruction after VM-entry at CPL 0. Per Intel, we must consider
3610         * this to be the first execution of PAUSE in a new loop.
3611         *
3612         * For all subsequent recordings of the previous-PAUSE-tick we OR in 1 so
3613         * that the recorded value can never be zero, ruling out the TSC
3614         * wrap-around cases at 0.
3615 */
3616 uint64_t *puFirstPauseLoopTick = &pVCpu->cpum.GstCtx.hwvirt.vmx.uFirstPauseLoopTick;
3617 uint64_t *puPrevPauseTick = &pVCpu->cpum.GstCtx.hwvirt.vmx.uPrevPauseTick;
3618 uint64_t const uTick = TMCpuTickGet(pVCpu);
3619 uint32_t const uPleGap = pVmcs->u32PleGap;
3620 uint32_t const uPleWindow = pVmcs->u32PleWindow;
3621 if ( *puPrevPauseTick == 0
3622 || uTick - *puPrevPauseTick > uPleGap)
3623 *puFirstPauseLoopTick = uTick;
3624 else if (uTick - *puFirstPauseLoopTick > uPleWindow)
3625 fIntercept = true;
3626
3627 *puPrevPauseTick = uTick | 1;
3628 }
3629
3630 if (fIntercept)
3631 {
3632 VMXVEXITINFO ExitInfo;
3633 RT_ZERO(ExitInfo);
3634 ExitInfo.uReason = VMX_EXIT_PAUSE;
3635 ExitInfo.cbInstr = cbInstr;
3636 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3637 }
3638
3639 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3640}
3641
3642
3643/**
3644 * VMX VM-exit handler for VM-exits due to task switches.
3645 *
3646 * @returns VBox strict status code.
3647 * @param pVCpu The cross context virtual CPU structure.
3648 * @param enmTaskSwitch The cause of the task switch.
3649 * @param SelNewTss The selector of the new TSS.
3650 * @param cbInstr The instruction length in bytes.
3651 */
3652IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr)
3653{
3654 /*
3655 * Task-switch VM-exits are unconditional and provide the VM-exit qualification.
3656 *
3657     * If the cause of the task switch is the execution of CALL, IRET or JMP, or the
3658     * delivery of an exception generated by one of these instructions that leads to a
3659     * task switch through a task gate in the IDT, we need to provide the VM-exit
3660     * instruction length. Any other means of invoking a task-switch VM-exit leaves the
3661     * VM-exit instruction length field undefined.
3662 *
3663 * See Intel spec. 25.2 "Other Causes Of VM Exits".
3664 * See Intel spec. 27.2.4 "Information for VM Exits Due to Instruction Execution".
3665 */
3666 Assert(cbInstr <= 15);
3667
3668 uint8_t uType;
3669 switch (enmTaskSwitch)
3670 {
3671 case IEMTASKSWITCH_CALL: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_CALL; break;
3672 case IEMTASKSWITCH_IRET: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IRET; break;
3673 case IEMTASKSWITCH_JUMP: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_JMP; break;
3674 case IEMTASKSWITCH_INT_XCPT: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT; break;
3675 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3676 }
3677
3678 uint64_t const uExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_NEW_TSS, SelNewTss)
3679 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_SOURCE, uType);
3680 iemVmxVmcsSetExitQual(pVCpu, uExitQual);
3681 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
3682 return iemVmxVmexit(pVCpu, VMX_EXIT_TASK_SWITCH);
3683}
3684
3685
3686/**
3687 * VMX VM-exit handler for VM-exits due to expiry of the preemption timer.
3688 *
3689 * @returns VBox strict status code.
3690 * @param pVCpu The cross context virtual CPU structure.
3691 */
3692IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPU pVCpu)
3693{
3694 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3695 Assert(pVmcs);
3696 Assert(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER);
3697 NOREF(pVmcs);
3698
3699 iemVmxVmcsSetExitQual(pVCpu, 0);
3700 return iemVmxVmexit(pVCpu, VMX_EXIT_PREEMPT_TIMER);
3701}
3702
3703
3704/**
3705 * VMX VM-exit handler for VM-exits due to external interrupts.
3706 *
3707 * @returns VBox strict status code.
3708 * @param pVCpu The cross context virtual CPU structure.
3709 * @param uVector The external interrupt vector.
3710 * @param fIntPending Whether the external interrupt is pending or
3711 * acknowledged in the interrupt controller.
3712 */
3713IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
3714{
3715 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3716 Assert(pVmcs);
3717
3718    /* The VM-exit is subject to the "External interrupt exiting" control being set. */
3719 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT)
3720 {
3721 if (fIntPending)
3722 {
3723 /*
3724 * If the interrupt is pending and we don't need to acknowledge the
3725 * interrupt on VM-exit, cause the VM-exit immediately.
3726 *
3727 * See Intel spec 25.2 "Other Causes Of VM Exits".
3728 */
3729 if (!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT))
3730 {
3731 iemVmxVmcsSetExitIntInfo(pVCpu, 0);
3732 iemVmxVmcsSetExitIntErrCode(pVCpu, 0);
3733 iemVmxVmcsSetExitQual(pVCpu, 0);
3734 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT);
3735 }
3736
3737 /*
3738 * If the interrupt is pending and we -do- need to acknowledge the interrupt
3739             * on VM-exit, postpone the VM-exit until the interrupt controller has
3740             * acknowledged that the interrupt has been consumed.
3741 */
3742 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3743 }
3744
3745 /*
3746 * If the interrupt is no longer pending (i.e. it has been acknowledged) and the
3747 * "External interrupt exiting" and "Acknowledge interrupt on VM-exit" controls are
3748         * both set, we cause the VM-exit now. We need to record the external interrupt that
3749 * just occurred in the VM-exit interruption information field.
3750 *
3751 * See Intel spec. 27.2.2 "Information for VM Exits Due to Vectored Events".
3752 */
3753 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
3754 {
3755 uint8_t const fNmiUnblocking = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret;
3756 uint32_t const uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, uVector)
3757 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_EXT_INT)
3758 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, fNmiUnblocking)
3759 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
3760 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
3761 iemVmxVmcsSetExitIntErrCode(pVCpu, 0);
3762 iemVmxVmcsSetExitQual(pVCpu, 0);
3763 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT);
3764 }
3765 }
3766
3767 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3768}
3769
3770
3771/**
3772 * VMX VM-exit handler for VM-exits due to startup-IPIs (SIPI).
3773 *
3774 * @returns VBox strict status code.
3775 * @param pVCpu The cross context virtual CPU structure.
3776 * @param uVector The SIPI vector.
3777 */
3778IEM_STATIC VBOXSTRICTRC iemVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)
3779{
3780 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3781 Assert(pVmcs);
3782
3783 iemVmxVmcsSetExitQual(pVCpu, uVector);
3784 return iemVmxVmexit(pVCpu, VMX_EXIT_SIPI);
3785}
3786
3787
3788/**
3789 * VMX VM-exit handler for VM-exits due to init-IPIs (INIT).
3790 *
3791 * @returns VBox strict status code.
3792 * @param pVCpu The cross context virtual CPU structure.
3793 */
3794IEM_STATIC VBOXSTRICTRC iemVmxVmexitInitIpi(PVMCPU pVCpu)
3795{
3796 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3797 Assert(pVmcs);
3798
3799 iemVmxVmcsSetExitQual(pVCpu, 0);
3800 return iemVmxVmexit(pVCpu, VMX_EXIT_INIT_SIGNAL);
3801}
3802
3803
3804/**
3805 * VMX VM-exit handler for interrupt-window VM-exits.
3806 *
3807 * @returns VBox strict status code.
3808 * @param pVCpu The cross context virtual CPU structure.
3809 */
3810IEM_STATIC VBOXSTRICTRC iemVmxVmexitIntWindow(PVMCPU pVCpu)
3811{
3812 iemVmxVmcsSetExitQual(pVCpu, 0);
3813 return iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW);
3814}
3815
3816
3817/**
3818 * VMX VM-exit handler for VM-exits due to delivery of an event.
3819 *
3820 * @returns VBox strict status code.
3821 * @param pVCpu The cross context virtual CPU structure.
3822 * @param uVector The interrupt / exception vector.
3823 * @param fFlags The flags (see IEM_XCPT_FLAGS_XXX).
3824 * @param uErrCode The error code associated with the event.
3825 * @param uCr2 The CR2 value in case of a \#PF exception.
3826 * @param cbInstr The instruction length in bytes.
3827 */
3828IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2,
3829 uint8_t cbInstr)
3830{
3831 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3832 Assert(pVmcs);
3833
3834 /*
3835 * If the event is being injected as part of VM-entry, it isn't subject to event
3836 * intercepts in the nested-guest. However, secondary exceptions that occur during
3837 * injection of any event -are- subject to event interception.
3838 *
3839 * See Intel spec. 26.5.1.2 "VM Exits During Event Injection".
3840 */
3841 if (!pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents)
3842 {
3843 /* Update the IDT-vectoring event in the VMCS as the source of the upcoming event. */
3844 uint8_t const uIdtVectoringType = iemVmxGetEventType(uVector, fFlags);
3845 uint8_t const fErrCodeValid = (fFlags & IEM_XCPT_FLAGS_ERR);
3846 uint32_t const uIdtVectoringInfo = RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_VECTOR, uVector)
3847 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_TYPE, uIdtVectoringType)
3848 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_ERR_CODE_VALID, fErrCodeValid)
3849 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_VALID, 1);
3850 iemVmxVmcsSetIdtVectoringInfo(pVCpu, uIdtVectoringInfo);
3851 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, uErrCode);
3852
3853 pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents = true;
3854 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3855 }
3856
3857 /*
3858     * We are injecting an external interrupt; check whether we need to cause a VM-exit now.
3859 * If not, the caller will continue delivery of the external interrupt as it would
3860 * normally.
3861 */
3862 if (fFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3863 {
3864 Assert(!VMX_IDT_VECTORING_INFO_IS_VALID(pVmcs->u32RoIdtVectoringInfo));
3865 return iemVmxVmexitExtInt(pVCpu, uVector, false /* fIntPending */);
3866 }
3867
3868 /*
3869 * Evaluate intercepts for hardware exceptions including #BP, #DB, #OF
3870 * generated by INT3, INT1 (ICEBP) and INTO respectively.
3871 */
3872 Assert(fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_SOFT_INT));
3873 bool fIntercept = false;
3874 bool fIsHwXcpt = false;
3875 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3876 || (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR)))
3877 {
3878 fIsHwXcpt = true;
3879 /* NMIs have a dedicated VM-execution control for causing VM-exits. */
3880 if (uVector == X86_XCPT_NMI)
3881 fIntercept = RT_BOOL(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT);
3882 else
3883 {
3884            /* Page-faults are subject to masking using the page-fault error-code mask and match. */
3885 uint32_t fXcptBitmap = pVmcs->u32XcptBitmap;
3886 if (uVector == X86_XCPT_PF)
3887 {
3888 uint32_t const fXcptPFMask = pVmcs->u32XcptPFMask;
3889 uint32_t const fXcptPFMatch = pVmcs->u32XcptPFMatch;
3890 if ((uErrCode & fXcptPFMask) != fXcptPFMatch)
3891 fXcptBitmap ^= RT_BIT(X86_XCPT_PF);
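                /* Flipping the #PF bit implements the spec rule: when the mask/match test fails, a #PF
                   causes a VM-exit only if bit 14 of the exception bitmap is 0; when it succeeds, only
                   if that bit is 1. */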
3892 }
3893
3894 /* Consult the exception bitmap for all hardware exceptions (except NMI). */
3895 if (fXcptBitmap & RT_BIT(uVector))
3896 fIntercept = true;
3897 }
3898 }
3899 /* else: Software interrupts cannot be intercepted and therefore do not cause a VM-exit. */
3900
3901 /*
3902 * Now that we've determined whether the software interrupt or hardware exception
3903 * causes a VM-exit, we need to construct the relevant VM-exit information and
3904 * cause the VM-exit.
3905 */
3906 if (fIntercept)
3907 {
3908 Assert(!(fFlags & IEM_XCPT_FLAGS_T_EXT_INT));
3909
3910 /* Construct the rest of the event related information fields and cause the VM-exit. */
3911 uint64_t uExitQual = 0;
3912 if (fIsHwXcpt)
3913 {
3914 if (uVector == X86_XCPT_PF)
3915 uExitQual = uCr2;
3916 else if (uVector == X86_XCPT_DB)
3917 {
3918 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR6);
3919 uExitQual = pVCpu->cpum.GstCtx.dr[6] & VMX_VMCS_EXIT_QUAL_VALID_MASK;
3920 }
3921 }
3922
3923 uint8_t const fNmiUnblocking = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret;
3924 uint8_t const fErrCodeValid = (fFlags & IEM_XCPT_FLAGS_ERR);
3925 uint8_t const uIntInfoType = iemVmxGetEventType(uVector, fFlags);
3926 uint32_t const uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, uVector)
3927 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, uIntInfoType)
3928 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_ERR_CODE_VALID, fErrCodeValid)
3929 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, fNmiUnblocking)
3930 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
3931 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
3932 iemVmxVmcsSetExitIntErrCode(pVCpu, uErrCode);
3933 iemVmxVmcsSetExitQual(pVCpu, uExitQual);
3934
3935 /*
3936 * For VM exits due to software exceptions (those generated by INT3 or INTO) or privileged
3937 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
3938 * length.
3939 */
3940 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3941 && (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR)))
3942 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
3943 else
3944 iemVmxVmcsSetExitInstrLen(pVCpu, 0);
3945
3946 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI);
3947 }
3948
3949 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3950}
3951
3952
3953/**
3954 * VMX VM-exit handler for VM-exits due to a triple fault.
3955 *
3956 * @returns VBox strict status code.
3957 * @param pVCpu The cross context virtual CPU structure.
3958 */
3959IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu)
3960{
3961 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3962 Assert(pVmcs);
3963 iemVmxVmcsSetExitQual(pVCpu, 0);
3964 return iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT);
3965}
3966
3967
3968/**
3969 * VMX VM-exit handler for APIC-accesses.
3970 *
3971 * @param pVCpu The cross context virtual CPU structure.
3972 * @param offAccess The offset of the register being accessed.
3973 * @param fAccess The type of access (must contain IEM_ACCESS_TYPE_READ or
3974 * IEM_ACCESS_TYPE_WRITE or IEM_ACCESS_INSTRUCTION).
3975 */
3976IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess)
3977{
3978 Assert((fAccess & IEM_ACCESS_TYPE_READ) || (fAccess & IEM_ACCESS_TYPE_WRITE) || (fAccess & IEM_ACCESS_INSTRUCTION));
3979
3980 VMXAPICACCESS enmAccess;
3981 bool const fInEventDelivery = IEMGetCurrentXcpt(pVCpu, NULL, NULL, NULL, NULL);
3982 if (fInEventDelivery)
3983 enmAccess = VMXAPICACCESS_LINEAR_EVENT_DELIVERY;
3984 else if (fAccess & IEM_ACCESS_INSTRUCTION)
3985 enmAccess = VMXAPICACCESS_LINEAR_INSTR_FETCH;
3986 else if (fAccess & IEM_ACCESS_TYPE_WRITE)
3987 enmAccess = VMXAPICACCESS_LINEAR_WRITE;
3988 else
3989        enmAccess = VMXAPICACCESS_LINEAR_READ;
3990
3991 uint64_t const uExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_OFFSET, offAccess)
3992 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_TYPE, enmAccess);
3993 iemVmxVmcsSetExitQual(pVCpu, uExitQual);
3994 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_ACCESS);
3995}
3996
3997
3998/**
3999 * VMX VM-exit handler for APIC-write VM-exits.
4000 *
4001 * @param pVCpu The cross context virtual CPU structure.
4002 * @param offApic The write to the virtual-APIC page offset that caused this
4003 * VM-exit.
4004 */
4005IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicWrite(PVMCPU pVCpu, uint16_t offApic)
4006{
4007 Assert(offApic < XAPIC_OFF_END + 4);
4008
4009 /* Write only bits 11:0 of the APIC offset into the VM-exit qualification field. */
4010 offApic &= UINT16_C(0xfff);
4011 iemVmxVmcsSetExitQual(pVCpu, offApic);
4012 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_WRITE);
4013}
4014
4015
4016/**
4017 * VMX VM-exit handler for virtualized-EOIs.
4018 *
4019 * @param pVCpu The cross context virtual CPU structure.
4020 */
4021IEM_STATIC VBOXSTRICTRC iemVmxVmexitVirtEoi(PVMCPU pVCpu, uint8_t uVector)
4022{
4023 iemVmxVmcsSetExitQual(pVCpu, uVector);
4024 return iemVmxVmexit(pVCpu, VMX_EXIT_VIRTUALIZED_EOI);
4025}
4026
4027
4028/**
4029 * Sets virtual-APIC write emulation as pending.
4030 *
4031 * @param pVCpu The cross context virtual CPU structure.
4032 * @param offApic The offset in the virtual-APIC page that was written.
4033 */
4034DECLINLINE(void) iemVmxVirtApicSetPendingWrite(PVMCPU pVCpu, uint16_t offApic)
4035{
4036 Assert(offApic < XAPIC_OFF_END + 4);
4037
4038 /*
4039 * Record the currently updated APIC offset, as we need this later for figuring
4040     * out whether to perform TPR, EOI or self-IPI virtualization as well
4041 * as for supplying the exit qualification when causing an APIC-write VM-exit.
4042 */
4043 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
4044
4045 /*
4046 * Signal that we need to perform virtual-APIC write emulation (TPR/PPR/EOI/Self-IPI
4047 * virtualization or APIC-write emulation).
4048 */
4049 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4050 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
4051}
4052
4053
4054/**
4055 * Clears any pending virtual-APIC write emulation.
4056 *
4057 * @returns The virtual-APIC offset that was written before clearing it.
4058 * @param pVCpu The cross context virtual CPU structure.
4059 */
4060DECLINLINE(uint16_t) iemVmxVirtApicClearPendingWrite(PVMCPU pVCpu)
4061{
4062    uint16_t const offVirtApicWrite = pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite;
4063 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = 0;
4064 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
4065 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
4066 return offVirtApicWrite;
4067}
4068
4069
4070/**
4071 * Reads a 32-bit register from the virtual-APIC page at the given offset.
4072 *
4073 * @returns The register from the virtual-APIC page.
4074 * @param pVCpu The cross context virtual CPU structure.
4075 * @param offReg The offset of the register being read.
4076 */
4077DECLINLINE(uint32_t) iemVmxVirtApicReadRaw32(PVMCPU pVCpu, uint16_t offReg)
4078{
4079 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
4080 uint8_t const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4081 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4082 uint32_t const uReg = *(const uint32_t *)(pbVirtApic + offReg);
4083 return uReg;
4084}
4085
4086
4087/**
4088 * Reads a 64-bit register from the virtual-APIC page at the given offset.
4089 *
4090 * @returns The register from the virtual-APIC page.
4091 * @param pVCpu The cross context virtual CPU structure.
4092 * @param offReg The offset of the register being read.
4093 */
4094DECLINLINE(uint64_t) iemVmxVirtApicReadRaw64(PVMCPU pVCpu, uint16_t offReg)
4095{
4096    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
4097 uint8_t const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4098 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4099 uint64_t const uReg = *(const uint64_t *)(pbVirtApic + offReg);
4100 return uReg;
4101}
4102
4103
4104/**
4105 * Writes a 32-bit register to the virtual-APIC page at the given offset.
4106 *
4107 * @param pVCpu The cross context virtual CPU structure.
4108 * @param offReg The offset of the register being written.
4109 * @param uReg The register value to write.
4110 */
4111DECLINLINE(void) iemVmxVirtApicWriteRaw32(PVMCPU pVCpu, uint16_t offReg, uint32_t uReg)
4112{
4113 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
4114 uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4115 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4116 *(uint32_t *)(pbVirtApic + offReg) = uReg;
4117}
4118
4119
4120/**
4121 * Writes a 64-bit register to the virtual-APIC page at the given offset.
4122 *
4123 * @param pVCpu The cross context virtual CPU structure.
4124 * @param offReg The offset of the register being written.
4125 * @param uReg The register value to write.
4126 */
4127DECLINLINE(void) iemVmxVirtApicWriteRaw64(PVMCPU pVCpu, uint16_t offReg, uint64_t uReg)
4128{
4129    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
4130 uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4131 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4132 *(uint64_t *)(pbVirtApic + offReg) = uReg;
4133}
4134
4135
4136/**
4137 * Sets the vector in a virtual-APIC 256-bit sparse register.
4138 *
4139 * @param pVCpu The cross context virtual CPU structure.
4140 * @param offReg The offset of the 256-bit sparse register.
4141 * @param uVector The vector to set.
4142 *
4143 * @remarks This is based on our APIC device code.
4144 */
4145DECLINLINE(void) iemVmxVirtApicSetVector(PVMCPU pVCpu, uint16_t offReg, uint8_t uVector)
4146{
4147 Assert(offReg == XAPIC_OFF_ISR0 || offReg == XAPIC_OFF_TMR0 || offReg == XAPIC_OFF_IRR0);
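    /* Each group of 32 vectors occupies the low 4 bytes of a 16-byte stride: byte offset = (uVector / 32) * 16, bit index = uVector % 32. */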
4148 uint8_t *pbBitmap = ((uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage)) + offReg;
4149 uint16_t const offVector = (uVector & UINT32_C(0xe0)) >> 1;
4150 uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
4151 ASMAtomicBitSet(pbBitmap + offVector, idxVectorBit);
4152}
4153
4154
4155/**
4156 * Clears the vector in a virtual-APIC 256-bit sparse register.
4157 *
4158 * @param pVCpu The cross context virtual CPU structure.
4159 * @param offReg The offset of the 256-bit sparse register.
4160 * @param uVector The vector to clear.
4161 *
4162 * @remarks This is based on our APIC device code.
4163 */
4164DECLINLINE(void) iemVmxVirtApicClearVector(PVMCPU pVCpu, uint16_t offReg, uint8_t uVector)
4165{
4166 Assert(offReg == XAPIC_OFF_ISR0 || offReg == XAPIC_OFF_TMR0 || offReg == XAPIC_OFF_IRR0);
4167 uint8_t *pbBitmap = ((uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage)) + offReg;
4168 uint16_t const offVector = (uVector & UINT32_C(0xe0)) >> 1;
4169 uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
4170 ASMAtomicBitClear(pbBitmap + offVector, idxVectorBit);
4171}
4172
4173
4174/**
4175 * Checks if a memory access to the APIC-access page must cause an APIC-access
4176 * VM-exit.
4177 *
4178 * @param pVCpu The cross context virtual CPU structure.
4179 * @param offAccess The offset of the register being accessed.
4180 * @param cbAccess The size of the access in bytes.
4181 * @param fAccess The type of access (must be IEM_ACCESS_TYPE_READ or
4182 * IEM_ACCESS_TYPE_WRITE).
4183 *
4184 * @remarks This must not be used for MSR-based APIC-access page accesses!
4185 * @sa iemVmxVirtApicAccessMsrWrite, iemVmxVirtApicAccessMsrRead.
4186 */
4187IEM_STATIC bool iemVmxVirtApicIsMemAccessIntercepted(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, uint32_t fAccess)
4188{
4189 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4190 Assert(pVmcs);
4191 Assert(fAccess == IEM_ACCESS_TYPE_READ || fAccess == IEM_ACCESS_TYPE_WRITE);
4192
4193 /*
4194 * We must cause a VM-exit if any of the following are true:
4195 * - TPR shadowing isn't active.
4196     * - The access size exceeds 32 bits.
4197     * - The access is not contained within the low 4 bytes of a 16-byte aligned offset.
4198 *
4199 * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
4200 * See Intel spec. 29.4.3.1 "Determining Whether a Write Access is Virtualized".
4201 */
4202 if ( !(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4203 || cbAccess > sizeof(uint32_t)
4204 || ((offAccess + cbAccess - 1) & 0xc)
4205 || offAccess >= XAPIC_OFF_END + 4)
4206 return true;
4207
4208 /*
4209 * If the access is part of an operation where we have already
4210 * virtualized a virtual-APIC write, we must cause a VM-exit.
4211 */
4212 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4213 return true;
4214
4215 /*
4216 * Check write accesses to the APIC-access page that cause VM-exits.
4217 */
4218 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4219 {
4220 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4221 {
4222 /*
4223 * With APIC-register virtualization, a write access to any of the
4224             * following registers is virtualized. Accessing any other register
4225 * causes a VM-exit.
4226 */
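            /* Align the access down to its 32-bit register; bits 1:0 of the offset merely select a byte within it. */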
4227 uint16_t const offAlignedAccess = offAccess & 0xfffc;
4228 switch (offAlignedAccess)
4229 {
4230 case XAPIC_OFF_ID:
4231 case XAPIC_OFF_TPR:
4232 case XAPIC_OFF_EOI:
4233 case XAPIC_OFF_LDR:
4234 case XAPIC_OFF_DFR:
4235 case XAPIC_OFF_SVR:
4236 case XAPIC_OFF_ESR:
4237 case XAPIC_OFF_ICR_LO:
4238 case XAPIC_OFF_ICR_HI:
4239 case XAPIC_OFF_LVT_TIMER:
4240 case XAPIC_OFF_LVT_THERMAL:
4241 case XAPIC_OFF_LVT_PERF:
4242 case XAPIC_OFF_LVT_LINT0:
4243 case XAPIC_OFF_LVT_LINT1:
4244 case XAPIC_OFF_LVT_ERROR:
4245 case XAPIC_OFF_TIMER_ICR:
4246 case XAPIC_OFF_TIMER_DCR:
4247 break;
4248 default:
4249 return true;
4250 }
4251 }
4252 else if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4253 {
4254 /*
4255 * With virtual-interrupt delivery, a write access to any of the
4256             * following registers is virtualized. Accessing any other register
4257 * causes a VM-exit.
4258 *
4259 * Note! The specification does not allow writing to offsets in-between
4260 * these registers (e.g. TPR + 1 byte) unlike read accesses.
4261 */
4262 switch (offAccess)
4263 {
4264 case XAPIC_OFF_TPR:
4265 case XAPIC_OFF_EOI:
4266 case XAPIC_OFF_ICR_LO:
4267 break;
4268 default:
4269 return true;
4270 }
4271 }
4272 else
4273 {
4274 /*
4275 * Without APIC-register virtualization or virtual-interrupt delivery,
4276 * only TPR accesses are virtualized.
4277 */
4278 if (offAccess == XAPIC_OFF_TPR)
4279 { /* likely */ }
4280 else
4281 return true;
4282 }
4283 }
4284 else
4285 {
4286 /*
4287 * Check read accesses to the APIC-access page that cause VM-exits.
4288 */
4289 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4290 {
4291 /*
4292 * With APIC-register virtualization, a read access to any of the
4293             * following registers is virtualized. Accessing any other register
4294 * causes a VM-exit.
4295 */
4296 uint16_t const offAlignedAccess = offAccess & 0xfffc;
4297 switch (offAlignedAccess)
4298 {
4299 /** @todo r=ramshankar: What about XAPIC_OFF_LVT_CMCI? */
4300 case XAPIC_OFF_ID:
4301 case XAPIC_OFF_VERSION:
4302 case XAPIC_OFF_TPR:
4303 case XAPIC_OFF_EOI:
4304 case XAPIC_OFF_LDR:
4305 case XAPIC_OFF_DFR:
4306 case XAPIC_OFF_SVR:
4307 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
4308 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
4309 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
4310 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
4311 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
4312 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
4313 case XAPIC_OFF_ESR:
4314 case XAPIC_OFF_ICR_LO:
4315 case XAPIC_OFF_ICR_HI:
4316 case XAPIC_OFF_LVT_TIMER:
4317 case XAPIC_OFF_LVT_THERMAL:
4318 case XAPIC_OFF_LVT_PERF:
4319 case XAPIC_OFF_LVT_LINT0:
4320 case XAPIC_OFF_LVT_LINT1:
4321 case XAPIC_OFF_LVT_ERROR:
4322 case XAPIC_OFF_TIMER_ICR:
4323 case XAPIC_OFF_TIMER_DCR:
4324 break;
4325 default:
4326 return true;
4327 }
4328 }
4329 else
4330 {
4331 /* Without APIC-register virtualization, only TPR accesses are virtualized. */
4332 if (offAccess == XAPIC_OFF_TPR)
4333 { /* likely */ }
4334 else
4335 return true;
4336 }
4337 }
4338
4339 /* The APIC-access is virtualized, does not cause a VM-exit. */
4340 return false;
4341}
4342
4343
4344/**
4345 * Virtualizes a memory-based APIC-access where the address is not used to access
4346 * memory.
4347 *
4348 * This is for instructions like MONITOR, CLFLUSH, CLFLUSHOPT, ENTER which may cause
4349 * page-faults but do not use the address to access memory.
4350 *
4351 * @param pVCpu The cross context virtual CPU structure.
4352 * @param pGCPhysAccess Pointer to the guest-physical address used.
4353 */
4354IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPU pVCpu, PRTGCPHYS pGCPhysAccess)
4355{
4356 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4357 Assert(pVmcs);
4358 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
4359 Assert(pGCPhysAccess);
4360
4361 RTGCPHYS const GCPhysAccess = *pGCPhysAccess & ~(RTGCPHYS)PAGE_OFFSET_MASK;
4362 RTGCPHYS const GCPhysApic = pVmcs->u64AddrApicAccess.u;
4363 Assert(!(GCPhysApic & PAGE_OFFSET_MASK));
4364
4365 if (GCPhysAccess == GCPhysApic)
4366 {
4367 uint16_t const offAccess = *pGCPhysAccess & PAGE_OFFSET_MASK;
4368 uint32_t const fAccess = IEM_ACCESS_TYPE_READ;
4369 uint16_t const cbAccess = 1;
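        /* The address is not actually used to access memory, so treat this as a 1-byte read when checking whether the access is intercepted. */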
4370 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess);
4371 if (fIntercept)
4372 return iemVmxVmexitApicAccess(pVCpu, offAccess, fAccess);
4373
4374 *pGCPhysAccess = GCPhysApic | offAccess;
4375 return VINF_VMX_MODIFIES_BEHAVIOR;
4376 }
4377
4378 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4379}
4380
4381
4382/**
4383 * Virtualizes a memory-based APIC-access.
4384 *
4385 * @returns VBox strict status code.
4386 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the access was virtualized.
4387 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
4388 *
4389 * @param pVCpu The cross context virtual CPU structure.
4390 * @param offAccess The offset of the register being accessed (within the
4391 * APIC-access page).
4392 * @param cbAccess The size of the access in bytes.
4393 * @param pvData Pointer to the data being written or where to store the data
4394 * being read.
4395 * @param fAccess The type of access (must contain IEM_ACCESS_TYPE_READ or
4396 * IEM_ACCESS_TYPE_WRITE or IEM_ACCESS_INSTRUCTION).
4397 */
4398IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData,
4399 uint32_t fAccess)
4400{
4401 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4402 Assert(pVmcs);
4403 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
4404 Assert(pvData);
4405 Assert( (fAccess & IEM_ACCESS_TYPE_READ)
4406 || (fAccess & IEM_ACCESS_TYPE_WRITE)
4407 || (fAccess & IEM_ACCESS_INSTRUCTION));
4408
4409 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess);
4410 if (fIntercept)
4411 return iemVmxVmexitApicAccess(pVCpu, offAccess, fAccess);
4412
4413 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4414 {
4415 /*
4416 * A write access to the APIC-access page that is virtualized (rather than
4417 * causing a VM-exit) writes data to the virtual-APIC page.
4418 */
4419 uint32_t const u32Data = *(uint32_t *)pvData;
4420 iemVmxVirtApicWriteRaw32(pVCpu, offAccess, u32Data);
4421
4422 /*
4423 * Record the currently updated APIC offset, as we need this later for figuring
4424      * out whether to perform TPR, EOI or self-IPI virtualization as well
4425 * as for supplying the exit qualification when causing an APIC-write VM-exit.
4426 *
4427 * After completion of the current operation, we need to perform TPR virtualization,
4428 * EOI virtualization or APIC-write VM-exit depending on which register was written.
4429 *
4430 * The current operation may be a REP-prefixed string instruction, execution of any
4431 * other instruction, or delivery of an event through the IDT.
4432 *
4433      * Thus, things like clearing bytes 3:1 of the VTPR or clearing the VEOI are not
4434      * performed now but only after the current operation has completed.
4435 *
4436 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4437 */
4438 iemVmxVirtApicSetPendingWrite(pVCpu, offAccess);
4439 }
4440 else
4441 {
4442 /*
4443 * A read access from the APIC-access page that is virtualized (rather than
4444 * causing a VM-exit) returns data from the virtual-APIC page.
4445 *
4446 * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
4447 */
4448 Assert(cbAccess <= 4);
4449 Assert(offAccess < XAPIC_OFF_END + 4);
4450 static uint32_t const s_auAccessSizeMasks[] = { 0, 0xff, 0xffff, 0xffffff, 0xffffffff };
4451
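        /* Read the full 32-bit register from the virtual-APIC page and mask it down to
           the size of the access. */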
4452 uint32_t u32Data = iemVmxVirtApicReadRaw32(pVCpu, offAccess);
4453 u32Data &= s_auAccessSizeMasks[cbAccess];
4454 *(uint32_t *)pvData = u32Data;
4455 }
4456
4457 return VINF_VMX_MODIFIES_BEHAVIOR;
4458}
4459
4460
4461/**
4462 * Virtualizes an MSR-based APIC read access.
4463 *
4464 * @returns VBox strict status code.
4465 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR read was virtualized.
4466 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR read access must be
4467 * handled by the x2APIC device.
4468  * @retval VERR_OUT_OF_RANGE if the MSR read was supposed to be virtualized but was
4469 * not within the range of valid MSRs, caller must raise \#GP(0).
4470 * @param pVCpu The cross context virtual CPU structure.
4471 * @param idMsr The x2APIC MSR being read.
4472 * @param pu64Value Where to store the read x2APIC MSR value (only valid when
4473 * VINF_VMX_MODIFIES_BEHAVIOR is returned).
4474 */
4475IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value)
4476{
4477 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4478 Assert(pVmcs);
4479     Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
4480 Assert(pu64Value);
4481
4482 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4483 {
4484 /*
4485 * Intel has different ideas in the x2APIC spec. vs the VT-x spec. as to
4486 * what the end of the valid x2APIC MSR range is. Hence the use of different
4487 * macros here.
4488 *
4489 * See Intel spec. 10.12.1.2 "x2APIC Register Address Space".
4490 * See Intel spec. 29.5 "Virtualizing MSR-based APIC Accesses".
4491 */
4492 if ( idMsr >= VMX_V_VIRT_APIC_MSR_START
4493 && idMsr <= VMX_V_VIRT_APIC_MSR_END)
4494 {
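            /* An x2APIC MSR maps onto the virtual-APIC page at offset ((idMsr & 0xff) * 16),
               mirroring the xAPIC layout where each 32-bit register sits on a 16-byte boundary. */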
4495 uint16_t const offReg = (idMsr & 0xff) << 4;
4496 uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
4497 *pu64Value = u64Value;
4498 return VINF_VMX_MODIFIES_BEHAVIOR;
4499 }
4500 return VERR_OUT_OF_RANGE;
4501 }
4502
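    /* Without APIC-register virtualization, only reads of the TPR MSR are virtualized here. */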
4503 if (idMsr == MSR_IA32_X2APIC_TPR)
4504 {
4505 uint16_t const offReg = (idMsr & 0xff) << 4;
4506 uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
4507 *pu64Value = u64Value;
4508 return VINF_VMX_MODIFIES_BEHAVIOR;
4509 }
4510
4511 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4512}
4513
4514
4515/**
4516 * Virtualizes an MSR-based APIC write access.
4517 *
4518 * @returns VBox strict status code.
4519 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR write was virtualized.
4520  * @retval VERR_OUT_OF_RANGE if the MSR write was supposed to be virtualized but the
4521  *         value being written is invalid for the register, caller must raise \#GP(0).
4522 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR must be written normally.
4523 *
4524 * @param pVCpu The cross context virtual CPU structure.
4525 * @param idMsr The x2APIC MSR being written.
4526 * @param u64Value The value of the x2APIC MSR being written.
4527 */
4528IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value)
4529{
4530 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4531 Assert(pVmcs);
4532
4533 /*
4534 * Check if the access is to be virtualized.
4535 * See Intel spec. 29.5 "Virtualizing MSR-based APIC Accesses".
4536 */
4537 if ( idMsr == MSR_IA32_X2APIC_TPR
4538 || ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4539 && ( idMsr == MSR_IA32_X2APIC_EOI
4540 || idMsr == MSR_IA32_X2APIC_SELF_IPI)))
4541 {
4542 /* Validate the MSR write depending on the register. */
4543 switch (idMsr)
4544 {
4545 case MSR_IA32_X2APIC_TPR:
4546 case MSR_IA32_X2APIC_SELF_IPI:
4547 {
4548 if (u64Value & UINT64_C(0xffffffffffffff00))
4549 return VERR_OUT_OF_RANGE;
4550 break;
4551 }
4552 case MSR_IA32_X2APIC_EOI:
4553 {
4554 if (u64Value != 0)
4555 return VERR_OUT_OF_RANGE;
4556 break;
4557 }
4558 }
4559
4560 /* Write the MSR to the virtual-APIC page. */
4561 uint16_t const offReg = (idMsr & 0xff) << 4;
4562 iemVmxVirtApicWriteRaw64(pVCpu, offReg, u64Value);
4563
4564 /*
4565 * Record the currently updated APIC offset, as we need this later for figuring
4566          * out whether to perform TPR, EOI or self-IPI virtualization as well
4567 * as for supplying the exit qualification when causing an APIC-write VM-exit.
4568 */
4569 iemVmxVirtApicSetPendingWrite(pVCpu, offReg);
4570
4571 return VINF_VMX_MODIFIES_BEHAVIOR;
4572 }
4573
4574 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4575}
4576
4577
4578/**
4579 * Finds the most significant set bit in a virtual-APIC 256-bit sparse register.
4580 *
4581 * @returns VBox status code.
4582  * @retval VINF_SUCCESS when the highest set bit is found.
4583 * @retval VERR_NOT_FOUND when no bit is set.
4584 *
4585 * @param pVCpu The cross context virtual CPU structure.
4586 * @param offReg The offset of the APIC 256-bit sparse register.
4587 * @param pidxHighestBit Where to store the highest bit (most significant bit)
4588 * set in the register. Only valid when VINF_SUCCESS is
4589 * returned.
4590 *
4591 * @remarks The format of the 256-bit sparse register here mirrors that found in
4592 * real APIC hardware.
4593 */
4594static int iemVmxVirtApicGetHighestSetBitInReg(PVMCPU pVCpu, uint16_t offReg, uint8_t *pidxHighestBit)
4595{
4596 Assert(offReg < XAPIC_OFF_END + 4);
4597 Assert(pidxHighestBit);
4598
4599 /*
4600 * There are 8 contiguous fragments (of 16-bytes each) in the sparse register.
4601 * However, in each fragment only the first 4 bytes are used.
4602 */
4603 uint8_t const cFrags = 8;
4604     for (int8_t iFrag = cFrags - 1; iFrag >= 0; iFrag--)
4605     {
4606         uint16_t const offFrag = offReg + iFrag * 16;
4607 uint32_t const u32Frag = iemVmxVirtApicReadRaw32(pVCpu, offFrag);
4608 if (!u32Frag)
4609 continue;
4610
4611 unsigned idxHighestBit = ASMBitLastSetU32(u32Frag);
4612 Assert(idxHighestBit > 0);
4613 --idxHighestBit;
4614 Assert(idxHighestBit <= UINT8_MAX);
4615 *pidxHighestBit = idxHighestBit;
4616 return VINF_SUCCESS;
4617 }
4618 return VERR_NOT_FOUND;
4619}
4620
4621
4622/**
4623 * Evaluates pending virtual interrupts.
4624 *
4625 * @param pVCpu The cross context virtual CPU structure.
4626 */
4627IEM_STATIC void iemVmxEvalPendingVirtIntrs(PVMCPU pVCpu)
4628{
4629 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4630 Assert(pVmcs);
4631 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4632
4633 if (!(pVmcs->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4634 {
4635 uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
4636 uint8_t const uPpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_PPR);
4637
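        /* Only the priority classes (bits 7:4) of RVI and PPR are compared; a pending
           virtual interrupt is signalled when RVI's class is higher than PPR's. */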
4638 if ((uRvi >> 4) > (uPpr >> 4))
4639 {
4640 Log2(("eval_virt_intrs: uRvi=%#x uPpr=%#x - Signaling pending interrupt\n", uRvi, uPpr));
4641 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
4642 }
4643 else
4644 Log2(("eval_virt_intrs: uRvi=%#x uPpr=%#x - Nothing to do\n", uRvi, uPpr));
4645 }
4646}
4647
4648
4649/**
4650 * Performs PPR virtualization.
4651 *
4652 * @returns VBox strict status code.
4653 * @param pVCpu The cross context virtual CPU structure.
4654 */
4655IEM_STATIC void iemVmxPprVirtualization(PVMCPU pVCpu)
4656{
4657 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4658 Assert(pVmcs);
4659 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4660 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4661
4662 /*
4663 * PPR virtualization is caused in response to a VM-entry, TPR-virtualization,
4664 * or EOI-virtualization.
4665 *
4666 * See Intel spec. 29.1.3 "PPR Virtualization".
4667 */
4668 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4669 uint32_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
4670
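    /* VPPR is VTPR when the TPR priority class is at least the SVI class, otherwise it is
       SVI with the sub-class bits cleared (SVI & F0H). */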
4671 uint32_t uPpr;
4672 if (((uTpr >> 4) & 0xf) >= ((uSvi >> 4) & 0xf))
4673 uPpr = uTpr & 0xff;
4674 else
4675 uPpr = uSvi & 0xf0;
4676
4677 Log2(("ppr_virt: uTpr=%#x uSvi=%#x uPpr=%#x\n", uTpr, uSvi, uPpr));
4678 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_PPR, uPpr);
4679}
4680
4681
4682/**
4683 * Performs VMX TPR virtualization.
4684 *
4685 * @returns VBox strict status code.
4686 * @param pVCpu The cross context virtual CPU structure.
4687 */
4688IEM_STATIC VBOXSTRICTRC iemVmxTprVirtualization(PVMCPU pVCpu)
4689{
4690 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4691 Assert(pVmcs);
4692 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4693
4694 /*
4695 * We should have already performed the virtual-APIC write to the TPR offset
4696 * in the virtual-APIC page. We now perform TPR virtualization.
4697 *
4698 * See Intel spec. 29.1.2 "TPR Virtualization".
4699 */
4700 if (!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
4701 {
4702 uint32_t const uTprThreshold = pVmcs->u32TprThreshold;
4703 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4704
4705 /*
4706 * If the VTPR falls below the TPR threshold, we must cause a VM-exit.
4707 * See Intel spec. 29.1.2 "TPR Virtualization".
4708 */
4709 if (((uTpr >> 4) & 0xf) < uTprThreshold)
4710 {
4711 Log2(("tpr_virt: uTpr=%u uTprThreshold=%u -> VM-exit\n", uTpr, uTprThreshold));
4712 iemVmxVmcsSetExitQual(pVCpu, 0);
4713 return iemVmxVmexit(pVCpu, VMX_EXIT_TPR_BELOW_THRESHOLD);
4714 }
4715 }
4716 else
4717 {
4718 iemVmxPprVirtualization(pVCpu);
4719 iemVmxEvalPendingVirtIntrs(pVCpu);
4720 }
4721
4722 return VINF_SUCCESS;
4723}
4724
4725
4726/**
4727 * Checks whether an EOI write for the given interrupt vector causes a VM-exit or
4728 * not.
4729 *
4730 * @returns @c true if the EOI write is intercepted, @c false otherwise.
4731 * @param pVCpu The cross context virtual CPU structure.
4732 * @param uVector The interrupt that was acknowledged using an EOI.
4733 */
4734IEM_STATIC bool iemVmxIsEoiInterceptSet(PVMCPU pVCpu, uint8_t uVector)
4735{
4736 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4737 Assert(pVmcs);
4738 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4739
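    /* The 256 possible vectors are spread across four 64-bit EOI-exit bitmap fields; bit
       (uVector % 64) of the selected bitmap determines whether the EOI causes a VM-exit. */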
4740 if (uVector < 64)
4741 return RT_BOOL(pVmcs->u64EoiExitBitmap0.u & RT_BIT_64(uVector));
4742 if (uVector < 128)
4743 return RT_BOOL(pVmcs->u64EoiExitBitmap1.u & RT_BIT_64(uVector));
4744 if (uVector < 192)
4745 return RT_BOOL(pVmcs->u64EoiExitBitmap2.u & RT_BIT_64(uVector));
4746 return RT_BOOL(pVmcs->u64EoiExitBitmap3.u & RT_BIT_64(uVector));
4747}
4748
4749
4750/**
4751 * Performs EOI virtualization.
4752 *
4753 * @returns VBox strict status code.
4754 * @param pVCpu The cross context virtual CPU structure.
4755 */
4756IEM_STATIC VBOXSTRICTRC iemVmxEoiVirtualization(PVMCPU pVCpu)
4757{
4758 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4759 Assert(pVmcs);
4760 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4761
4762 /*
4763      * Mark the highest-priority in-service interrupt (SVI) as no longer in-service
4764      * by clearing its bit in the ISR, then get the next in-service interrupt (if any).
4765 *
4766 * See Intel spec. 29.1.4 "EOI Virtualization".
4767 */
4768 uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
4769 uint8_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
4770 Log2(("eoi_virt: uRvi=%#x uSvi=%#x\n", uRvi, uSvi));
4771
4772 uint8_t uVector = uSvi;
4773 iemVmxVirtApicClearVector(pVCpu, XAPIC_OFF_ISR0, uVector);
4774
4775 uVector = 0;
4776 iemVmxVirtApicGetHighestSetBitInReg(pVCpu, XAPIC_OFF_ISR0, &uVector);
4777
4778 if (uVector)
4779 Log2(("eoi_virt: next interrupt %#x\n", uVector));
4780 else
4781 Log2(("eoi_virt: no interrupt pending in ISR\n"));
4782
4783 /* Update guest-interrupt status SVI (leave RVI portion as it is) in the VMCS. */
4784 pVmcs->u16GuestIntStatus = RT_MAKE_U16(uRvi, uVector);
4785
4786 iemVmxPprVirtualization(pVCpu);
4787 if (iemVmxIsEoiInterceptSet(pVCpu, uVector))
4788 return iemVmxVmexitVirtEoi(pVCpu, uVector);
4789 iemVmxEvalPendingVirtIntrs(pVCpu);
4790 return VINF_SUCCESS;
4791}
4792
4793
4794/**
4795 * Performs self-IPI virtualization.
4796 *
4797 * @returns VBox strict status code.
4798 * @param pVCpu The cross context virtual CPU structure.
4799 */
4800IEM_STATIC VBOXSTRICTRC iemVmxSelfIpiVirtualization(PVMCPU pVCpu)
4801{
4802 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4803 Assert(pVmcs);
4804 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4805
4806 /*
4807 * We should have already performed the virtual-APIC write to the self-IPI offset
4808 * in the virtual-APIC page. We now perform self-IPI virtualization.
4809 *
4810 * See Intel spec. 29.1.5 "Self-IPI Virtualization".
4811 */
4812 uint8_t const uVector = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_LO);
4813 Log2(("self_ipi_virt: uVector=%#x\n", uVector));
4814 iemVmxVirtApicSetVector(pVCpu, XAPIC_OFF_IRR0, uVector);
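    /* RVI becomes max(RVI, vector); SVI is left unchanged. */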
4815 uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
4816 uint8_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
4817 if (uVector > uRvi)
4818 pVmcs->u16GuestIntStatus = RT_MAKE_U16(uVector, uSvi);
4819 iemVmxEvalPendingVirtIntrs(pVCpu);
4820 return VINF_SUCCESS;
4821}
4822
4823
4824/**
4825 * Performs VMX APIC-write emulation.
4826 *
4827 * @returns VBox strict status code.
4828 * @param pVCpu The cross context virtual CPU structure.
4829 */
4830IEM_STATIC VBOXSTRICTRC iemVmxApicWriteEmulation(PVMCPU pVCpu)
4831{
4832 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4833 Assert(pVmcs);
4834 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT);
4835
4836 /*
4837 * Perform APIC-write emulation based on the virtual-APIC register written.
4838 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4839 */
4840 uint16_t const offApicWrite = iemVmxVirtApicClearPendingWrite(pVCpu);
4841 VBOXSTRICTRC rcStrict;
4842 switch (offApicWrite)
4843 {
4844 case XAPIC_OFF_TPR:
4845 {
4846 /* Clear bytes 3:1 of the VTPR and perform TPR virtualization. */
4847 uint32_t uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4848 uTpr &= UINT32_C(0x000000ff);
4849 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_TPR, uTpr);
4850 Log2(("iemVmxApicWriteEmulation: TPR write %#x\n", uTpr));
4851 rcStrict = iemVmxTprVirtualization(pVCpu);
4852 break;
4853 }
4854
4855 case XAPIC_OFF_EOI:
4856 {
4857 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4858 {
4859 /* Clear VEOI and perform EOI virtualization. */
4860 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_EOI, 0);
4861 Log2(("iemVmxApicWriteEmulation: EOI write\n"));
4862 rcStrict = iemVmxEoiVirtualization(pVCpu);
4863 }
4864 else
4865 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4866 break;
4867 }
4868
4869 case XAPIC_OFF_ICR_LO:
4870 {
4871 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4872 {
4873 /* If the ICR_LO is valid, write it and perform self-IPI virtualization. */
4874                 uint32_t const uIcrLo = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_LO);
4875 uint32_t const fIcrLoMb0 = UINT32_C(0xfffbb700);
4876 uint32_t const fIcrLoMb1 = UINT32_C(0x000000f0);
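                /* fIcrLoMb0 holds the ICR_LO bits that must be zero, while fIcrLoMb1 requires
                   bits 7:4 of the vector to be non-zero (i.e. vector >= 16) for the write to
                   be treated as a self-IPI request. */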
4877 if ( !(uIcrLo & fIcrLoMb0)
4878 && (uIcrLo & fIcrLoMb1))
4879 {
4880 Log2(("iemVmxApicWriteEmulation: Self-IPI virtualization with vector %#x\n", (uIcrLo & 0xff)));
4881 rcStrict = iemVmxSelfIpiVirtualization(pVCpu);
4882 }
4883 else
4884 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4885 }
4886 else
4887 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4888 break;
4889 }
4890
4891 case XAPIC_OFF_ICR_HI:
4892 {
4893 /* Clear bytes 2:0 of VICR_HI. No other virtualization or VM-exit must occur. */
4894 uint32_t uIcrHi = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_HI);
4895 uIcrHi &= UINT32_C(0xff000000);
4896 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_ICR_HI, uIcrHi);
4897 rcStrict = VINF_SUCCESS;
4898 break;
4899 }
4900
4901 default:
4902 {
4903 /* Writes to any other virtual-APIC register causes an APIC-write VM-exit. */
4904 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4905 break;
4906 }
4907 }
4908
4909 return rcStrict;
4910}
4911
4912
4913/**
4914 * Checks guest control registers, debug registers and MSRs as part of VM-entry.
4915 *
4916 * @param pVCpu The cross context virtual CPU structure.
4917 * @param pszInstr The VMX instruction name (for logging purposes).
4918 */
4919IEM_STATIC int iemVmxVmentryCheckGuestControlRegsMsrs(PVMCPU pVCpu, const char *pszInstr)
4920{
4921 /*
4922 * Guest Control Registers, Debug Registers, and MSRs.
4923 * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs".
4924 */
4925 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4926 const char *const pszFailure = "VM-exit";
4927 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
4928
4929 /* CR0 reserved bits. */
4930 {
4931 /* CR0 MB1 bits. */
4932 uint64_t u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
4933 Assert(!(u64Cr0Fixed0 & (X86_CR0_NW | X86_CR0_CD)));
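        /* With unrestricted guest, CR0.PE and CR0.PG may be 0 despite being reported as
           fixed-1 bits, so remove them from the must-be-one mask. */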
4934 if (fUnrestrictedGuest)
4935 u64Cr0Fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
4936 if ((pVmcs->u64GuestCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
4937 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed0);
4938
4939 /* CR0 MBZ bits. */
4940 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
4941 if (pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1)
4942 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed1);
4943
4944         /* Without unrestricted-guest support, VT-x does not support unpaged protected mode. */
4945 if ( !fUnrestrictedGuest
4946 && (pVmcs->u64GuestCr0.u & X86_CR0_PG)
4947 && !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
4948 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0PgPe);
4949 }
4950
4951 /* CR4 reserved bits. */
4952 {
4953 /* CR4 MB1 bits. */
4954 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
4955 if ((pVmcs->u64GuestCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
4956 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed0);
4957
4958 /* CR4 MBZ bits. */
4959 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
4960 if (pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1)
4961 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed1);
4962 }
4963
4964 /* DEBUGCTL MSR. */
4965 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4966 && (pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
4967 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDebugCtl);
4968
4969 /* 64-bit CPU checks. */
4970 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4971 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4972 {
4973 if (fGstInLongMode)
4974 {
4975             /* PG and PAE must be set. */
4976             if (   (pVmcs->u64GuestCr0.u & X86_CR0_PG)
4977                 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE))
4978 { /* likely */ }
4979 else
4980 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPae);
4981 }
4982 else
4983 {
4984 /* PCIDE should not be set. */
4985 if (!(pVmcs->u64GuestCr4.u & X86_CR4_PCIDE))
4986 { /* likely */ }
4987 else
4988 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPcide);
4989 }
4990
4991 /* CR3. */
4992 if (!(pVmcs->u64GuestCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
4993 { /* likely */ }
4994 else
4995 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr3);
4996
4997 /* DR7. */
4998 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4999 && (pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
5000 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDr7);
5001
5002 /* SYSENTER ESP and SYSENTER EIP. */
5003 if ( X86_IS_CANONICAL(pVmcs->u64GuestSysenterEsp.u)
5004 && X86_IS_CANONICAL(pVmcs->u64GuestSysenterEip.u))
5005 { /* likely */ }
5006 else
5007 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSysenterEspEip);
5008 }
5009
5010 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
5011 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
5012
5013 /* PAT MSR. */
5014 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5015 && !CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
5016 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPatMsr);
5017
5018 /* EFER MSR. */
5019 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
5020 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5021 && (pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
5022 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsrRsvd);
5023
5024     bool const fGstLma = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LMA);
5025     bool const fGstLme = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LME);
5026 if ( fGstInLongMode == fGstLma
5027 && ( !(pVmcs->u64GuestCr0.u & X86_CR0_PG)
5028 || fGstLma == fGstLme))
5029 { /* likely */ }
5030 else
5031 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsr);
5032
5033 /* We don't support IA32_BNDCFGS MSR yet. */
5034 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
5035
5036 NOREF(pszInstr);
5037 NOREF(pszFailure);
5038 return VINF_SUCCESS;
5039}
5040
5041
5042/**
5043 * Checks guest segment registers, LDTR and TR as part of VM-entry.
5044 *
5045 * @param pVCpu The cross context virtual CPU structure.
5046 * @param pszInstr The VMX instruction name (for logging purposes).
5047 */
5048IEM_STATIC int iemVmxVmentryCheckGuestSegRegs(PVMCPU pVCpu, const char *pszInstr)
5049{
5050 /*
5051 * Segment registers.
5052 * See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
5053 */
5054 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5055 const char *const pszFailure = "VM-exit";
5056 bool const fGstInV86Mode = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);
5057 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
5058 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5059
5060 /* Selectors. */
5061 if ( !fGstInV86Mode
5062 && !fUnrestrictedGuest
5063 && (pVmcs->GuestSs & X86_SEL_RPL) != (pVmcs->GuestCs & X86_SEL_RPL))
5064 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelCsSsRpl);
5065
5066 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
5067 {
5068 CPUMSELREG SelReg;
5069 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &SelReg);
5070 if (RT_LIKELY(rc == VINF_SUCCESS))
5071 { /* likely */ }
5072 else
5073 return rc;
5074
5075 /*
5076 * Virtual-8086 mode checks.
5077 */
5078 if (fGstInV86Mode)
5079 {
5080 /* Base address. */
5081 if (SelReg.u64Base == (uint64_t)SelReg.Sel << 4)
5082 { /* likely */ }
5083 else
5084 {
5085 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBaseV86(iSegReg);
5086 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5087 }
5088
5089 /* Limit. */
5090 if (SelReg.u32Limit == 0xffff)
5091 { /* likely */ }
5092 else
5093 {
5094 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegLimitV86(iSegReg);
5095 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5096 }
5097
5098 /* Attribute. */
5099 if (SelReg.Attr.u == 0xf3)
5100 { /* likely */ }
5101 else
5102 {
5103 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrV86(iSegReg);
5104 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5105 }
5106
5107 /* We're done; move to checking the next segment. */
5108 continue;
5109 }
5110
5111 /* Checks done by 64-bit CPUs. */
5112 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5113 {
5114 /* Base address. */
5115 if ( iSegReg == X86_SREG_FS
5116 || iSegReg == X86_SREG_GS)
5117 {
5118 if (X86_IS_CANONICAL(SelReg.u64Base))
5119 { /* likely */ }
5120 else
5121 {
5122 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
5123 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5124 }
5125 }
5126 else if (iSegReg == X86_SREG_CS)
5127 {
5128 if (!RT_HI_U32(SelReg.u64Base))
5129 { /* likely */ }
5130 else
5131 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseCs);
5132 }
5133 else
5134 {
5135 if ( SelReg.Attr.n.u1Unusable
5136 || !RT_HI_U32(SelReg.u64Base))
5137 { /* likely */ }
5138 else
5139 {
5140 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
5141 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5142 }
5143 }
5144 }
5145
5146 /*
5147 * Checks outside Virtual-8086 mode.
5148 */
5149 uint8_t const uSegType = SelReg.Attr.n.u4Type;
5150 uint8_t const fCodeDataSeg = SelReg.Attr.n.u1DescType;
5151 uint8_t const fUsable = !SelReg.Attr.n.u1Unusable;
5152 uint8_t const uDpl = SelReg.Attr.n.u2Dpl;
5153 uint8_t const fPresent = SelReg.Attr.n.u1Present;
5154 uint8_t const uGranularity = SelReg.Attr.n.u1Granularity;
5155 uint8_t const uDefBig = SelReg.Attr.n.u1DefBig;
5156 uint8_t const fSegLong = SelReg.Attr.n.u1Long;
5157
5158 /* Code or usable segment. */
5159 if ( iSegReg == X86_SREG_CS
5160 || fUsable)
5161 {
5162 /* Reserved bits (bits 31:17 and bits 11:8). */
5163 if (!(SelReg.Attr.u & 0xfffe0f00))
5164 { /* likely */ }
5165 else
5166 {
5167 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrRsvd(iSegReg);
5168 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5169 }
5170
5171 /* Descriptor type. */
5172 if (fCodeDataSeg)
5173 { /* likely */ }
5174 else
5175 {
5176 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDescType(iSegReg);
5177 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5178 }
5179
5180 /* Present. */
5181 if (fPresent)
5182 { /* likely */ }
5183 else
5184 {
5185 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrPresent(iSegReg);
5186 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5187 }
5188
5189 /* Granularity. */
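            /* G must be 0 if any bit in limit[11:0] is 0, and G must be 1 if any bit in
               limit[31:20] is 1. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */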
5190 if ( ((SelReg.u32Limit & 0x00000fff) == 0x00000fff || !uGranularity)
5191 && ((SelReg.u32Limit & 0xfff00000) == 0x00000000 || uGranularity))
5192 { /* likely */ }
5193 else
5194 {
5195 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrGran(iSegReg);
5196 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5197 }
5198 }
5199
5200 if (iSegReg == X86_SREG_CS)
5201 {
5202 /* Segment Type and DPL. */
5203 if ( uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
5204 && fUnrestrictedGuest)
5205 {
5206 if (uDpl == 0)
5207 { /* likely */ }
5208 else
5209 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplZero);
5210 }
5211 else if ( uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_ACCESSED)
5212 || uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
5213 {
5214 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5215 if (uDpl == AttrSs.n.u2Dpl)
5216 { /* likely */ }
5217 else
5218 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplEqSs);
5219 }
5220 else if ((uSegType & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
5221 == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
5222 {
5223 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5224 if (uDpl <= AttrSs.n.u2Dpl)
5225 { /* likely */ }
5226 else
5227 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplLtSs);
5228 }
5229 else
5230 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsType);
5231
5232 /* Def/Big. */
5233 if ( fGstInLongMode
5234 && fSegLong)
5235 {
5236 if (uDefBig == 0)
5237 { /* likely */ }
5238 else
5239 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDefBig);
5240 }
5241 }
5242 else if (iSegReg == X86_SREG_SS)
5243 {
5244 /* Segment Type. */
5245 if ( !fUsable
5246 || uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
5247 || uSegType == (X86_SEL_TYPE_DOWN | X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED))
5248 { /* likely */ }
5249 else
5250 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsType);
5251
5252 /* DPL. */
5253 if (fUnrestrictedGuest)
5254 {
5255 if (uDpl == (SelReg.Sel & X86_SEL_RPL))
5256 { /* likely */ }
5257 else
5258 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplEqRpl);
5259 }
5260 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
5261             if (   AttrCs.n.u4Type == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
5262                 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
5263 {
5264 if (uDpl == 0)
5265 { /* likely */ }
5266 else
5267 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplZero);
5268 }
5269 }
5270 else
5271 {
5272 /* DS, ES, FS, GS. */
5273 if (fUsable)
5274 {
5275 /* Segment type. */
5276 if (uSegType & X86_SEL_TYPE_ACCESSED)
5277 { /* likely */ }
5278 else
5279 {
5280 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrTypeAcc(iSegReg);
5281 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5282 }
5283
5284 if ( !(uSegType & X86_SEL_TYPE_CODE)
5285 || (uSegType & X86_SEL_TYPE_READ))
5286 { /* likely */ }
5287 else
5288 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsTypeRead);
5289
5290 /* DPL. */
5291 if ( !fUnrestrictedGuest
5292 && uSegType <= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
5293 {
5294 if (uDpl >= (SelReg.Sel & X86_SEL_RPL))
5295 { /* likely */ }
5296 else
5297 {
5298 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDplRpl(iSegReg);
5299 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5300 }
5301 }
5302 }
5303 }
5304 }
5305
5306 /*
5307 * LDTR.
5308 */
5309 {
5310 CPUMSELREG Ldtr;
5311 Ldtr.Sel = pVmcs->GuestLdtr;
5312 Ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
5313 Ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
5314         Ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
5315
5316 if (!Ldtr.Attr.n.u1Unusable)
5317 {
5318 /* Selector. */
5319 if (!(Ldtr.Sel & X86_SEL_LDT))
5320 { /* likely */ }
5321 else
5322 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelLdtr);
5323
5324 /* Base. */
5325 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5326 {
5327 if (X86_IS_CANONICAL(Ldtr.u64Base))
5328 { /* likely */ }
5329 else
5330 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseLdtr);
5331 }
5332
5333 /* Attributes. */
5334 /* Reserved bits (bits 31:17 and bits 11:8). */
5335 if (!(Ldtr.Attr.u & 0xfffe0f00))
5336 { /* likely */ }
5337 else
5338 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrRsvd);
5339
5340 if (Ldtr.Attr.n.u4Type == X86_SEL_TYPE_SYS_LDT)
5341 { /* likely */ }
5342 else
5343 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrType);
5344
5345 if (!Ldtr.Attr.n.u1DescType)
5346 { /* likely */ }
5347 else
5348 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrDescType);
5349
5350 if (Ldtr.Attr.n.u1Present)
5351 { /* likely */ }
5352 else
5353 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrPresent);
5354
5355 if ( ((Ldtr.u32Limit & 0x00000fff) == 0x00000fff || !Ldtr.Attr.n.u1Granularity)
5356 && ((Ldtr.u32Limit & 0xfff00000) == 0x00000000 || Ldtr.Attr.n.u1Granularity))
5357 { /* likely */ }
5358 else
5359 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrGran);
5360 }
5361 }
5362
5363 /*
5364 * TR.
5365 */
5366 {
5367 CPUMSELREG Tr;
5368 Tr.Sel = pVmcs->GuestTr;
5369 Tr.u32Limit = pVmcs->u32GuestTrLimit;
5370 Tr.u64Base = pVmcs->u64GuestTrBase.u;
5371         Tr.Attr.u = pVmcs->u32GuestTrAttr;
5372
5373 /* Selector. */
5374 if (!(Tr.Sel & X86_SEL_LDT))
5375 { /* likely */ }
5376 else
5377 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelTr);
5378
5379 /* Base. */
5380 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5381 {
5382 if (X86_IS_CANONICAL(Tr.u64Base))
5383 { /* likely */ }
5384 else
5385 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseTr);
5386 }
5387
5388 /* Attributes. */
5389 /* Reserved bits (bits 31:17 and bits 11:8). */
5390 if (!(Tr.Attr.u & 0xfffe0f00))
5391 { /* likely */ }
5392 else
5393 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrRsvd);
5394
5395 if (!Tr.Attr.n.u1Unusable)
5396 { /* likely */ }
5397 else
5398 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrUnusable);
5399
5400 if ( Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY
5401 || ( !fGstInLongMode
5402 && Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY))
5403 { /* likely */ }
5404 else
5405 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrType);
5406
5407 if (!Tr.Attr.n.u1DescType)
5408 { /* likely */ }
5409 else
5410 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrDescType);
5411
5412 if (Tr.Attr.n.u1Present)
5413 { /* likely */ }
5414 else
5415 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrPresent);
5416
5417 if ( ((Tr.u32Limit & 0x00000fff) == 0x00000fff || !Tr.Attr.n.u1Granularity)
5418 && ((Tr.u32Limit & 0xfff00000) == 0x00000000 || Tr.Attr.n.u1Granularity))
5419 { /* likely */ }
5420 else
5421 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrGran);
5422 }
5423
5424 NOREF(pszInstr);
5425 NOREF(pszFailure);
5426 return VINF_SUCCESS;
5427}
5428
5429
5430/**
5431 * Checks guest GDTR and IDTR as part of VM-entry.
5432 *
5433 * @param pVCpu The cross context virtual CPU structure.
5434 * @param pszInstr The VMX instruction name (for logging purposes).
5435 */
5436IEM_STATIC int iemVmxVmentryCheckGuestGdtrIdtr(PVMCPU pVCpu, const char *pszInstr)
5437{
5438 /*
5439 * GDTR and IDTR.
5440 * See Intel spec. 26.3.1.3 "Checks on Guest Descriptor-Table Registers".
5441 */
5442 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5443 const char *const pszFailure = "VM-exit";
5444
5445 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5446 {
5447 /* Base. */
5448 if (X86_IS_CANONICAL(pVmcs->u64GuestGdtrBase.u))
5449 { /* likely */ }
5450 else
5451 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrBase);
5452
5453 if (X86_IS_CANONICAL(pVmcs->u64GuestIdtrBase.u))
5454 { /* likely */ }
5455 else
5456 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrBase);
5457 }
5458
5459 /* Limit. */
5460 if (!RT_HI_U16(pVmcs->u32GuestGdtrLimit))
5461 { /* likely */ }
5462 else
5463 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrLimit);
5464
5465 if (!RT_HI_U16(pVmcs->u32GuestIdtrLimit))
5466 { /* likely */ }
5467 else
5468 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrLimit);
5469
5470 NOREF(pszInstr);
5471 NOREF(pszFailure);
5472 return VINF_SUCCESS;
5473}
5474
5475
5476/**
5477 * Checks guest RIP and RFLAGS as part of VM-entry.
5478 *
5479 * @param pVCpu The cross context virtual CPU structure.
5480 * @param pszInstr The VMX instruction name (for logging purposes).
5481 */
5482IEM_STATIC int iemVmxVmentryCheckGuestRipRFlags(PVMCPU pVCpu, const char *pszInstr)
5483{
5484 /*
5485 * RIP and RFLAGS.
5486 * See Intel spec. 26.3.1.4 "Checks on Guest RIP and RFLAGS".
5487 */
5488 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5489 const char *const pszFailure = "VM-exit";
5490 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5491
5492 /* RIP. */
5493 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5494 {
5495 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
5496 if ( !fGstInLongMode
5497 || !AttrCs.n.u1Long)
5498 {
5499 if (!RT_HI_U32(pVmcs->u64GuestRip.u))
5500 { /* likely */ }
5501 else
5502 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRipRsvd);
5503 }
5504
5505 if ( fGstInLongMode
5506 && AttrCs.n.u1Long)
5507 {
5508 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth == 48); /* Canonical. */
5509 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth < 64
5510 && X86_IS_CANONICAL(pVmcs->u64GuestRip.u))
5511 { /* likely */ }
5512 else
5513 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRip);
5514 }
5515 }
5516
5517 /* RFLAGS (bits 63:22 (or 31:22), bits 15, 5, 3 are reserved, bit 1 MB1). */
5518 uint64_t const uGuestRFlags = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode ? pVmcs->u64GuestRFlags.u
5519 : pVmcs->u64GuestRFlags.s.Lo;
5520 if ( !(uGuestRFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK))
5521 && (uGuestRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK)
5522 { /* likely */ }
5523 else
5524 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsRsvd);
5525
5526 if ( fGstInLongMode
5527 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
5528 {
5529 if (!(uGuestRFlags & X86_EFL_VM))
5530 { /* likely */ }
5531 else
5532 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsVm);
5533 }
5534
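    /* When the VM-entry is injecting an external interrupt, RFLAGS.IF must be 1. */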
5535 if ( VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo)
5536 && VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo) == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5537 {
5538 if (uGuestRFlags & X86_EFL_IF)
5539 { /* likely */ }
5540 else
5541 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsIf);
5542 }
5543
5544 NOREF(pszInstr);
5545 NOREF(pszFailure);
5546 return VINF_SUCCESS;
5547}
5548
5549
5550/**
5551 * Checks guest non-register state as part of VM-entry.
5552 *
5553 * @param pVCpu The cross context virtual CPU structure.
5554 * @param pszInstr The VMX instruction name (for logging purposes).
5555 */
5556IEM_STATIC int iemVmxVmentryCheckGuestNonRegState(PVMCPU pVCpu, const char *pszInstr)
5557{
5558 /*
5559 * Guest non-register state.
5560 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
5561 */
5562 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5563 const char *const pszFailure = "VM-exit";
5564
5565 /*
5566 * Activity state.
5567 */
5568 uint64_t const u64GuestVmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
5569 uint32_t const fActivityStateMask = RT_BF_GET(u64GuestVmxMiscMsr, VMX_BF_MISC_ACTIVITY_STATES);
5570 if (!(pVmcs->u32GuestActivityState & fActivityStateMask))
5571 { /* likely */ }
5572 else
5573 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateRsvd);
5574
5575 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5576 if ( !AttrSs.n.u2Dpl
5577 || pVmcs->u32GuestActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT)
5578 { /* likely */ }
5579 else
5580 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateSsDpl);
5581
5582 if ( pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
5583 || pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5584 {
5585 if (pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE)
5586 { /* likely */ }
5587 else
5588 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateStiMovSs);
5589 }
5590
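    /* If an event is being injected, it must be compatible with the activity state; only a
       few event types can be injected while the guest is halted or shut down. */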
5591 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
5592 {
5593 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
5594 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(pVmcs->u32EntryIntInfo);
5595 AssertCompile(VMX_V_GUEST_ACTIVITY_STATE_MASK == (VMX_VMCS_GUEST_ACTIVITY_HLT | VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN));
5596 switch (pVmcs->u32GuestActivityState)
5597 {
5598 case VMX_VMCS_GUEST_ACTIVITY_HLT:
5599 {
5600 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT
5601 || uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
5602 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
5603 && ( uVector == X86_XCPT_DB
5604 || uVector == X86_XCPT_MC))
5605 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
5606 && uVector == VMX_ENTRY_INT_INFO_VECTOR_MTF))
5607 { /* likely */ }
5608 else
5609 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateHlt);
5610 break;
5611 }
5612
5613 case VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN:
5614 {
5615 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
5616 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
5617 && uVector == X86_XCPT_MC))
5618 { /* likely */ }
5619 else
5620 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateShutdown);
5621 break;
5622 }
5623
5624 case VMX_VMCS_GUEST_ACTIVITY_ACTIVE:
5625 default:
5626 break;
5627 }
5628 }
5629
5630 /*
5631 * Interruptibility state.
5632 */
5633 if (!(pVmcs->u32GuestIntrState & ~VMX_VMCS_GUEST_INT_STATE_MASK))
5634 { /* likely */ }
5635 else
5636 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRsvd);
5637
5638 if ((pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5639 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5640 { /* likely */ }
5641 else
5642 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateStiMovSs);
5643
5644 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_IF)
5645 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5646 { /* likely */ }
5647 else
5648 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRFlagsSti);
5649
5650 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
5651 {
5652 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
5653 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5654 {
5655 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
5656 { /* likely */ }
5657 else
5658 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateExtInt);
5659 }
5660 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5661 {
5662 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
5663 { /* likely */ }
5664 else
5665 {
5666 /*
5667 * We don't support injecting NMIs when blocking-by-STI would be in effect.
5668 * We update the VM-exit qualification only when blocking-by-STI is set
5669                  * without blocking-by-MovSS being set. Although in practice it does not
5670                  * make much difference since the order of checks is implementation-defined.
5671 */
5672 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5673 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_NMI_INJECT);
5674 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateNmi);
5675 }
5676
5677 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5678 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI))
5679 { /* likely */ }
5680 else
5681 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateVirtNmi);
5682 }
5683 }
5684
5685 /* We don't support SMM yet. So blocking-by-SMIs must not be set. */
5686 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI))
5687 { /* likely */ }
5688 else
5689 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateSmi);
5690
5691 /* We don't support SGX yet. So enclave-interruption must not be set. */
5692 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_ENCLAVE))
5693 { /* likely */ }
5694 else
5695 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateEnclave);
5696
5697 /*
5698 * Pending debug exceptions.
5699 */
5700 uint64_t const uPendingDbgXcpt = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode
5701 ? pVmcs->u64GuestPendingDbgXcpt.u
5702 : pVmcs->u64GuestPendingDbgXcpt.s.Lo;
5703 if (!(uPendingDbgXcpt & ~VMX_VMCS_GUEST_PENDING_DEBUG_VALID_MASK))
5704 { /* likely */ }
5705 else
5706 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRsvd);
5707
5708 if ( (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5709 || pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5710 {
5711 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_TF)
5712 && !(pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF)
5713 && !(uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
5714 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsTf);
5715
5716 if ( ( !(pVmcs->u64GuestRFlags.u & X86_EFL_TF)
5717 || (pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF))
5718 && (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
5719 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsNoTf);
5720 }
5721
5722 /* We don't support RTM (Real-time Transactional Memory) yet. */
5723 if (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_RTM)
5724 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRtm);
5725
5726 /*
5727 * VMCS link pointer.
5728 */
5729 if (pVmcs->u64VmcsLinkPtr.u != UINT64_C(0xffffffffffffffff))
5730 {
5731 RTGCPHYS const GCPhysShadowVmcs = pVmcs->u64VmcsLinkPtr.u;
5732 /* We don't support SMM yet (so VMCS link pointer cannot be the current VMCS). */
5733 if (GCPhysShadowVmcs != IEM_VMX_GET_CURRENT_VMCS(pVCpu))
5734 { /* likely */ }
5735 else
5736 {
5737 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5738 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrCurVmcs);
5739 }
5740
5741 /* Validate the address. */
5742 if ( (GCPhysShadowVmcs & X86_PAGE_4K_OFFSET_MASK)
5743 || (GCPhysShadowVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5744 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysShadowVmcs))
5745 {
5746 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5747 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmcsLinkPtr);
5748 }
5749
5750 /* Read the VMCS-link pointer from guest memory. */
5751 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs));
5752 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs),
5753 GCPhysShadowVmcs, VMX_V_VMCS_SIZE);
5754 if (RT_FAILURE(rc))
5755 {
5756 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5757 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys);
5758 }
5759
5760 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
5761 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID)
5762 { /* likely */ }
5763 else
5764 {
5765 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5766 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrRevId);
5767 }
5768
5769         /* Verify the shadow bit is set if VMCS shadowing is enabled. */
5770 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
5771 || pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.fIsShadowVmcs)
5772 { /* likely */ }
5773 else
5774 {
5775 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5776 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrShadow);
5777 }
5778
5779 /* Finally update our cache of the guest physical address of the shadow VMCS. */
5780 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs = GCPhysShadowVmcs;
5781 }
5782
5783 NOREF(pszInstr);
5784 NOREF(pszFailure);
5785 return VINF_SUCCESS;
5786}
5787
5788
5789/**
5790 * Checks if the PDPTEs referenced by the nested-guest CR3 are valid as part of
5791 * VM-entry.
5792 *
5793 * @returns @c true if all PDPTEs are valid, @c false otherwise.
5794 * @param pVCpu The cross context virtual CPU structure.
5795 * @param pszInstr The VMX instruction name (for logging purposes).
5796 * @param pVmcs Pointer to the virtual VMCS.
5797 */
5798IEM_STATIC int iemVmxVmentryCheckGuestPdptesForCr3(PVMCPU pVCpu, const char *pszInstr, PVMXVVMCS pVmcs)
5799{
5800 /*
5801 * Check PDPTEs.
5802 * See Intel spec. 4.4.1 "PDPTE Registers".
5803 */
5804 uint64_t const uGuestCr3 = pVmcs->u64GuestCr3.u & X86_CR3_PAE_PAGE_MASK;
5805 const char *const pszFailure = "VM-exit";
5806
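    /* With PAE paging, CR3 references a 32-byte aligned table of 4 PDPTEs; read them all
       directly from guest physical memory. */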
5807 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
5808 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uGuestCr3, sizeof(aPdptes));
5809 if (RT_SUCCESS(rc))
5810 {
5811 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
5812 {
5813 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
5814 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
5815 { /* likely */ }
5816 else
5817 {
5818 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
5819 VMXVDIAG const enmDiag = iemVmxGetDiagVmentryPdpteRsvd(iPdpte);
5820 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5821 }
5822 }
5823 }
5824 else
5825 {
5826 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
5827 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys);
5828 }
5829
5830 NOREF(pszFailure);
5831 return rc;
5832}
5833
5834
5835/**
5836 * Checks guest PDPTEs as part of VM-entry.
5837 *
5838 * @param pVCpu The cross context virtual CPU structure.
5839 * @param pszInstr The VMX instruction name (for logging purposes).
5840 */
5841IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPU pVCpu, const char *pszInstr)
5842{
5843 /*
5844 * Guest PDPTEs.
5845 * See Intel spec. 26.3.1.5 "Checks on Guest Page-Directory-Pointer-Table Entries".
5846 */
5847 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5848 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5849
5850     /* Check PDPTEs if the VM-entry is to a guest using PAE paging. */
5851 int rc;
5852 if ( !fGstInLongMode
5853 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE)
5854 && (pVmcs->u64GuestCr0.u & X86_CR0_PG))
5855 {
5856 /*
5857 * We don't support nested-paging for nested-guests yet.
5858 *
5859 * Without nested-paging for nested-guests, PDPTEs in the VMCS are not used,
5860 * rather we need to check the PDPTEs referenced by the guest CR3.
5861 */
5862 rc = iemVmxVmentryCheckGuestPdptesForCr3(pVCpu, pszInstr, pVmcs);
5863 }
5864 else
5865 rc = VINF_SUCCESS;
5866 return rc;
5867}
5868
5869
5870/**
5871 * Checks guest-state as part of VM-entry.
5872 *
5873 * @returns VBox status code.
5874 * @param pVCpu The cross context virtual CPU structure.
5875 * @param pszInstr The VMX instruction name (for logging purposes).
5876 */
5877IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPU pVCpu, const char *pszInstr)
5878{
5879 int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
5880 if (RT_SUCCESS(rc))
5881 {
5882 rc = iemVmxVmentryCheckGuestSegRegs(pVCpu, pszInstr);
5883 if (RT_SUCCESS(rc))
5884 {
5885 rc = iemVmxVmentryCheckGuestGdtrIdtr(pVCpu, pszInstr);
5886 if (RT_SUCCESS(rc))
5887 {
5888 rc = iemVmxVmentryCheckGuestRipRFlags(pVCpu, pszInstr);
5889 if (RT_SUCCESS(rc))
5890 {
5891 rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
5892 if (RT_SUCCESS(rc))
5893 return iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
5894 }
5895 }
5896 }
5897 }
5898 return rc;
5899}
5900
5901
5902/**
5903 * Checks host-state as part of VM-entry.
5904 *
5905 * @returns VBox status code.
5906 * @param pVCpu The cross context virtual CPU structure.
5907 * @param pszInstr The VMX instruction name (for logging purposes).
5908 */
5909IEM_STATIC int iemVmxVmentryCheckHostState(PVMCPU pVCpu, const char *pszInstr)
5910{
5911 /*
5912 * Host Control Registers and MSRs.
5913 * See Intel spec. 26.2.2 "Checks on Host Control Registers and MSRs".
5914 */
5915 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5916 const char * const pszFailure = "VMFail";
5917
5918 /* CR0 reserved bits. */
5919 {
5920 /* CR0 MB1 bits. */
5921 uint64_t const u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
5922 if ((pVmcs->u64HostCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
5923 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed0);
5924
5925 /* CR0 MBZ bits. */
5926 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
5927 if (pVmcs->u64HostCr0.u & ~u64Cr0Fixed1)
5928 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed1);
5929 }
5930
5931 /* CR4 reserved bits. */
5932 {
5933 /* CR4 MB1 bits. */
5934 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
5935 if ((pVmcs->u64HostCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
5936 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed0);
5937
5938 /* CR4 MBZ bits. */
5939 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
5940 if (pVmcs->u64HostCr4.u & ~u64Cr4Fixed1)
5941 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed1);
5942 }
5943
5944 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5945 {
5946 /* CR3 reserved bits. */
5947 if (!(pVmcs->u64HostCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
5948 { /* likely */ }
5949 else
5950 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr3);
5951
5952 /* SYSENTER ESP and SYSENTER EIP. */
5953 if ( X86_IS_CANONICAL(pVmcs->u64HostSysenterEsp.u)
5954 && X86_IS_CANONICAL(pVmcs->u64HostSysenterEip.u))
5955 { /* likely */ }
5956 else
5957 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSysenterEspEip);
5958 }
5959
5960 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
5961 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PERF_MSR));
5962
5963 /* PAT MSR. */
5964 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
5965 || CPUMIsPatMsrValid(pVmcs->u64HostPatMsr.u))
5966 { /* likely */ }
5967 else
5968 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostPatMsr);
5969
5970 /* EFER MSR. */
5971 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
5972 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
5973 || !(pVmcs->u64HostEferMsr.u & ~uValidEferMask))
5974 { /* likely */ }
5975 else
5976 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsrRsvd);
5977
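    /* The host address-space size VM-exit control must agree with both LMA and LME of the
       host IA32_EFER value. */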
5978 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
5979 bool const fHostLma = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LMA);
5980 bool const fHostLme = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LME);
5981 if ( fHostInLongMode == fHostLma
5982 && fHostInLongMode == fHostLme)
5983 { /* likely */ }
5984 else
5985 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsr);
5986
5987 /*
5988 * Host Segment and Descriptor-Table Registers.
5989 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
5990 */
5991 /* Selector RPL and TI. */
5992 if ( !(pVmcs->HostCs & (X86_SEL_RPL | X86_SEL_LDT))
5993 && !(pVmcs->HostSs & (X86_SEL_RPL | X86_SEL_LDT))
5994 && !(pVmcs->HostDs & (X86_SEL_RPL | X86_SEL_LDT))
5995 && !(pVmcs->HostEs & (X86_SEL_RPL | X86_SEL_LDT))
5996 && !(pVmcs->HostFs & (X86_SEL_RPL | X86_SEL_LDT))
5997 && !(pVmcs->HostGs & (X86_SEL_RPL | X86_SEL_LDT))
5998 && !(pVmcs->HostTr & (X86_SEL_RPL | X86_SEL_LDT)))
5999 { /* likely */ }
6000 else
6001 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSel);
6002
6003 /* CS and TR selectors cannot be 0. */
6004 if ( pVmcs->HostCs
6005 && pVmcs->HostTr)
6006 { /* likely */ }
6007 else
6008 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCsTr);
6009
6010 /* SS cannot be 0 if 32-bit host. */
6011 if ( fHostInLongMode
6012 || pVmcs->HostSs)
6013 { /* likely */ }
6014 else
6015 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSs);
6016
6017 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6018 {
6019 /* FS, GS, GDTR, IDTR, TR base address. */
6020 if ( X86_IS_CANONICAL(pVmcs->u64HostFsBase.u)
6021            && X86_IS_CANONICAL(pVmcs->u64HostGsBase.u)
6022 && X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u)
6023 && X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u)
6024 && X86_IS_CANONICAL(pVmcs->u64HostTrBase.u))
6025 { /* likely */ }
6026 else
6027 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSegBase);
6028 }
6029
6030 /*
6031 * Host address-space size for 64-bit CPUs.
6032 * See Intel spec. 26.2.4 "Checks Related to Address-Space Size".
6033 */
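        /* The checks below require the guest IA-32e mode setting (from the VM-entry controls),
           the host address-space size (from the VM-exit controls) and the CPU's current mode
           to form a consistent combination. */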
6034 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
6035 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6036 {
6037 bool const fCpuInLongMode = CPUMIsGuestInLongMode(pVCpu);
6038
6039 /* Logical processor in IA-32e mode. */
6040 if (fCpuInLongMode)
6041 {
6042 if (fHostInLongMode)
6043 {
6044 /* PAE must be set. */
6045 if (pVmcs->u64HostCr4.u & X86_CR4_PAE)
6046 { /* likely */ }
6047 else
6048 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pae);
6049
6050 /* RIP must be canonical. */
6051 if (X86_IS_CANONICAL(pVmcs->u64HostRip.u))
6052 { /* likely */ }
6053 else
6054 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRip);
6055 }
6056 else
6057 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostLongMode);
6058 }
6059 else
6060 {
6061 /* Logical processor is outside IA-32e mode. */
6062 if ( !fGstInLongMode
6063 && !fHostInLongMode)
6064 {
6065 /* PCIDE should not be set. */
6066 if (!(pVmcs->u64HostCr4.u & X86_CR4_PCIDE))
6067 { /* likely */ }
6068 else
6069 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pcide);
6070
6071 /* The high 32-bits of RIP MBZ. */
6072 if (!pVmcs->u64HostRip.s.Hi)
6073 { /* likely */ }
6074 else
6075 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRipRsvd);
6076 }
6077 else
6078 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongMode);
6079 }
6080 }
6081 else
6082 {
6083 /* Host address-space size for 32-bit CPUs. */
6084 if ( !fGstInLongMode
6085 && !fHostInLongMode)
6086 { /* likely */ }
6087 else
6088 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongModeNoCpu);
6089 }
6090
6091 NOREF(pszInstr);
6092 NOREF(pszFailure);
6093 return VINF_SUCCESS;
6094}
6095
6096
6097/**
6098 * Checks VM-entry controls fields as part of VM-entry.
6099 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
6100 *
6101 * @returns VBox status code.
6102 * @param pVCpu The cross context virtual CPU structure.
6103 * @param pszInstr The VMX instruction name (for logging purposes).
6104 */
6105IEM_STATIC int iemVmxVmentryCheckEntryCtls(PVMCPU pVCpu, const char *pszInstr)
6106{
6107 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6108 const char * const pszFailure = "VMFail";
6109
6110 /* VM-entry controls. */
6111 VMXCTLSMSR EntryCtls;
6112 EntryCtls.u = CPUMGetGuestIa32VmxEntryCtls(pVCpu);
6113 if (~pVmcs->u32EntryCtls & EntryCtls.n.disallowed0)
6114 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);
6115
6116 if (pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1)
6117 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsAllowed1);
6118
6119 /* Event injection. */
6120 uint32_t const uIntInfo = pVmcs->u32EntryIntInfo;
6121 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VALID))
6122 {
6123 /* Type and vector. */
6124 uint8_t const uType = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_TYPE);
6125 uint8_t const uVector = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VECTOR);
6126        uint32_t const uRsvd = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_RSVD_12_30);
6127 if ( !uRsvd
6128 && HMVmxIsEntryIntInfoTypeValid(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxMonitorTrapFlag, uType)
6129 && HMVmxIsEntryIntInfoVectorValid(uVector, uType))
6130 { /* likely */ }
6131 else
6132 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoTypeVecRsvd);
6133
6134 /* Exception error code. */
6135 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID))
6136 {
6137 /* Delivery possible only in Unrestricted-guest mode when CR0.PE is set. */
6138 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
6139 || (pVmcs->u64GuestCr0.s.Lo & X86_CR0_PE))
6140 { /* likely */ }
6141 else
6142 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodePe);
6143
6144 /* Exceptions that provide an error code. */
6145 if ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
6146 && ( uVector == X86_XCPT_DF
6147 || uVector == X86_XCPT_TS
6148 || uVector == X86_XCPT_NP
6149 || uVector == X86_XCPT_SS
6150 || uVector == X86_XCPT_GP
6151 || uVector == X86_XCPT_PF
6152 || uVector == X86_XCPT_AC))
6153 { /* likely */ }
6154 else
6155 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodeVec);
6156
6157 /* Exception error-code reserved bits. */
6158 if (!(pVmcs->u32EntryXcptErrCode & ~VMX_ENTRY_INT_XCPT_ERR_CODE_VALID_MASK))
6159 { /* likely */ }
6160 else
6161 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryXcptErrCodeRsvd);
6162
6163 /* Injecting a software interrupt, software exception or privileged software exception. */
6164 if ( uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
6165 || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
6166 || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
6167 {
6168 /* Instruction length must be in the range 0-15. */
6169 if (pVmcs->u32EntryInstrLen <= VMX_ENTRY_INSTR_LEN_MAX)
6170 { /* likely */ }
6171 else
6172 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLen);
6173
6174 /* Instruction length of 0 is allowed only when its CPU feature is present. */
6175 if ( pVmcs->u32EntryInstrLen == 0
6176 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEntryInjectSoftInt)
6177 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLenZero);
6178 }
6179 }
6180 }
6181
6182 /* VM-entry MSR-load count and VM-entry MSR-load area address. */
6183 if (pVmcs->u32EntryMsrLoadCount)
6184 {
6185 if ( (pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
6186 || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6187 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
6188 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrEntryMsrLoad);
6189 }
6190
6191 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)); /* We don't support SMM yet. */
6192 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON)); /* We don't support dual-monitor treatment yet. */
6193
6194 NOREF(pszInstr);
6195 NOREF(pszFailure);
6196 return VINF_SUCCESS;
6197}
6198
6199
6200/**
6201 * Checks VM-exit controls fields as part of VM-entry.
6202 * See Intel spec. 26.2.1.2 "VM-Exit Control Fields".
6203 *
6204 * @returns VBox status code.
6205 * @param pVCpu The cross context virtual CPU structure.
6206 * @param pszInstr The VMX instruction name (for logging purposes).
6207 */
6208IEM_STATIC int iemVmxVmentryCheckExitCtls(PVMCPU pVCpu, const char *pszInstr)
6209{
6210 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6211 const char * const pszFailure = "VMFail";
6212
6213 /* VM-exit controls. */
6214 VMXCTLSMSR ExitCtls;
6215 ExitCtls.u = CPUMGetGuestIa32VmxExitCtls(pVCpu);
6216 if (~pVmcs->u32ExitCtls & ExitCtls.n.disallowed0)
6217 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);
6218
6219 if (pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1)
6220 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsAllowed1);
6221
6222 /* Save preemption timer without activating it. */
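        /* (The "save VMX-preemption timer value" VM-exit control is only valid together with
            the "activate VMX-preemption timer" pin-based control; see Intel spec. 26.2.1.2.) */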
6223 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
6224        && (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
6225 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_SavePreemptTimer);
6226
6227 /* VM-exit MSR-store count and VM-exit MSR-store area address. */
6228 if (pVmcs->u32ExitMsrStoreCount)
6229 {
6230 if ( (pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
6231 || (pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6232 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
6233 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrStore);
6234 }
6235
6236 /* VM-exit MSR-load count and VM-exit MSR-load area address. */
6237 if (pVmcs->u32ExitMsrLoadCount)
6238 {
6239 if ( (pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
6240 || (pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6241 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
6242 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrLoad);
6243 }
6244
6245 NOREF(pszInstr);
6246 NOREF(pszFailure);
6247 return VINF_SUCCESS;
6248}
6249
6250
6251/**
6252 * Checks VM-execution controls fields as part of VM-entry.
6253 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
6254 *
6255 * @returns VBox status code.
6256 * @param pVCpu The cross context virtual CPU structure.
6257 * @param pszInstr The VMX instruction name (for logging purposes).
6258 *
6259 * @remarks This may update secondary-processor based VM-execution control fields
6260 * in the current VMCS if necessary.
6261 */
6262IEM_STATIC int iemVmxVmentryCheckExecCtls(PVMCPU pVCpu, const char *pszInstr)
6263{
6264 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6265 const char * const pszFailure = "VMFail";
6266
6267 /* Pin-based VM-execution controls. */
6268 {
6269 VMXCTLSMSR PinCtls;
6270 PinCtls.u = CPUMGetGuestIa32VmxPinbasedCtls(pVCpu);
6271 if (~pVmcs->u32PinCtls & PinCtls.n.disallowed0)
6272 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);
6273
6274 if (pVmcs->u32PinCtls & ~PinCtls.n.allowed1)
6275 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsAllowed1);
6276 }
6277
6278 /* Processor-based VM-execution controls. */
6279 {
6280 VMXCTLSMSR ProcCtls;
6281 ProcCtls.u = CPUMGetGuestIa32VmxProcbasedCtls(pVCpu);
6282 if (~pVmcs->u32ProcCtls & ProcCtls.n.disallowed0)
6283 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);
6284
6285 if (pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1)
6286 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsAllowed1);
6287 }
6288
6289 /* Secondary processor-based VM-execution controls. */
6290 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
6291 {
6292 VMXCTLSMSR ProcCtls2;
6293 ProcCtls2.u = CPUMGetGuestIa32VmxProcbasedCtls2(pVCpu);
6294 if (~pVmcs->u32ProcCtls2 & ProcCtls2.n.disallowed0)
6295 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);
6296
6297 if (pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1)
6298 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Allowed1);
6299 }
6300 else
6301 Assert(!pVmcs->u32ProcCtls2);
6302
6303 /* CR3-target count. */
6304 if (pVmcs->u32Cr3TargetCount <= VMX_V_CR3_TARGET_COUNT)
6305 { /* likely */ }
6306 else
6307 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Cr3TargetCount);
6308
6309 /* I/O bitmaps physical addresses. */
6310 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
6311 {
6312 if ( (pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
6313 || (pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6314 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
6315 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapA);
6316
6317 if ( (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
6318 || (pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6319 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
6320 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
6321 }
6322
6323 /* MSR bitmap physical address. */
6324 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
6325 {
6326 RTGCPHYS const GCPhysMsrBitmap = pVmcs->u64AddrMsrBitmap.u;
6327 if ( (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
6328 || (GCPhysMsrBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6329 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysMsrBitmap))
6330 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);
6331
6332 /* Read the MSR bitmap. */
6333 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
6334 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap),
6335 GCPhysMsrBitmap, VMX_V_MSR_BITMAP_SIZE);
6336 if (RT_FAILURE(rc))
6337 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrBitmapPtrReadPhys);
6338 }
6339
6340 /* TPR shadow related controls. */
6341 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
6342 {
6343 /* Virtual-APIC page physical address. */
6344 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
6345 if ( (GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
6346 || (GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6347 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
6348 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
6349
6350 /* Read the Virtual-APIC page. */
6351 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
6352 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage),
6353 GCPhysVirtApic, VMX_V_VIRT_APIC_PAGES);
6354 if (RT_FAILURE(rc))
6355 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
6356
6357 /* TPR threshold without virtual-interrupt delivery. */
6358 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
6359 && (pVmcs->u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK))
6360 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdRsvd);
6361
6362 /* TPR threshold and VTPR. */
6363 uint8_t const *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
6364 uint8_t const u8VTpr = *(pbVirtApic + XAPIC_OFF_TPR);
6365 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
6366 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
6367 && RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) > ((u8VTpr >> 4) & UINT32_C(0xf)) /* Bits 4:7 */)
6368 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
6369 }
6370 else
6371 {
6372 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6373 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
6374 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
6375 { /* likely */ }
6376 else
6377 {
6378 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6379 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicTprShadow);
6380 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
6381 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ApicRegVirt);
6382 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
6383 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtIntDelivery);
6384 }
6385 }
6386
6387 /* NMI exiting and virtual-NMIs. */
6388 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
6389 && (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
6390 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtNmi);
6391
6392 /* Virtual-NMIs and NMI-window exiting. */
6393 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6394 && (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
6395 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_NmiWindowExit);
6396
6397 /* Virtualize APIC accesses. */
6398 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
6399 {
6400 /* APIC-access physical address. */
6401 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
6402 if ( (GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
6403 || (GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6404 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
6405 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);
6406
6407 /*
6408 * Disallow APIC-access page and virtual-APIC page from being the same address.
6409 * Note! This is not an Intel requirement, but one imposed by our implementation.
6410 */
6411 /** @todo r=ramshankar: This is done primarily to simplify recursion scenarios while
6412 * redirecting accesses between the APIC-access page and the virtual-APIC
6413 * page. If any nested hypervisor requires this, we can implement it later. */
6414 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
6415 {
6416 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
6417 if (GCPhysVirtApic == GCPhysApicAccess)
6418 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessEqVirtApic);
6419 }
6420
6421 /*
6422 * Register the handler for the APIC-access page.
6423 *
6424 * We don't deregister the APIC-access page handler during the VM-exit as a different
6425 * nested-VCPU might be using the same guest-physical address for its APIC-access page.
6426 *
6427 * We leave the page registered until the first access that happens outside VMX non-root
6428 * mode. Guest software is allowed to access structures such as the APIC-access page
6429 * only when no logical processor with a current VMCS references it in VMX non-root mode,
6430 * otherwise it can lead to unpredictable behavior including guest triple-faults.
6431 *
6432 * See Intel spec. 24.11.4 "Software Access to Related Structures".
6433 */
6434 int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess,
6435 pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
6436 NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
6437 if (RT_FAILURE(rc))
6438 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessHandlerReg);
6439 }
6440
6441 /* Virtualize-x2APIC mode is mutually exclusive with virtualize-APIC accesses. */
6442 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6443 && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
6444 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
6445
6446 /* Virtual-interrupt delivery requires external interrupt exiting. */
6447 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
6448 && !(pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
6449 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
6450
6451 /* VPID. */
6452 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VPID)
6453 || pVmcs->u16Vpid != 0)
6454 { /* likely */ }
6455 else
6456 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Vpid);
6457
6458 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_POSTED_INT)); /* We don't support posted interrupts yet. */
6459 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)); /* We don't support EPT yet. */
6460 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PML)); /* We don't support PML yet. */
6461 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)); /* We don't support Unrestricted-guests yet. */
6462 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMFUNC)); /* We don't support VM functions yet. */
6463 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT_VE)); /* We don't support EPT-violation #VE yet. */
6464 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)); /* We don't support Pause-loop exiting yet. */
6465
6466 /* VMCS shadowing. */
6467 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
6468 {
6469 /* VMREAD-bitmap physical address. */
6470 RTGCPHYS const GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
6471 if ( ( GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
6472 || ( GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6473 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
6474 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmreadBitmap);
6475
6476 /* VMWRITE-bitmap physical address. */
6477        RTGCPHYS const GCPhysVmwriteBitmap = pVmcs->u64AddrVmwriteBitmap.u;
6478 if ( ( GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
6479 || ( GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6480 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
6481 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);
6482
6483 /* Read the VMREAD-bitmap. */
6484 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
6485 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap),
6486 GCPhysVmreadBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
6487 if (RT_FAILURE(rc))
6488 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys);
6489
6490 /* Read the VMWRITE-bitmap. */
6491 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap));
6492 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap),
6493 GCPhysVmwriteBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
6494 if (RT_FAILURE(rc))
6495 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys);
6496 }
6497
6498 NOREF(pszInstr);
6499 NOREF(pszFailure);
6500 return VINF_SUCCESS;
6501}
6502
6503
6504/**
6505 * Loads the guest control registers, debug register and some MSRs as part of
6506 * VM-entry.
6507 *
6508 * @param pVCpu The cross context virtual CPU structure.
6509 */
6510IEM_STATIC void iemVmxVmentryLoadGuestControlRegsMsrs(PVMCPU pVCpu)
6511{
6512 /*
6513 * Load guest control registers, debug registers and MSRs.
6514 * See Intel spec. 26.3.2.1 "Loading Guest Control Registers, Debug Registers and MSRs".
6515 */
6516 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
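        /* Bits in VMX_ENTRY_CR0_IGNORE_MASK (such as CR0.CD, CR0.NW and certain fixed/reserved
           bits) are not loaded from the VMCS and keep their current values, see Intel spec. 26.3.2.1. */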
6517 uint64_t const uGstCr0 = (pVmcs->u64GuestCr0.u & ~VMX_ENTRY_CR0_IGNORE_MASK)
6518 | (pVCpu->cpum.GstCtx.cr0 & VMX_ENTRY_CR0_IGNORE_MASK);
6519 CPUMSetGuestCR0(pVCpu, uGstCr0);
6520 CPUMSetGuestCR4(pVCpu, pVmcs->u64GuestCr4.u);
6521 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64GuestCr3.u;
6522
6523 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
6524 pVCpu->cpum.GstCtx.dr[7] = (pVmcs->u64GuestDr7.u & ~VMX_ENTRY_DR7_MBZ_MASK) | VMX_ENTRY_DR7_MB1_MASK;
6525
6526 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64GuestSysenterEip.s.Lo;
6527 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64GuestSysenterEsp.s.Lo;
6528 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32GuestSysenterCS;
6529
6530 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6531 {
6532 /* FS base and GS base are loaded while loading the rest of the guest segment registers. */
6533
6534 /* EFER MSR. */
6535 if (!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR))
6536 {
6537 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
6538 bool const fGstPaging = RT_BOOL(uGstCr0 & X86_CR0_PG);
6539 uint64_t const uHostEfer = pVCpu->cpum.GstCtx.msrEFER;
6540 if (fGstInLongMode)
6541 {
6542 /* If the nested-guest is in long mode, LMA and LME are both set. */
6543 Assert(fGstPaging);
6544 pVCpu->cpum.GstCtx.msrEFER = uHostEfer | (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
6545 }
6546 else
6547 {
6548 /*
6549 * If the nested-guest is outside long mode:
6550 * - With paging: LMA is cleared, LME is cleared.
6551 * - Without paging: LMA is cleared, LME is left unmodified.
6552 */
6553 uint64_t const fLmaLmeMask = MSR_K6_EFER_LMA | (fGstPaging ? MSR_K6_EFER_LME : 0);
6554 pVCpu->cpum.GstCtx.msrEFER = uHostEfer & ~fLmaLmeMask;
6555 }
6556 }
6557 /* else: see below. */
6558 }
6559
6560 /* PAT MSR. */
6561 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
6562 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64GuestPatMsr.u;
6563
6564 /* EFER MSR. */
6565 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
6566 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64GuestEferMsr.u;
6567
6568 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
6569 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
6570
6571 /* We don't support IA32_BNDCFGS MSR yet. */
6572 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
6573
6574 /* Nothing to do for SMBASE register - We don't support SMM yet. */
6575}
6576
6577
6578/**
6579 * Loads the guest segment registers, GDTR, IDTR, LDTR and TR as part of VM-entry.
6580 *
6581 * @param pVCpu The cross context virtual CPU structure.
6582 */
6583IEM_STATIC void iemVmxVmentryLoadGuestSegRegs(PVMCPU pVCpu)
6584{
6585 /*
6586 * Load guest segment registers, GDTR, IDTR, LDTR and TR.
6587 * See Intel spec. 26.3.2.2 "Loading Guest Segment Registers and Descriptor-Table Registers".
6588 */
6589 /* CS, SS, ES, DS, FS, GS. */
6590 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6591 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
6592 {
6593 PCPUMSELREG pGstSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6594 CPUMSELREG VmcsSelReg;
6595 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &VmcsSelReg);
6596 AssertRC(rc); NOREF(rc);
6597 if (!(VmcsSelReg.Attr.u & X86DESCATTR_UNUSABLE))
6598 {
6599 pGstSelReg->Sel = VmcsSelReg.Sel;
6600 pGstSelReg->ValidSel = VmcsSelReg.Sel;
6601 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6602 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6603 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
6604 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
6605 }
6606 else
6607 {
6608 pGstSelReg->Sel = VmcsSelReg.Sel;
6609 pGstSelReg->ValidSel = VmcsSelReg.Sel;
6610 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6611 switch (iSegReg)
6612 {
6613 case X86_SREG_CS:
6614 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6615 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
6616 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
6617 break;
6618
6619 case X86_SREG_SS:
6620 pGstSelReg->u64Base = VmcsSelReg.u64Base & UINT32_C(0xfffffff0);
6621 pGstSelReg->u32Limit = 0;
6622 pGstSelReg->Attr.u = (VmcsSelReg.Attr.u & X86DESCATTR_DPL) | X86DESCATTR_D | X86DESCATTR_UNUSABLE;
6623 break;
6624
6625 case X86_SREG_ES:
6626 case X86_SREG_DS:
6627 pGstSelReg->u64Base = 0;
6628 pGstSelReg->u32Limit = 0;
6629 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
6630 break;
6631
6632 case X86_SREG_FS:
6633 case X86_SREG_GS:
6634 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6635 pGstSelReg->u32Limit = 0;
6636 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
6637 break;
6638 }
6639 Assert(pGstSelReg->Attr.n.u1Unusable);
6640 }
6641 }
6642
6643 /* LDTR. */
6644 pVCpu->cpum.GstCtx.ldtr.Sel = pVmcs->GuestLdtr;
6645 pVCpu->cpum.GstCtx.ldtr.ValidSel = pVmcs->GuestLdtr;
6646 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
6647 if (!(pVmcs->u32GuestLdtrAttr & X86DESCATTR_UNUSABLE))
6648 {
6649 pVCpu->cpum.GstCtx.ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
6650 pVCpu->cpum.GstCtx.ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
6651 pVCpu->cpum.GstCtx.ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
6652 }
6653 else
6654 {
6655 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
6656 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
6657 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
6658 }
6659
6660 /* TR. */
6661 Assert(!(pVmcs->u32GuestTrAttr & X86DESCATTR_UNUSABLE));
6662 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->GuestTr;
6663 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->GuestTr;
6664 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
6665 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64GuestTrBase.u;
6666 pVCpu->cpum.GstCtx.tr.u32Limit = pVmcs->u32GuestTrLimit;
6667 pVCpu->cpum.GstCtx.tr.Attr.u = pVmcs->u32GuestTrAttr;
6668
6669 /* GDTR. */
6670 pVCpu->cpum.GstCtx.gdtr.cbGdt = pVmcs->u32GuestGdtrLimit;
6671 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64GuestGdtrBase.u;
6672
6673 /* IDTR. */
6674 pVCpu->cpum.GstCtx.idtr.cbIdt = pVmcs->u32GuestIdtrLimit;
6675 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64GuestIdtrBase.u;
6676}
6677
6678
6679/**
6680 * Loads the guest MSRs from the VM-entry auto-load MSRs as part of VM-entry.
6681 *
6682 * @returns VBox status code.
6683 * @param pVCpu The cross context virtual CPU structure.
6684 * @param pszInstr The VMX instruction name (for logging purposes).
6685 */
6686IEM_STATIC int iemVmxVmentryLoadGuestAutoMsrs(PVMCPU pVCpu, const char *pszInstr)
6687{
6688 /*
6689 * Load guest MSRs.
6690 * See Intel spec. 26.4 "Loading MSRs".
6691 */
6692 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6693 const char *const pszFailure = "VM-exit";
6694
6695 /*
6696 * The VM-entry MSR-load area address need not be a valid guest-physical address if the
6697 * VM-entry MSR load count is 0. If this is the case, bail early without reading it.
6698 * See Intel spec. 24.8.2 "VM-Entry Controls for MSRs".
6699 */
6700 uint32_t const cMsrs = pVmcs->u32EntryMsrLoadCount;
6701 if (!cMsrs)
6702 return VINF_SUCCESS;
6703
6704 /*
6705 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count is
6706     * exceeded, including possibly raising #MC exceptions during the VMX transition. Our
6707     * implementation shall fail VM-entry with a VMX_EXIT_ERR_MSR_LOAD VM-exit.
6708 */
6709 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
6710 if (fIsMsrCountValid)
6711 { /* likely */ }
6712 else
6713 {
6714 iemVmxVmcsSetExitQual(pVCpu, VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
6715 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadCount);
6716 }
6717
6718 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrEntryMsrLoad.u;
6719    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
6720 GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
6721 if (RT_SUCCESS(rc))
6722 {
6723 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
6724 Assert(pMsr);
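            /* Entries with reserved bits set, or that target FS/GS base, EFER, IA32_SMM_MONITOR_CTL
               or any MSR in the x2APIC range (0x800..0x8ff), make the VM-entry fail further below. */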
6725 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
6726 {
6727 if ( !pMsr->u32Reserved
6728 && pMsr->u32Msr != MSR_K8_FS_BASE
6729 && pMsr->u32Msr != MSR_K8_GS_BASE
6730 && pMsr->u32Msr != MSR_K6_EFER
6731 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
6732 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
6733 {
6734 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
6735 if (rcStrict == VINF_SUCCESS)
6736 continue;
6737
6738             * If we're in ring-0, we cannot handle returns to ring-3 at this point and still continue VM-entry.
6739             * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VM-entry failure,
6740             * recording the MSR index in the VM-exit qualification (as per the Intel spec.) and indicating it
6741             * further with our own, specific diagnostic code. Later, we can try to implement handling of the
6742             * MSR in ring-0 if possible, or come up with a better, generic solution.
6743 * MSR in ring-0 if possible, or come up with a better, generic solution.
6744 */
6745 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
6746 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
6747 ? kVmxVDiag_Vmentry_MsrLoadRing3
6748 : kVmxVDiag_Vmentry_MsrLoad;
6749 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
6750 }
6751 else
6752 {
6753 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
6754 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadRsvd);
6755 }
6756 }
6757 }
6758 else
6759 {
6760 AssertMsgFailed(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysAutoMsrArea, rc));
6761 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadPtrReadPhys);
6762 }
6763
6764 NOREF(pszInstr);
6765 NOREF(pszFailure);
6766 return VINF_SUCCESS;
6767}
6768
6769
6770/**
6771 * Loads the guest-state non-register state as part of VM-entry.
6772 *
6773 * @returns VBox status code.
6774 * @param pVCpu The cross context virtual CPU structure.
6775 *
6776 * @remarks This must be called only after loading the nested-guest register state
6777 * (especially nested-guest RIP).
6778 */
6779IEM_STATIC void iemVmxVmentryLoadGuestNonRegState(PVMCPU pVCpu)
6780{
6781 /*
6782 * Load guest non-register state.
6783 * See Intel spec. 26.6 "Special Features of VM Entry"
6784 */
6785 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6786 uint32_t const uEntryIntInfo = pVmcs->u32EntryIntInfo;
6787 if (!HMVmxIsVmentryVectoring(uEntryIntInfo))
6788 {
6789 if (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
6790 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
6791 else
6792 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
6793
6794 /* SMI blocking is irrelevant. We don't support SMIs yet. */
6795 }
6796 else
6797 {
6798        /* When the VM-entry is vectoring, there is no blocking by STI or Mov-SS. */
6799 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6800 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6801 }
6802
6803 /* NMI blocking. */
6804 if ( (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
6805 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
6806 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6807
6808 /** @todo NSTVMX: Pending debug exceptions. */
6809 Assert(!(pVmcs->u64GuestPendingDbgXcpt.u));
6810
6811    /* Loading PDPTEs will be taken care of when we switch modes. We don't support EPT yet. */
6812 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
6813
6814 /* VPID is irrelevant. We don't support VPID yet. */
6815
6816 /* Clear address-range monitoring. */
6817 EMMonitorWaitClear(pVCpu);
6818}
6819
6820
6821/**
6822 * Loads the guest-state as part of VM-entry.
6823 *
6824 * @returns VBox status code.
6825 * @param pVCpu The cross context virtual CPU structure.
6826 * @param pszInstr The VMX instruction name (for logging purposes).
6827 *
6828 * @remarks This must be done after all the necessary steps prior to loading of
6829 * guest-state (e.g. checking various VMCS state).
6830 */
6831IEM_STATIC int iemVmxVmentryLoadGuestState(PVMCPU pVCpu, const char *pszInstr)
6832{
6833 iemVmxVmentryLoadGuestControlRegsMsrs(pVCpu);
6834 iemVmxVmentryLoadGuestSegRegs(pVCpu);
6835
6836 /*
6837 * Load guest RIP, RSP and RFLAGS.
6838 * See Intel spec. 26.3.2.3 "Loading Guest RIP, RSP and RFLAGS".
6839 */
6840 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6841 pVCpu->cpum.GstCtx.rsp = pVmcs->u64GuestRsp.u;
6842 pVCpu->cpum.GstCtx.rip = pVmcs->u64GuestRip.u;
6843 pVCpu->cpum.GstCtx.rflags.u = pVmcs->u64GuestRFlags.u;
6844
6845 /* Initialize the PAUSE-loop controls as part of VM-entry. */
6846 pVCpu->cpum.GstCtx.hwvirt.vmx.uFirstPauseLoopTick = 0;
6847 pVCpu->cpum.GstCtx.hwvirt.vmx.uPrevPauseTick = 0;
6848
6849 iemVmxVmentryLoadGuestNonRegState(pVCpu);
6850
6851 NOREF(pszInstr);
6852 return VINF_SUCCESS;
6853}
6854
6855
6856/**
6857 * Set up the VMX-preemption timer.
6858 *
6859 * @param pVCpu The cross context virtual CPU structure.
6860 * @param pszInstr The VMX instruction name (for logging purposes).
6861 */
6862IEM_STATIC void iemVmxVmentrySetupPreemptTimer(PVMCPU pVCpu, const char *pszInstr)
6863{
6864 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6865 Assert(pVmcs);
6866 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
6867 {
6868 uint64_t const uVmentryTick = TMCpuTickGetNoCheck(pVCpu);
6869 pVCpu->cpum.GstCtx.hwvirt.vmx.uVmentryTick = uVmentryTick;
6870 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER);
6871
6872 Log(("%s: VM-entry set up VMX-preemption timer at %#RX64\n", pszInstr, uVmentryTick));
6873 }
6874 else
6875 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
6876
6877 NOREF(pszInstr);
6878}
6879
6880
6881/**
6882 * Performs event injection (if any) as part of VM-entry.
6883 *
6884 * @param pVCpu The cross context virtual CPU structure.
6885 * @param pszInstr The VMX instruction name (for logging purposes).
6886 */
6887IEM_STATIC int iemVmxVmentryInjectEvent(PVMCPU pVCpu, const char *pszInstr)
6888{
6889 /*
6890 * Inject events.
6891 * See Intel spec. 26.5 "Event Injection".
6892 */
6893 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6894    uint32_t const uEntryIntInfo = pVmcs->u32EntryIntInfo;
6895 if (VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
6896 {
6897 /*
6898 * The event that is going to be made pending for injection is not subject to VMX intercepts,
6899         * thus we flag ignoring of intercepts. However, any recursive exceptions raised during delivery
6900         * of the current event -are- subject to intercepts, hence this flag will be flipped during
6901         * the actual delivery of this event.
6902 */
6903 pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents = false;
6904
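            /*
             * An "other event" type with the MTF vector signals a pending monitor-trap-flag (MTF)
             * VM-exit across VM-entry; in that case we only set the MTF force-flag and do not
             * inject anything into the guest.
             */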
6905 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
6906 if (uType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT)
6907 {
6908 Assert(VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo) == VMX_ENTRY_INT_INFO_VECTOR_MTF);
6909 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_MTF);
6910 return VINF_SUCCESS;
6911 }
6912
6913 int rc = HMVmxEntryIntInfoInjectTrpmEvent(pVCpu, uEntryIntInfo, pVmcs->u32EntryXcptErrCode, pVmcs->u32EntryInstrLen,
6914 pVCpu->cpum.GstCtx.cr2);
6915 AssertRCReturn(rc, rc);
6916 }
6917
6918 NOREF(pszInstr);
6919 return VINF_SUCCESS;
6920}
6921
6922
6923/**
6924 * VMLAUNCH/VMRESUME instruction execution worker.
6925 *
6926 * @returns Strict VBox status code.
6927 * @param pVCpu The cross context virtual CPU structure.
6928 * @param cbInstr The instruction length in bytes.
6929 * @param uInstrId The instruction identity (VMXINSTRID_VMLAUNCH or
6930 * VMXINSTRID_VMRESUME).
6931 * @param pExitInfo Pointer to the VM-exit instruction information struct.
6932 * Optional, can be NULL.
6933 *
6934 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
6935 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
6936 */
6937IEM_STATIC VBOXSTRICTRC iemVmxVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId, PCVMXVEXITINFO pExitInfo)
6938{
6939 Assert( uInstrId == VMXINSTRID_VMLAUNCH
6940 || uInstrId == VMXINSTRID_VMRESUME);
6941 const char *pszInstr = uInstrId == VMXINSTRID_VMRESUME ? "vmresume" : "vmlaunch";
6942
6943 /* Nested-guest intercept. */
6944 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6945 {
6946 if (pExitInfo)
6947 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
6948 uint32_t const uExitReason = uInstrId == VMXINSTRID_VMRESUME ? VMX_EXIT_VMRESUME : VMX_EXIT_VMLAUNCH;
6949 return iemVmxVmexitInstrNeedsInfo(pVCpu, uExitReason, uInstrId, cbInstr);
6950 }
6951
6952 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
6953
6954 /* CPL. */
6955 if (pVCpu->iem.s.uCpl > 0)
6956 {
6957 Log(("%s: CPL %u -> #GP(0)\n", pszInstr, pVCpu->iem.s.uCpl));
6958 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_Cpl;
6959 return iemRaiseGeneralProtectionFault0(pVCpu);
6960 }
6961
6962 /* Current VMCS valid. */
6963 if (!IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
6964 {
6965 Log(("%s: VMCS pointer %#RGp invalid -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
6966 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrInvalid;
6967 iemVmxVmFailInvalid(pVCpu);
6968 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6969 return VINF_SUCCESS;
6970 }
6971
6972 /** @todo Distinguish block-by-MOV-SS from block-by-STI. Currently we
6973 * use block-by-STI here which is not quite correct. */
6974 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
6975 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
6976 {
6977 Log(("%s: VM entry with events blocked by MOV SS -> VMFail\n", pszInstr));
6978 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_BlocKMovSS;
6979 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_BLOCK_MOVSS);
6980 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6981 return VINF_SUCCESS;
6982 }
6983
6984 if (uInstrId == VMXINSTRID_VMLAUNCH)
6985 {
6986 /* VMLAUNCH with non-clear VMCS. */
6987 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_STATE_CLEAR)
6988 { /* likely */ }
6989 else
6990 {
6991 Log(("vmlaunch: VMLAUNCH with non-clear VMCS -> VMFail\n"));
6992 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsClear;
6993 iemVmxVmFail(pVCpu, VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS);
6994 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6995 return VINF_SUCCESS;
6996 }
6997 }
6998 else
6999 {
7000 /* VMRESUME with non-launched VMCS. */
7001 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_STATE_LAUNCHED)
7002 { /* likely */ }
7003 else
7004 {
7005 Log(("vmresume: VMRESUME with non-launched VMCS -> VMFail\n"));
7006 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsLaunch;
7007 iemVmxVmFail(pVCpu, VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS);
7008 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7009 return VINF_SUCCESS;
7010 }
7011 }
7012
7013 /*
7014 * Load the current VMCS.
7015 */
7016 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
7017 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs),
7018 IEM_VMX_GET_CURRENT_VMCS(pVCpu), VMX_V_VMCS_SIZE);
7019 if (RT_FAILURE(rc))
7020 {
7021 Log(("%s: Failed to read VMCS at %#RGp, rc=%Rrc\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu), rc));
7022 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrReadPhys;
7023 return rc;
7024 }
7025
7026 /*
7027 * We are allowed to cache VMCS related data structures (such as I/O bitmaps, MSR bitmaps)
7028 * while entering VMX non-root mode. We do some of this while checking VM-execution
7029 * controls. The guest hypervisor should not make assumptions and cannot expect
7030 * predictable behavior if changes to these structures are made in guest memory while
7031 * executing in VMX non-root mode. As far as VirtualBox is concerned, the guest cannot
7032     * modify them anyway as we cache them in host memory. We trade memory for speed here.
7033 *
7034 * See Intel spec. 24.11.4 "Software Access to Related Structures".
7035 */
7036 rc = iemVmxVmentryCheckExecCtls(pVCpu, pszInstr);
7037 if (RT_SUCCESS(rc))
7038 {
7039 rc = iemVmxVmentryCheckExitCtls(pVCpu, pszInstr);
7040 if (RT_SUCCESS(rc))
7041 {
7042 rc = iemVmxVmentryCheckEntryCtls(pVCpu, pszInstr);
7043 if (RT_SUCCESS(rc))
7044 {
7045 rc = iemVmxVmentryCheckHostState(pVCpu, pszInstr);
7046 if (RT_SUCCESS(rc))
7047 {
7048 /* Initialize the VM-exit qualification field as it MBZ for VM-exits where it isn't specified. */
7049 iemVmxVmcsSetExitQual(pVCpu, 0);
7050
7051 /*
7052                     * Blocking of NMIs needs to be restored if VM-entry fails due to invalid guest state.
7053                     * So we save the required force flags here (currently only VMCPU_FF_BLOCK_NMIS) so we
7054                     * can restore them on VM-exit when required.
7055 */
7056 iemVmxVmentrySaveForceFlags(pVCpu);
7057
7058 rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
7059 if (RT_SUCCESS(rc))
7060 {
7061 rc = iemVmxVmentryLoadGuestState(pVCpu, pszInstr);
7062 if (RT_SUCCESS(rc))
7063 {
7064 rc = iemVmxVmentryLoadGuestAutoMsrs(pVCpu, pszInstr);
7065 if (RT_SUCCESS(rc))
7066 {
7067 Assert(rc != VINF_CPUM_R3_MSR_WRITE);
7068
7069 /* VMLAUNCH instruction must update the VMCS launch state. */
7070 if (uInstrId == VMXINSTRID_VMLAUNCH)
7071 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = VMX_V_VMCS_STATE_LAUNCHED;
7072
7073 /* Perform the VMX transition (PGM updates). */
7074 VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
7075 if (rcStrict == VINF_SUCCESS)
7076 { /* likely */ }
7077 else if (RT_SUCCESS(rcStrict))
7078 {
7079 Log3(("%s: iemVmxWorldSwitch returns %Rrc -> Setting passup status\n", pszInstr,
7080 VBOXSTRICTRC_VAL(rcStrict)));
7081 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7082 }
7083 else
7084 {
7085 Log3(("%s: iemVmxWorldSwitch failed! rc=%Rrc\n", pszInstr, VBOXSTRICTRC_VAL(rcStrict)));
7086 return rcStrict;
7087 }
7088
7089 /* We've now entered nested-guest execution. */
7090 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
7091
7092 /*
7093 * The priority of potential VM-exits during VM-entry is important.
7094 * The priorities of VM-exits and events are listed from highest
7095 * to lowest as follows:
7096 *
7097 * 1. Event injection.
7098 * 2. TPR below threshold / APIC-write.
7099 * 3. SMI.
7100 * 4. INIT.
7101 * 5. MTF exit.
7102 * 6. Pending debug exceptions.
7103 * 7. Debug-trap exceptions.
7104 * 8. VMX-preemption timer.
7105 * 9. NMI-window exit.
7106 * 10. NMI injection.
7107 * 11. Interrupt-window exit.
7108 * 12. Interrupt injection.
7109 */
7110
7111 /* Setup the VMX-preemption timer. */
7112 iemVmxVmentrySetupPreemptTimer(pVCpu, pszInstr);
7113
7114 /* Now that we've switched page tables, we can inject events if any. */
7115 iemVmxVmentryInjectEvent(pVCpu, pszInstr);
7116
7117 return VINF_SUCCESS;
7118 }
7119 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED);
7120 }
7121 }
7122 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED);
7123 }
7124
7125 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_HOST_STATE);
7126 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7127 return VINF_SUCCESS;
7128 }
7129 }
7130 }
7131
7132 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
7133 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7134 return VINF_SUCCESS;
7135}
7136
7137
7138/**
7139 * Checks whether an RDMSR or WRMSR instruction for the given MSR is intercepted
7140 * (causes a VM-exit) or not.
7141 *
7142 * @returns @c true if the instruction is intercepted, @c false otherwise.
7143 * @param pVCpu The cross context virtual CPU structure.
7144 * @param uExitReason The VM-exit exit reason (VMX_EXIT_RDMSR or
7145 * VMX_EXIT_WRMSR).
7146 * @param idMsr The MSR.
7147 */
7148IEM_STATIC bool iemVmxIsRdmsrWrmsrInterceptSet(PVMCPU pVCpu, uint32_t uExitReason, uint32_t idMsr)
7149{
7150 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
7151 Assert( uExitReason == VMX_EXIT_RDMSR
7152 || uExitReason == VMX_EXIT_WRMSR);
7153
7154 /* Consult the MSR bitmap if the feature is supported. */
7155 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7156 Assert(pVmcs);
7157 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7158 {
7159 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
7160 if (uExitReason == VMX_EXIT_RDMSR)
7161 {
7162 VMXMSREXITREAD enmRead;
7163 int rc = HMVmxGetMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, &enmRead,
7164 NULL /* penmWrite */);
7165 AssertRC(rc);
7166 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
7167 return true;
7168 }
7169 else
7170 {
7171 VMXMSREXITWRITE enmWrite;
7172 int rc = HMVmxGetMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, NULL /* penmRead */,
7173 &enmWrite);
7174 AssertRC(rc);
7175 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
7176 return true;
7177 }
7178 return false;
7179 }
7180
7181 /* Without MSR bitmaps, all MSR accesses are intercepted. */
7182 return true;
7183}
7184
7185
7186/**
7187 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field is
7188 * intercepted (causes a VM-exit) or not.
7189 *
7190 * @returns @c true if the instruction is intercepted, @c false otherwise.
7191 * @param pVCpu The cross context virtual CPU structure.
7192 * @param   uExitReason     The VM-exit exit reason (VMX_EXIT_VMREAD or
7193 *                          VMX_EXIT_VMWRITE).
7194 * @param   u64FieldEnc     The VMCS field encoding.
7195 */
7196IEM_STATIC bool iemVmxIsVmreadVmwriteInterceptSet(PVMCPU pVCpu, uint32_t uExitReason, uint64_t u64FieldEnc)
7197{
7198 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
7199 Assert( uExitReason == VMX_EXIT_VMREAD
7200 || uExitReason == VMX_EXIT_VMWRITE);
7201
7202 /* Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted. */
7203 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing)
7204 return true;
7205
7206 /*
7207 * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE is intercepted.
7208 * This excludes any reserved bits in the valid parts of the field encoding (i.e. bit 12).
7209 */
7210 if (u64FieldEnc & VMX_VMCS_ENC_RSVD_MASK)
7211 return true;
7212
7213 /* Finally, consult the VMREAD/VMWRITE bitmap whether to intercept the instruction or not. */
7214 uint32_t u32FieldEnc = RT_LO_U32(u64FieldEnc);
7215 Assert(u32FieldEnc >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
7216 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
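        /* One bit per VMCS field encoding: byte index = encoding / 8, bit position = encoding % 8.
           A set bit in the respective bitmap means the access causes a VM-exit. */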
7217 uint8_t const *pbBitmap = uExitReason == VMX_EXIT_VMREAD
7218 ? (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap)
7219 : (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap);
7220 pbBitmap += (u32FieldEnc >> 3);
7221 if (*pbBitmap & RT_BIT(u32FieldEnc & 7))
7222 return true;
7223
7224 return false;
7225}
7226
7227
7228/**
7229 * VMREAD common (memory/register) instruction execution worker.
7230 *
7231 * @returns Strict VBox status code.
7232 * @param pVCpu The cross context virtual CPU structure.
7233 * @param cbInstr The instruction length in bytes.
7234 * @param pu64Dst Where to write the VMCS value (only updated when
7235 * VINF_SUCCESS is returned).
7236 * @param u64FieldEnc The VMCS field encoding.
7237 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7238 * be NULL.
7239 */
7240IEM_STATIC VBOXSTRICTRC iemVmxVmreadCommon(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
7241 PCVMXVEXITINFO pExitInfo)
7242{
7243 /* Nested-guest intercept. */
7244 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7245 && iemVmxIsVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMREAD, u64FieldEnc))
7246 {
7247 if (pExitInfo)
7248 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7249 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMREAD, VMXINSTRID_VMREAD, cbInstr);
7250 }
7251
7252 /* CPL. */
7253 if (pVCpu->iem.s.uCpl > 0)
7254 {
7255 Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7256 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_Cpl;
7257 return iemRaiseGeneralProtectionFault0(pVCpu);
7258 }
7259
7260 /* VMCS pointer in root mode. */
7261 if ( IEM_VMX_IS_ROOT_MODE(pVCpu)
7262 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
7263 {
7264 Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7265 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrInvalid;
7266 iemVmxVmFailInvalid(pVCpu);
7267 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7268 return VINF_SUCCESS;
7269 }
7270
7271 /* VMCS-link pointer in non-root mode. */
7272 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7273 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
7274 {
7275 Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
7276 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_LinkPtrInvalid;
7277 iemVmxVmFailInvalid(pVCpu);
7278 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7279 return VINF_SUCCESS;
7280 }
7281
7282 /* Supported VMCS field. */
7283    if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
7284 {
7285 Log(("vmread: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
7286 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_FieldInvalid;
7287 iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
7288 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7289 return VINF_SUCCESS;
7290 }
7291
7292 /*
7293 * Setup reading from the current or shadow VMCS.
7294 */
7295 uint8_t *pbVmcs;
7296 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7297 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
7298 else
7299 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7300 Assert(pbVmcs);
7301
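        /* Decompose the field encoding: the width (16-bit, 64-bit, 32-bit or natural-width), the
           type (control, VM-exit information, guest-state or host-state) and the index together
           select the field's offset in our virtual VMCS layout via g_aoffVmcsMap. */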
7302 VMXVMCSFIELDENC FieldEnc;
7303 FieldEnc.u = RT_LO_U32(u64FieldEnc);
7304 uint8_t const uWidth = FieldEnc.n.u2Width;
7305 uint8_t const uType = FieldEnc.n.u2Type;
7306 uint8_t const uWidthType = (uWidth << 2) | uType;
7307 uint8_t const uIndex = FieldEnc.n.u8Index;
7308 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
7309 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
7310
7311 /*
7312 * Read the VMCS component based on the field's effective width.
7313 *
7314     * The effective width is the field width, with 64-bit fields adjusted to 32 bits
7315     * when the access-type indicates the high part (little endian).
7316     *
7317     * Note! The caller is responsible for trimming the result and updating registers
7318     * or memory locations as required. Here we just zero-extend to the largest
7319     * type (i.e. 64 bits).
7320 */
7321 uint8_t *pbField = pbVmcs + offField;
7322 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
7323 switch (uEffWidth)
7324 {
7325 case VMX_VMCS_ENC_WIDTH_64BIT:
7326 case VMX_VMCS_ENC_WIDTH_NATURAL: *pu64Dst = *(uint64_t *)pbField; break;
7327 case VMX_VMCS_ENC_WIDTH_32BIT: *pu64Dst = *(uint32_t *)pbField; break;
7328 case VMX_VMCS_ENC_WIDTH_16BIT: *pu64Dst = *(uint16_t *)pbField; break;
7329 }
7330 return VINF_SUCCESS;
7331}
7332
7333
7334/**
7335 * VMREAD (64-bit register) instruction execution worker.
7336 *
7337 * @returns Strict VBox status code.
7338 * @param pVCpu The cross context virtual CPU structure.
7339 * @param cbInstr The instruction length in bytes.
7340 * @param pu64Dst Where to store the VMCS field's value.
7341 * @param u64FieldEnc The VMCS field encoding.
7342 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7343 * be NULL.
7344 */
7345IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg64(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
7346 PCVMXVEXITINFO pExitInfo)
7347{
7348 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
7349 if (rcStrict == VINF_SUCCESS)
7350 {
7351 iemVmxVmreadSuccess(pVCpu, cbInstr);
7352 return VINF_SUCCESS;
7353 }
7354
7355 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7356 return rcStrict;
7357}
7358
7359
7360/**
7361 * VMREAD (32-bit register) instruction execution worker.
7362 *
7363 * @returns Strict VBox status code.
7364 * @param pVCpu The cross context virtual CPU structure.
7365 * @param cbInstr The instruction length in bytes.
7366 * @param pu32Dst Where to store the VMCS field's value.
7367 * @param u32FieldEnc The VMCS field encoding.
7368 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7369 * be NULL.
7370 */
7371IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg32(PVMCPU pVCpu, uint8_t cbInstr, uint32_t *pu32Dst, uint64_t u32FieldEnc,
7372 PCVMXVEXITINFO pExitInfo)
7373{
7374 uint64_t u64Dst;
7375 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u32FieldEnc, pExitInfo);
7376 if (rcStrict == VINF_SUCCESS)
7377 {
7378 *pu32Dst = u64Dst;
7379 iemVmxVmreadSuccess(pVCpu, cbInstr);
7380 return VINF_SUCCESS;
7381 }
7382
7383 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7384 return rcStrict;
7385}
7386
7387
7388/**
7389 * VMREAD (memory) instruction execution worker.
7390 *
7391 * @returns Strict VBox status code.
7392 * @param pVCpu The cross context virtual CPU structure.
7393 * @param cbInstr The instruction length in bytes.
7394 * @param   iEffSeg         The effective segment register to use with
7395 *                          @a GCPtrDst.
7396 * @param enmEffAddrMode The effective addressing mode (only used with memory
7397 * operand).
7398 * @param GCPtrDst The guest linear address to store the VMCS field's
7399 * value.
7400 * @param u64FieldEnc The VMCS field encoding.
7401 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7402 * be NULL.
7403 */
7404IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode,
7405 RTGCPTR GCPtrDst, uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
7406{
7407 uint64_t u64Dst;
7408 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u64FieldEnc, pExitInfo);
7409 if (rcStrict == VINF_SUCCESS)
7410 {
7411 /*
7412 * Write the VMCS field's value to the location specified in guest-memory.
7413 *
7414 * The pointer size depends on the address size (address-size prefix allowed).
7415 * The operand size depends on IA-32e mode (operand-size prefix not allowed).
7416 */
7417 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
7418 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
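            /* The mask table is indexed by the effective addressing mode (16-, 32- or 64-bit);
               truncate GCPtrDst to the effective address size. */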
7419 GCPtrDst &= s_auAddrSizeMasks[enmEffAddrMode];
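             /* Illustrative example (added commentary, not part of the original source): with a
                16-bit effective address size (IEMMODE_16BIT, index 0 above) a decoded destination
                of 0x00012345 is masked down to 0x2345 before the store below; outside 64-bit mode
                only the low 32 bits of u64Dst are written, per the operand-size rule noted above. */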
7420
7421 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7422 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
7423 else
7424 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
7425 if (rcStrict == VINF_SUCCESS)
7426 {
7427 iemVmxVmreadSuccess(pVCpu, cbInstr);
7428 return VINF_SUCCESS;
7429 }
7430
7431 Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
7432 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrMap;
7433 return rcStrict;
7434 }
7435
7436 Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7437 return rcStrict;
7438}
7439
7440
7441/**
7442 * VMWRITE instruction execution worker.
7443 *
7444 * @returns Strict VBox status code.
7445 * @param pVCpu The cross context virtual CPU structure.
7446 * @param cbInstr The instruction length in bytes.
7447 * @param iEffSeg The effective segment register to use with @a u64Val.
7448 * Pass UINT8_MAX if it is a register access.
7449 * @param enmEffAddrMode The effective addressing mode (only used with memory
7450 * operand).
7451 * @param u64Val The value to write (or guest linear address to the
7452 * value), @a iEffSeg will indicate if it's a memory
7453 * operand.
7454 * @param u64FieldEnc The VMCS field encoding.
7455 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7456 * be NULL.
7457 */
7458IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode, uint64_t u64Val,
7459 uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
7460{
7461 /* Nested-guest intercept. */
7462 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7463 && iemVmxIsVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMWRITE, u64FieldEnc))
7464 {
7465 if (pExitInfo)
7466 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7467 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMWRITE, VMXINSTRID_VMWRITE, cbInstr);
7468 }
7469
7470 /* CPL. */
7471 if (pVCpu->iem.s.uCpl > 0)
7472 {
7473 Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7474 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_Cpl;
7475 return iemRaiseGeneralProtectionFault0(pVCpu);
7476 }
7477
7478 /* VMCS pointer in root mode. */
7479 if ( IEM_VMX_IS_ROOT_MODE(pVCpu)
7480 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
7481 {
7482 Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7483 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrInvalid;
7484 iemVmxVmFailInvalid(pVCpu);
7485 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7486 return VINF_SUCCESS;
7487 }
7488
7489 /* VMCS-link pointer in non-root mode. */
7490 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7491 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
7492 {
7493 Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
7494 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_LinkPtrInvalid;
7495 iemVmxVmFailInvalid(pVCpu);
7496 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7497 return VINF_SUCCESS;
7498 }
7499
7500 /* If the VMWRITE instruction references memory, access the specified memory operand. */
7501 bool const fIsRegOperand = iEffSeg == UINT8_MAX;
7502 if (!fIsRegOperand)
7503 {
7504 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
7505 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
7506 RTGCPTR const GCPtrVal = u64Val & s_auAddrSizeMasks[enmEffAddrMode];
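             /* Illustrative note (added commentary, not part of the original source): the same
                address-size masking as in the VMREAD memory worker applies to the VMWRITE memory
                operand; the fetch below then reads 64 bits in 64-bit mode and zero-extends a
                32-bit value otherwise. */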
7507
7508 /* Read the value from the specified guest memory location. */
7509 VBOXSTRICTRC rcStrict;
7510 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7511 rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
7512 else
7513 {
7514 uint32_t u32Val;
7515 rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iEffSeg, GCPtrVal);
7516 u64Val = u32Val;
7517 }
7518 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7519 {
7520 Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
7521 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrMap;
7522 return rcStrict;
7523 }
7524 }
7525 else
7526 Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
7527
7528 /* Supported VMCS field. */
7529 if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
7530 {
7531 Log(("vmwrite: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
7532 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldInvalid;
7533 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
7534 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7535 return VINF_SUCCESS;
7536 }
7537
7538 /* Read-only VMCS field. */
7539 bool const fIsFieldReadOnly = HMVmxIsVmcsFieldReadOnly(u64FieldEnc);
7540 if ( fIsFieldReadOnly
7541 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
7542 {
7543 Log(("vmwrite: Write to read-only VMCS component %#RX64 -> VMFail\n", u64FieldEnc));
7544 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldRo;
7545 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
7546 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7547 return VINF_SUCCESS;
7548 }
7549
7550 /*
7551      * Set up writing to the current or shadow VMCS.
7552 */
7553 uint8_t *pbVmcs;
7554 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7555 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
7556 else
7557 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7558 Assert(pbVmcs);
7559
7560 VMXVMCSFIELDENC FieldEnc;
7561 FieldEnc.u = RT_LO_U32(u64FieldEnc);
7562 uint8_t const uWidth = FieldEnc.n.u2Width;
7563 uint8_t const uType = FieldEnc.n.u2Type;
7564 uint8_t const uWidthType = (uWidth << 2) | uType;
7565 uint8_t const uIndex = FieldEnc.n.u8Index;
7566 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
7567 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
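         /* Worked example (added commentary; the encoding layout follows the Intel SDM and the
            concrete field value is only an illustration): for the 32-bit control field 0x00004002
            (primary processor-based VM-execution controls) the decode yields uWidth=2, uType=0,
            uIndex=1, hence uWidthType = (2 << 2) | 0 = 8 and offField = g_aoffVmcsMap[8][1],
            the offset of that control inside the virtual VMCS layout. */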
7568
7569 /*
7570 * Write the VMCS component based on the field's effective width.
7571 *
7572      * The effective width of a 64-bit field is adjusted to 32 bits if the access
7573      * type indicates the high part of the field (little endian).
7574 */
7575 uint8_t *pbField = pbVmcs + offField;
7576 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
7577 switch (uEffWidth)
7578 {
7579 case VMX_VMCS_ENC_WIDTH_64BIT:
7580 case VMX_VMCS_ENC_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
7581 case VMX_VMCS_ENC_WIDTH_32BIT: *(uint32_t *)pbField = u64Val; break;
7582 case VMX_VMCS_ENC_WIDTH_16BIT: *(uint16_t *)pbField = u64Val; break;
7583 }
7584
7585 iemVmxVmSucceed(pVCpu);
7586 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7587 return VINF_SUCCESS;
7588}
7589
7590
7591/**
7592 * VMCLEAR instruction execution worker.
7593 *
7594 * @returns Strict VBox status code.
7595 * @param pVCpu The cross context virtual CPU structure.
7596 * @param cbInstr The instruction length in bytes.
7597 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
7598 * @param GCPtrVmcs The linear address of the VMCS pointer.
7599 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7600 * be NULL.
7601 *
7602 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
7603 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7604 */
7605IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
7606 PCVMXVEXITINFO pExitInfo)
7607{
7608 /* Nested-guest intercept. */
7609 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7610 {
7611 if (pExitInfo)
7612 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7613 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMCLEAR, VMXINSTRID_NONE, cbInstr);
7614 }
7615
7616 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7617
7618 /* CPL. */
7619 if (pVCpu->iem.s.uCpl > 0)
7620 {
7621 Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7622 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_Cpl;
7623 return iemRaiseGeneralProtectionFault0(pVCpu);
7624 }
7625
7626 /* Get the VMCS pointer from the location specified by the source memory operand. */
7627 RTGCPHYS GCPhysVmcs;
7628 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
7629 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7630 {
7631 Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
7632 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrMap;
7633 return rcStrict;
7634 }
7635
7636 /* VMCS pointer alignment. */
7637 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
7638 {
7639 Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
7640 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAlign;
7641 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
7642 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7643 return VINF_SUCCESS;
7644 }
7645
7646 /* VMCS physical-address width limits. */
7647 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
7648 {
7649 Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
7650 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrWidth;
7651 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
7652 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7653 return VINF_SUCCESS;
7654 }
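     /* Illustrative note (added commentary, not part of the original source): the shift test
        above rejects any pointer with bits set at or above the reported physical-address width;
        e.g. with a 36-bit width, a VMCS pointer of 0x1000000000 (bit 36 set) fails the check. */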
7655
7656 /* VMCS is not the VMXON region. */
7657 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
7658 {
7659 Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
7660 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrVmxon;
7661 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
7662 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7663 return VINF_SUCCESS;
7664 }
7665
7666 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
7667 restriction imposed by our implementation. */
7668 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
7669 {
7670 Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
7671 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAbnormal;
7672 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
7673 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7674 return VINF_SUCCESS;
7675 }
7676
7677 /*
7678 * VMCLEAR allows committing and clearing any valid VMCS pointer.
7679 *
7680 * If the current VMCS is the one being cleared, set its state to 'clear' and commit
7681 * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
7682 * to 'clear'.
7683 */
7684 uint8_t const fVmcsStateClear = VMX_V_VMCS_STATE_CLEAR;
7685 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
7686 {
7687 Assert(GCPhysVmcs != NIL_RTGCPHYS); /* Paranoia. */
7688 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
7689 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsStateClear;
7690 iemVmxCommitCurrentVmcsToMemory(pVCpu);
7691 Assert(!IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
7692 }
7693 else
7694 {
7695         rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_UOFFSETOF(VMXVVMCS, fVmcsState),
7696 (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
7697 }
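     /* Illustrative note (added commentary, not part of the original source): for a VMCS that is
        not the current one, only the single launch-state byte is updated in guest memory at
        GCPhysVmcs; the rest of the VMCS image is left untouched. */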
7698
7699 iemVmxVmSucceed(pVCpu);
7700 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7701 return rcStrict;
7702}
7703
7704
7705/**
7706 * VMPTRST instruction execution worker.
7707 *
7708 * @returns Strict VBox status code.
7709 * @param pVCpu The cross context virtual CPU structure.
7710 * @param cbInstr The instruction length in bytes.
7711 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
7712 * @param GCPtrVmcs The linear address of where to store the current VMCS
7713 * pointer.
7714 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7715 * be NULL.
7716 *
7717 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
7718 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7719 */
7720IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
7721 PCVMXVEXITINFO pExitInfo)
7722{
7723 /* Nested-guest intercept. */
7724 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7725 {
7726 if (pExitInfo)
7727 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7728 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRST, VMXINSTRID_NONE, cbInstr);
7729 }
7730
7731 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7732
7733 /* CPL. */
7734 if (pVCpu->iem.s.uCpl > 0)
7735 {
7736 Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7737 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_Cpl;
7738 return iemRaiseGeneralProtectionFault0(pVCpu);
7739 }
7740
7741 /* Set the VMCS pointer to the location specified by the destination memory operand. */
7742 AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
7743 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
7744 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7745 {
7746 iemVmxVmSucceed(pVCpu);
7747 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7748 return rcStrict;
7749 }
7750
7751     Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7752 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_PtrMap;
7753 return rcStrict;
7754}
7755
7756
7757/**
7758 * VMPTRLD instruction execution worker.
7759 *
7760 * @returns Strict VBox status code.
7761 * @param pVCpu The cross context virtual CPU structure.
7762 * @param cbInstr The instruction length in bytes.
7763 * @param   iEffSeg         The effective segment register to use with @a GCPtrVmcs.
 * @param   GCPtrVmcs       The linear address of the current VMCS pointer.
7764 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7765 * be NULL.
7766 *
7767 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
7768 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7769 */
7770IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
7771 PCVMXVEXITINFO pExitInfo)
7772{
7773 /* Nested-guest intercept. */
7774 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7775 {
7776 if (pExitInfo)
7777 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7778 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRLD, VMXINSTRID_NONE, cbInstr);
7779 }
7780
7781 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7782
7783 /* CPL. */
7784 if (pVCpu->iem.s.uCpl > 0)
7785 {
7786 Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7787 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_Cpl;
7788 return iemRaiseGeneralProtectionFault0(pVCpu);
7789 }
7790
7791 /* Get the VMCS pointer from the location specified by the source memory operand. */
7792 RTGCPHYS GCPhysVmcs;
7793 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
7794 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7795 {
7796 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
7797 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrMap;
7798 return rcStrict;
7799 }
7800
7801 /* VMCS pointer alignment. */
7802 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
7803 {
7804 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
7805 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAlign;
7806 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
7807 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7808 return VINF_SUCCESS;
7809 }
7810
7811 /* VMCS physical-address width limits. */
7812 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
7813 {
7814 Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
7815 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrWidth;
7816 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
7817 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7818 return VINF_SUCCESS;
7819 }
7820
7821 /* VMCS is not the VMXON region. */
7822 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
7823 {
7824 Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
7825 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrVmxon;
7826 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
7827 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7828 return VINF_SUCCESS;
7829 }
7830
7831 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
7832 restriction imposed by our implementation. */
7833 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
7834 {
7835 Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
7836 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAbnormal;
7837 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
7838 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7839 return VINF_SUCCESS;
7840 }
7841
7842 /* Read the VMCS revision ID from the VMCS. */
7843 VMXVMCSREVID VmcsRevId;
7844 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
7845 if (RT_FAILURE(rc))
7846 {
7847 Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
7848 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrReadPhys;
7849 return rc;
7850 }
7851
7852     /* Verify that the VMCS revision specified by the guest matches what we reported to the
7853        guest, and also check the VMCS shadowing feature. */
7854 if ( VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
7855 || ( VmcsRevId.n.fIsShadowVmcs
7856 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
7857 {
7858 if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
7859 {
7860 Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFail()\n", VMX_V_VMCS_REVISION_ID,
7861 VmcsRevId.n.u31RevisionId));
7862 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_VmcsRevId;
7863 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
7864 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7865 return VINF_SUCCESS;
7866 }
7867
7868 Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
7869 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_ShadowVmcs;
7870 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
7871 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7872 return VINF_SUCCESS;
7873 }
7874
7875 /*
7876      * We only maintain the current VMCS in our virtual CPU context (CPUMCTX). Therefore,
7877 * VMPTRLD shall always flush any existing current VMCS back to guest memory before loading
7878 * a new VMCS as current.
7879 */
7880 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
7881 {
7882 iemVmxCommitCurrentVmcsToMemory(pVCpu);
7883 IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
7884 }
7885
7886 iemVmxVmSucceed(pVCpu);
7887 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7888 return VINF_SUCCESS;
7889}
7890
7891
7892/**
7893 * VMXON instruction execution worker.
7894 *
7895 * @returns Strict VBox status code.
7896 * @param pVCpu The cross context virtual CPU structure.
7897 * @param cbInstr The instruction length in bytes.
7898 * @param iEffSeg The effective segment register to use with @a
7899 * GCPtrVmxon.
7900 * @param GCPtrVmxon The linear address of the VMXON pointer.
7901 * @param pExitInfo Pointer to the VM-exit instruction information struct.
7902 * Optional, can be NULL.
7903 *
7904 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
7905 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7906 */
7907IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmxon,
7908 PCVMXVEXITINFO pExitInfo)
7909{
7910#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
7911 RT_NOREF5(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
7912 return VINF_EM_RAW_EMULATE_INSTR;
7913#else
7914 if (!IEM_VMX_IS_ROOT_MODE(pVCpu))
7915 {
7916 /* CPL. */
7917 if (pVCpu->iem.s.uCpl > 0)
7918 {
7919 Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7920 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cpl;
7921 return iemRaiseGeneralProtectionFault0(pVCpu);
7922 }
7923
7924 /* A20M (A20 Masked) mode. */
7925 if (!PGMPhysIsA20Enabled(pVCpu))
7926 {
7927 Log(("vmxon: A20M mode -> #GP(0)\n"));
7928 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_A20M;
7929 return iemRaiseGeneralProtectionFault0(pVCpu);
7930 }
7931
7932 /* CR0. */
7933 {
7934 /* CR0 MB1 bits. */
7935 uint64_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
7936 if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
7937 {
7938 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
7939 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed0;
7940 return iemRaiseGeneralProtectionFault0(pVCpu);
7941 }
7942
7943 /* CR0 MBZ bits. */
7944 uint64_t const uCr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
7945 if (pVCpu->cpum.GstCtx.cr0 & ~uCr0Fixed1)
7946 {
7947 Log(("vmxon: CR0 fixed1 bits set -> #GP(0)\n"));
7948 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed1;
7949 return iemRaiseGeneralProtectionFault0(pVCpu);
7950 }
7951 }
7952
7953 /* CR4. */
7954 {
7955 /* CR4 MB1 bits. */
7956 uint64_t const uCr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
7957 if ((pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0) != uCr4Fixed0)
7958 {
7959 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
7960 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed0;
7961 return iemRaiseGeneralProtectionFault0(pVCpu);
7962 }
7963
7964 /* CR4 MBZ bits. */
7965 uint64_t const uCr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
7966 if (pVCpu->cpum.GstCtx.cr4 & ~uCr4Fixed1)
7967 {
7968 Log(("vmxon: CR4 fixed1 bits set -> #GP(0)\n"));
7969 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed1;
7970 return iemRaiseGeneralProtectionFault0(pVCpu);
7971 }
7972 }
7973
7974 /* Feature control MSR's LOCK and VMXON bits. */
7975 uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
7976 if (!(uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON)))
7977 {
7978 Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
7979 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_MsrFeatCtl;
7980 return iemRaiseGeneralProtectionFault0(pVCpu);
7981 }
7982
7983 /* Get the VMXON pointer from the location specified by the source memory operand. */
7984 RTGCPHYS GCPhysVmxon;
7985 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
7986 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7987 {
7988 Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
7989 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrMap;
7990 return rcStrict;
7991 }
7992
7993 /* VMXON region pointer alignment. */
7994 if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
7995 {
7996 Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
7997 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAlign;
7998 iemVmxVmFailInvalid(pVCpu);
7999 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8000 return VINF_SUCCESS;
8001 }
8002
8003 /* VMXON physical-address width limits. */
8004 if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
8005 {
8006 Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
8007 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrWidth;
8008 iemVmxVmFailInvalid(pVCpu);
8009 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8010 return VINF_SUCCESS;
8011 }
8012
8013 /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
8014 restriction imposed by our implementation. */
8015 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
8016 {
8017 Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
8018 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAbnormal;
8019 iemVmxVmFailInvalid(pVCpu);
8020 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8021 return VINF_SUCCESS;
8022 }
8023
8024 /* Read the VMCS revision ID from the VMXON region. */
8025 VMXVMCSREVID VmcsRevId;
8026 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
8027 if (RT_FAILURE(rc))
8028 {
8029 Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
8030 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrReadPhys;
8031 return rc;
8032 }
8033
8034 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
8035 if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
8036 {
8037 /* Revision ID mismatch. */
8038 if (!VmcsRevId.n.fIsShadowVmcs)
8039 {
8040 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
8041 VmcsRevId.n.u31RevisionId));
8042 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmcsRevId;
8043 iemVmxVmFailInvalid(pVCpu);
8044 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8045 return VINF_SUCCESS;
8046 }
8047
8048 /* Shadow VMCS disallowed. */
8049 Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
8050 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_ShadowVmcs;
8051 iemVmxVmFailInvalid(pVCpu);
8052 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8053 return VINF_SUCCESS;
8054 }
8055
8056 /*
8057 * Record that we're in VMX operation, block INIT, block and disable A20M.
8058 */
8059 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
8060 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
8061 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
8062
8063 /* Clear address-range monitoring. */
8064 EMMonitorWaitClear(pVCpu);
8065 /** @todo NSTVMX: Intel PT. */
8066
8067 iemVmxVmSucceed(pVCpu);
8068 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8069# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
8070 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
8071# else
8072 return VINF_SUCCESS;
8073# endif
8074 }
8075 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8076 {
8077 /* Nested-guest intercept. */
8078 if (pExitInfo)
8079 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
8080 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMXON, VMXINSTRID_NONE, cbInstr);
8081 }
8082
8083 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
8084
8085 /* CPL. */
8086 if (pVCpu->iem.s.uCpl > 0)
8087 {
8088 Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8089 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxRootCpl;
8090 return iemRaiseGeneralProtectionFault0(pVCpu);
8091 }
8092
8093 /* VMXON when already in VMX root mode. */
8094 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
8095 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxAlreadyRoot;
8096 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8097 return VINF_SUCCESS;
8098#endif
8099}
8100
8101
8102/**
8103 * Implements 'VMXOFF'.
8104 *
8105 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
8106 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
8107 */
8108IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
8109{
8110# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
8111 RT_NOREF2(pVCpu, cbInstr);
8112 return VINF_EM_RAW_EMULATE_INSTR;
8113# else
8114 /* Nested-guest intercept. */
8115 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8116 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMXOFF, cbInstr);
8117
8118 /* CPL. */
8119 if (pVCpu->iem.s.uCpl > 0)
8120 {
8121 Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8122 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxoff_Cpl;
8123 return iemRaiseGeneralProtectionFault0(pVCpu);
8124 }
8125
8126 /* Dual monitor treatment of SMIs and SMM. */
8127 uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
8128 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
8129 {
8130 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
8131 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8132 return VINF_SUCCESS;
8133 }
8134
8135     /* Record that we're no longer in VMX root operation, unblock INIT, unblock and enable A20M. */
8136 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
8137 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
8138
8139 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
8140 { /** @todo NSTVMX: Unblock SMI. */ }
8141
8142 EMMonitorWaitClear(pVCpu);
8143 /** @todo NSTVMX: Unblock and enable A20M. */
8144
8145 iemVmxVmSucceed(pVCpu);
8146 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8147# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
8148 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
8149# else
8150 return VINF_SUCCESS;
8151# endif
8152# endif
8153}
8154
8155
8156/**
8157 * Implements 'VMXON'.
8158 */
8159IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
8160{
8161 return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
8162}
8163
8164
8165/**
8166 * Implements 'VMLAUNCH'.
8167 */
8168IEM_CIMPL_DEF_0(iemCImpl_vmlaunch)
8169{
8170 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMLAUNCH, NULL /* pExitInfo */);
8171}
8172
8173
8174/**
8175 * Implements 'VMRESUME'.
8176 */
8177IEM_CIMPL_DEF_0(iemCImpl_vmresume)
8178{
8179 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMRESUME, NULL /* pExitInfo */);
8180}
8181
8182
8183/**
8184 * Implements 'VMPTRLD'.
8185 */
8186IEM_CIMPL_DEF_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
8187{
8188 return iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
8189}
8190
8191
8192/**
8193 * Implements 'VMPTRST'.
8194 */
8195IEM_CIMPL_DEF_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
8196{
8197 return iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
8198}
8199
8200
8201/**
8202 * Implements 'VMCLEAR'.
8203 */
8204IEM_CIMPL_DEF_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
8205{
8206 return iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
8207}
8208
8209
8210/**
8211 * Implements 'VMWRITE' register.
8212 */
8213IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64FieldEnc)
8214{
8215 return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, IEMMODE_64BIT /* N/A */, u64Val, u64FieldEnc,
8216 NULL /* pExitInfo */);
8217}
8218
8219
8220/**
8221 * Implements 'VMWRITE' memory.
8222 */
8223IEM_CIMPL_DEF_4(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrVal, uint32_t, u64FieldEnc)
8224{
8225 return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrVal, u64FieldEnc, NULL /* pExitInfo */);
8226}
8227
8228
8229/**
8230 * Implements 'VMREAD' 64-bit register.
8231 */
8232IEM_CIMPL_DEF_2(iemCImpl_vmread64_reg, uint64_t *, pu64Dst, uint64_t, u64FieldEnc)
8233{
8234 return iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, NULL /* pExitInfo */);
8235}
8236
8237
8238/**
8239 * Implements 'VMREAD' 32-bit register.
8240 */
8241IEM_CIMPL_DEF_2(iemCImpl_vmread32_reg, uint32_t *, pu32Dst, uint32_t, u32FieldEnc)
8242{
8243 return iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u32FieldEnc, NULL /* pExitInfo */);
8244}
8245
8246
8247/**
8248 * Implements 'VMREAD' memory.
8249 */
8250IEM_CIMPL_DEF_4(iemCImpl_vmread_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrDst, uint32_t, u64FieldEnc)
8251{
8252 return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, u64FieldEnc, NULL /* pExitInfo */);
8253}
8254
8255
8256/**
8257 * Implements VMX's implementation of PAUSE.
8258 */
8259IEM_CIMPL_DEF_0(iemCImpl_vmx_pause)
8260{
8261 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8262 {
8263 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrPause(pVCpu, cbInstr);
8264 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
8265 return rcStrict;
8266 }
8267
8268 /*
8269 * Outside VMX non-root operation or if the PAUSE instruction does not cause
8270 * a VM-exit, the instruction operates normally.
8271 */
8272 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8273 return VINF_SUCCESS;
8274}
8275
8276#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
8277
8278
8279/**
8280 * Implements 'VMCALL'.
8281 */
8282IEM_CIMPL_DEF_0(iemCImpl_vmcall)
8283{
8284#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8285 /* Nested-guest intercept. */
8286 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8287 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMCALL, cbInstr);
8288#endif
8289
8290 /* Join forces with vmmcall. */
8291 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
8292}
8293