VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h@92686

Last change on this file was revision 92686, checked in by vboxsync on 2021-12-02

VMM/IEM: Nested VMX: bugref:10092 EPT misconfig bits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 360.7 KB
1/* $Id: IEMAllCImplVmxInstr.cpp.h 92686 2021-12-02 08:41:22Z vboxsync $ */
2/** @file
3 * IEM - VT-x instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Defined Constants And Macros *
21*********************************************************************************************************************************/
22#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
23/**
24 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
25 * relative offsets.
26 */
27# ifdef IEM_WITH_CODE_TLB
28# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) do { } while (0)
29# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) do { } while (0)
30# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
31# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
32# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
33# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
34# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
35# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
36# error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
37# else /* !IEM_WITH_CODE_TLB */
38# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
39 do \
40 { \
41 Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
42 (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
43 } while (0)
44
45# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
46
47# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
48 do \
49 { \
50 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
51 uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
52 uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
53 (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
54 } while (0)
55
56# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
57 do \
58 { \
59 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
60 (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
61 } while (0)
62
63# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
64 do \
65 { \
66 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
67 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
68 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
69 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
70 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
71 (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
72 } while (0)
73
74# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
75 do \
76 { \
77 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
78 (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
79 } while (0)
80
81# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
82 do \
83 { \
84 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
85 (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
86 } while (0)
87
88# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
89 do \
90 { \
91 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
92 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
93 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
94 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
95 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
96 (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
97 } while (0)
98# endif /* !IEM_WITH_CODE_TLB */
99
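/*
 * Illustrative sketch (pVCpu is assumed to be in scope): how the opcode-buffer helpers above
 * pick apart an already decoded instruction.  Assume iem.s.abOpcode holds the bytes
 * 8B 84 24 78 56 34 12 (mov eax, [esp+12345678h]) with iem.s.cbOpcode = 7; the ModR/M byte
 * is at offset 1, the SIB byte at offset 2 and the 32-bit displacement at offsets 3..6,
 * reassembled in little-endian order.
 */
#if 0
    uint8_t  bModRm  = 0;
    uint8_t  bSib    = 0;
    uint32_t u32Disp = 0;
    IEM_MODRM_GET_U8(pVCpu, bModRm, 1);   /* -> 0x84: mod=10b, reg=eax, rm=100b (SIB follows) */
    IEM_SIB_GET_U8(pVCpu, bSib, 2);       /* -> 0x24: no index, base=esp                      */
    IEM_DISP_GET_U32(pVCpu, u32Disp, 3);  /* -> 0x12345678 via RT_MAKE_U32_FROM_U8            */
#endif
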
100/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
101# define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)
102
103/** Whether a shadow VMCS is present for the given VCPU. */
104# define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
105
106/** Gets the VMXON region pointer. */
107# define IEM_VMX_GET_VMXON_PTR(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
108
109/** Gets the guest-physical address of the current VMCS for the given VCPU. */
110# define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
111
112/** Whether a current VMCS is present for the given VCPU. */
113# define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
114
115/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
116# define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
117 do \
118 { \
119 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
120 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
121 } while (0)
122
123/** Clears any current VMCS for the given VCPU. */
124# define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
125 do \
126 { \
127 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
128 } while (0)
129
130/** Checks that the CPU is in VMX operation, as required by VMX instructions.
131 * @note Any changes here, check if IEMOP_HLP_IN_VMX_OPERATION needs updating. */
132# define IEM_VMX_IN_VMX_OPERATION(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
133 do \
134 { \
135 if (IEM_VMX_IS_ROOT_MODE(a_pVCpu)) \
136 { /* likely */ } \
137 else \
138 { \
139 Log((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
140 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
141 return iemRaiseUndefinedOpcode(a_pVCpu); \
142 } \
143 } while (0)
144
145/** Marks a VM-entry failure with a diagnostic reason, logs and returns. */
146# define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_VmxDiag) \
147 do \
148 { \
149 LogRel(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_VmxDiag), \
150 HMGetVmxDiagDesc(a_VmxDiag), (a_pszFailure))); \
151 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
152 return VERR_VMX_VMENTRY_FAILED; \
153 } while (0)
154
155/** Marks a VM-exit failure with a diagnostic reason and logs. */
156# define IEM_VMX_VMEXIT_FAILED(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
157 do \
158 { \
159 LogRel(("VM-exit failed! uExitReason=%u enmDiag=%u (%s) -> %s\n", (a_uExitReason), (a_VmxDiag), \
160 HMGetVmxDiagDesc(a_VmxDiag), (a_pszFailure))); \
161 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
162 } while (0)
163
164/** Marks a VM-exit failure with a diagnostic reason, logs and returns. */
165# define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
166 do \
167 { \
168 IEM_VMX_VMEXIT_FAILED(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag); \
169 return VERR_VMX_VMEXIT_FAILED; \
170 } while (0)
171
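/*
 * Usage sketch (illustrative only; pVCpu, pVmcs, pszInstr and pszFailure are assumed to be
 * locals of the calling check function): a typical VM-entry guest-state check records a
 * diagnostic and bails out through the macros above, e.g.
 */
#if 0
    if (!X86_IS_CANONICAL(pVmcs->u64GuestCsBase.u))
        IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseCs);
#endif
/*
 * The macro logs the failure (with the human-readable description from HMGetVmxDiagDesc),
 * stores the VMXVDIAG value in cpum.GstCtx.hwvirt.vmx.enmDiag and returns
 * VERR_VMX_VMENTRY_FAILED to the caller.
 */
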
172
173/*********************************************************************************************************************************
174* Global Variables *
175*********************************************************************************************************************************/
176/** @todo NSTVMX: The following VM-exit intercepts are pending:
177 * VMX_EXIT_IO_SMI
178 * VMX_EXIT_SMI
179 * VMX_EXIT_GETSEC
180 * VMX_EXIT_RSM
181 * VMX_EXIT_MONITOR (APIC access VM-exit caused by MONITOR pending)
182 * VMX_EXIT_ERR_MACHINE_CHECK (we never need to raise this?)
183 * VMX_EXIT_EPT_VIOLATION
184 * VMX_EXIT_EPT_MISCONFIG
185 * VMX_EXIT_INVEPT
186 * VMX_EXIT_RDRAND
187 * VMX_EXIT_VMFUNC
188 * VMX_EXIT_ENCLS
189 * VMX_EXIT_RDSEED
190 * VMX_EXIT_PML_FULL
191 * VMX_EXIT_XSAVES
192 * VMX_EXIT_XRSTORS
193 */
194/**
195 * Map of VMCS field encodings to their virtual-VMCS structure offsets.
196 *
197 * The first array dimension is the VMCS field encoding's Width OR'ed with its Type, and
198 * the second dimension is the Index, see VMXVMCSFIELD.
199 */
200uint16_t const g_aoffVmcsMap[16][VMX_V_VMCS_MAX_INDEX + 1] =
201{
202 /* VMX_VMCSFIELD_WIDTH_16BIT | VMX_VMCSFIELD_TYPE_CONTROL: */
203 {
204 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u16Vpid),
205 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u16PostIntNotifyVector),
206 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u16EptpIndex),
207 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
208 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
209 /* 19-26 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
210 /* 27 */ UINT16_MAX,
211 },
212 /* VMX_VMCSFIELD_WIDTH_16BIT | VMX_VMCSFIELD_TYPE_VMEXIT_INFO: */
213 {
214 /* 0-7 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
215 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
216 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
217 /* 24-27 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
218 },
219 /* VMX_VMCSFIELD_WIDTH_16BIT | VMX_VMCSFIELD_TYPE_GUEST_STATE: */
220 {
221 /* 0 */ RT_UOFFSETOF(VMXVVMCS, GuestEs),
222 /* 1 */ RT_UOFFSETOF(VMXVVMCS, GuestCs),
223 /* 2 */ RT_UOFFSETOF(VMXVVMCS, GuestSs),
224 /* 3 */ RT_UOFFSETOF(VMXVVMCS, GuestDs),
225 /* 4 */ RT_UOFFSETOF(VMXVVMCS, GuestFs),
226 /* 5 */ RT_UOFFSETOF(VMXVVMCS, GuestGs),
227 /* 6 */ RT_UOFFSETOF(VMXVVMCS, GuestLdtr),
228 /* 7 */ RT_UOFFSETOF(VMXVVMCS, GuestTr),
229 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u16GuestIntStatus),
230 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u16PmlIndex),
231 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
232 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
233 /* 26-27 */ UINT16_MAX, UINT16_MAX
234 },
235 /* VMX_VMCSFIELD_WIDTH_16BIT | VMX_VMCSFIELD_TYPE_HOST_STATE: */
236 {
237 /* 0 */ RT_UOFFSETOF(VMXVVMCS, HostEs),
238 /* 1 */ RT_UOFFSETOF(VMXVVMCS, HostCs),
239 /* 2 */ RT_UOFFSETOF(VMXVVMCS, HostSs),
240 /* 3 */ RT_UOFFSETOF(VMXVVMCS, HostDs),
241 /* 4 */ RT_UOFFSETOF(VMXVVMCS, HostFs),
242 /* 5 */ RT_UOFFSETOF(VMXVVMCS, HostGs),
243 /* 6 */ RT_UOFFSETOF(VMXVVMCS, HostTr),
244 /* 7-14 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
245 /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
246 /* 23-27 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
247 },
248 /* VMX_VMCSFIELD_WIDTH_64BIT | VMX_VMCSFIELD_TYPE_CONTROL: */
249 {
250 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64AddrIoBitmapA),
251 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64AddrIoBitmapB),
252 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64AddrMsrBitmap),
253 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64AddrExitMsrStore),
254 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64AddrExitMsrLoad),
255 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEntryMsrLoad),
256 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64ExecVmcsPtr),
257 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64AddrPml),
258 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64TscOffset),
259 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVirtApic),
260 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64AddrApicAccess),
261 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64AddrPostedIntDesc),
262 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u64VmFuncCtls),
263 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u64EptPtr),
264 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap0),
265 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap1),
266 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap2),
267 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap3),
268 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEptpList),
269 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVmreadBitmap),
270 /* 20 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVmwriteBitmap),
271 /* 21 */ RT_UOFFSETOF(VMXVVMCS, u64AddrXcptVeInfo),
272 /* 22 */ RT_UOFFSETOF(VMXVVMCS, u64XssExitBitmap),
273 /* 23 */ RT_UOFFSETOF(VMXVVMCS, u64EnclsExitBitmap),
274 /* 24 */ RT_UOFFSETOF(VMXVVMCS, u64SppTablePtr),
275 /* 25 */ RT_UOFFSETOF(VMXVVMCS, u64TscMultiplier),
276 /* 26 */ RT_UOFFSETOF(VMXVVMCS, u64ProcCtls3),
277 /* 27 */ RT_UOFFSETOF(VMXVVMCS, u64EnclvExitBitmap)
278 },
279 /* VMX_VMCSFIELD_WIDTH_64BIT | VMX_VMCSFIELD_TYPE_VMEXIT_INFO: */
280 {
281 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64RoGuestPhysAddr),
282 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
283 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
284 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
285 /* 25-27 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
286 },
287 /* VMX_VMCSFIELD_WIDTH_64BIT | VMX_VMCSFIELD_TYPE_GUEST_STATE: */
288 {
289 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64VmcsLinkPtr),
290 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDebugCtlMsr),
291 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPatMsr),
292 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64GuestEferMsr),
293 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
294 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte0),
295 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte1),
296 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte2),
297 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte3),
298 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64GuestBndcfgsMsr),
299 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRtitCtlMsr),
300 /* 11 */ UINT16_MAX,
301 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPkrsMsr),
302 /* 13-20 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
303 /* 21-27 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
304 },
305 /* VMX_VMCSFIELD_WIDTH_64BIT | VMX_VMCSFIELD_TYPE_HOST_STATE: */
306 {
307 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64HostPatMsr),
308 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64HostEferMsr),
309 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64HostPerfGlobalCtlMsr),
310 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64HostPkrsMsr),
311 /* 4-11 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
312 /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
313 /* 20-27 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
314 },
315 /* VMX_VMCSFIELD_WIDTH_32BIT | VMX_VMCSFIELD_TYPE_CONTROL: */
316 {
317 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32PinCtls),
318 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32ProcCtls),
319 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32XcptBitmap),
320 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32XcptPFMask),
321 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32XcptPFMatch),
322 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32Cr3TargetCount),
323 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32ExitCtls),
324 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32ExitMsrStoreCount),
325 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u32ExitMsrLoadCount),
326 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u32EntryCtls),
327 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u32EntryMsrLoadCount),
328 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u32EntryIntInfo),
329 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u32EntryXcptErrCode),
330 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u32EntryInstrLen),
331 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u32TprThreshold),
332 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u32ProcCtls2),
333 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u32PleGap),
334 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u32PleWindow),
335 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
336 /* 26-27 */ UINT16_MAX, UINT16_MAX
337 },
338 /* VMX_VMCSFIELD_WIDTH_32BIT | VMX_VMCSFIELD_TYPE_VMEXIT_INFO: */
339 {
340 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32RoVmInstrError),
341 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitReason),
342 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitIntInfo),
343 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitIntErrCode),
344 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
345 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
346 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitInstrLen),
347 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitInstrInfo),
348 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
349 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
350 /* 24-27 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
351 },
352 /* VMX_VMCSFIELD_WIDTH_32BIT | VMX_VMCSFIELD_TYPE_GUEST_STATE: */
353 {
354 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32GuestEsLimit),
355 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32GuestCsLimit),
356 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSsLimit),
357 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32GuestDsLimit),
358 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32GuestFsLimit),
359 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGsLimit),
360 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32GuestLdtrLimit),
361 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32GuestTrLimit),
362 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGdtrLimit),
363 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u32GuestIdtrLimit),
364 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u32GuestEsAttr),
365 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u32GuestCsAttr),
366 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSsAttr),
367 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u32GuestDsAttr),
368 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u32GuestFsAttr),
369 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGsAttr),
370 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u32GuestLdtrAttr),
371 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u32GuestTrAttr),
372 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u32GuestIntrState),
373 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u32GuestActivityState),
374 /* 20 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSmBase),
375 /* 21 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSysenterCS),
376 /* 22 */ UINT16_MAX,
377 /* 23 */ RT_UOFFSETOF(VMXVVMCS, u32PreemptTimer),
378 /* 24-27 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
379 },
380 /* VMX_VMCSFIELD_WIDTH_32BIT | VMX_VMCSFIELD_TYPE_HOST_STATE: */
381 {
382 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32HostSysenterCs),
383 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
384 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
385 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
386 /* 25-27 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
387 },
388 /* VMX_VMCSFIELD_WIDTH_NATURAL | VMX_VMCSFIELD_TYPE_CONTROL: */
389 {
390 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64Cr0Mask),
391 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64Cr4Mask),
392 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64Cr0ReadShadow),
393 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64Cr4ReadShadow),
394 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target0),
395 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target1),
396 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target2),
397 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target3),
398 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
399 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
400 /* 24-27 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
401 },
402 /* VMX_VMCSFIELD_WIDTH_NATURAL | VMX_VMCSFIELD_TYPE_VMEXIT_INFO: */
403 {
404 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64RoExitQual),
405 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRcx),
406 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRsi),
407 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRdi),
408 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRip),
409 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64RoGuestLinearAddr),
410 /* 6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
411 /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
412 /* 22-27 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
413 },
414 /* VMX_VMCSFIELD_WIDTH_NATURAL | VMX_VMCSFIELD_TYPE_GUEST_STATE: */
415 {
416 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr0),
417 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr3),
418 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr4),
419 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64GuestEsBase),
420 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCsBase),
421 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSsBase),
422 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDsBase),
423 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64GuestFsBase),
424 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64GuestGsBase),
425 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64GuestLdtrBase),
426 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64GuestTrBase),
427 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64GuestGdtrBase),
428 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u64GuestIdtrBase),
429 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDr7),
430 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRsp),
431 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRip),
432 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRFlags),
433 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPendingDbgXcpts),
434 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSysenterEsp),
435 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSysenterEip),
436 /* 20 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSCetMsr),
437 /* 21 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSsp),
438 /* 22 */ RT_UOFFSETOF(VMXVVMCS, u64GuestIntrSspTableAddrMsr),
439 /* 23-27 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
440 },
441 /* VMX_VMCSFIELD_WIDTH_NATURAL | VMX_VMCSFIELD_TYPE_HOST_STATE: */
442 {
443 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr0),
444 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr3),
445 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr4),
446 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64HostFsBase),
447 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64HostGsBase),
448 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64HostTrBase),
449 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64HostGdtrBase),
450 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64HostIdtrBase),
451 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64HostSysenterEsp),
452 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64HostSysenterEip),
453 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64HostRsp),
454 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64HostRip),
455 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u64HostSCetMsr),
456 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u64HostSsp),
457 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u64HostIntrSspTableAddrMsr),
458 /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
459 /* 23-27 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
460 }
461};
462
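/*
 * Lookup sketch (illustrative; pVmcs is assumed to be a PCVMXVVMCS in scope): a row of
 * g_aoffVmcsMap is selected by (Width << 2) | Type and the column by the field Index,
 * mirroring the accessors below.  Guest RIP, for instance, is a natural-width guest-state
 * field with index 15:
 */
#if 0
    uint8_t  const uWidthType = (VMX_VMCSFIELD_WIDTH_NATURAL << 2) | VMX_VMCSFIELD_TYPE_GUEST_STATE;
    uint16_t const offField   = g_aoffVmcsMap[uWidthType][15];   /* == RT_UOFFSETOF(VMXVVMCS, u64GuestRip) */
    uint64_t const uGuestRip  = *(uint64_t const *)((uint8_t const *)pVmcs + offField);
#endif
/*
 * Entries holding UINT16_MAX mark encodings without a corresponding field in the virtual
 * VMCS; for an arbitrary field encoding the index part is extracted with
 * RT_BF_GET(uFieldEnc, VMX_BF_VMCSFIELD_INDEX) as done in the helpers below.
 */
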
463
464/**
465 * Gets a host selector from the VMCS.
466 *
467 * @param pVmcs Pointer to the virtual VMCS.
468 * @param iSegReg The index of the segment register (X86_SREG_XXX).
469 */
470DECLINLINE(RTSEL) iemVmxVmcsGetHostSelReg(PCVMXVVMCS pVmcs, uint8_t iSegReg)
471{
472 Assert(iSegReg < X86_SREG_COUNT);
473 RTSEL HostSel;
474 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_16BIT;
475 uint8_t const uType = VMX_VMCSFIELD_TYPE_HOST_STATE;
476 uint8_t const uWidthType = (uWidth << 2) | uType;
477 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_HOST_ES_SEL, VMX_BF_VMCSFIELD_INDEX);
478 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
479 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
480 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
481 uint8_t const *pbField = pbVmcs + offField;
482 HostSel = *(uint16_t *)pbField;
483 return HostSel;
484}
485
486
487/**
488 * Sets a guest segment register in the VMCS.
489 *
490 * @param pVmcs Pointer to the virtual VMCS.
491 * @param iSegReg The index of the segment register (X86_SREG_XXX).
492 * @param pSelReg Pointer to the segment register.
493 */
494IEM_STATIC void iemVmxVmcsSetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCCPUMSELREG pSelReg)
495{
496 Assert(pSelReg);
497 Assert(iSegReg < X86_SREG_COUNT);
498
499 /* Selector. */
500 {
501 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_16BIT;
502 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
503 uint8_t const uWidthType = (uWidth << 2) | uType;
504 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCSFIELD_INDEX);
505 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
506 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
507 uint8_t *pbVmcs = (uint8_t *)pVmcs;
508 uint8_t *pbField = pbVmcs + offField;
509 *(uint16_t *)pbField = pSelReg->Sel;
510 }
511
512 /* Limit. */
513 {
514 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_32BIT;
515 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
516 uint8_t const uWidthType = (uWidth << 2) | uType;
517 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCSFIELD_INDEX);
518 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
519 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
520 uint8_t *pbVmcs = (uint8_t *)pVmcs;
521 uint8_t *pbField = pbVmcs + offField;
522 *(uint32_t *)pbField = pSelReg->u32Limit;
523 }
524
525 /* Base. */
526 {
527 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_NATURAL;
528 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
529 uint8_t const uWidthType = (uWidth << 2) | uType;
530 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCSFIELD_INDEX);
531 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
532 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
533 uint8_t *pbVmcs = (uint8_t *)pVmcs;
534 uint8_t *pbField = pbVmcs + offField;
535 *(uint64_t *)pbField = pSelReg->u64Base;
536 }
537
538 /* Attributes. */
539 {
540 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
541 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
542 | X86DESCATTR_UNUSABLE;
543 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_32BIT;
544 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
545 uint8_t const uWidthType = (uWidth << 2) | uType;
546 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCSFIELD_INDEX);
547 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
548 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
549 uint8_t *pbVmcs = (uint8_t *)pVmcs;
550 uint8_t *pbField = pbVmcs + offField;
551 *(uint32_t *)pbField = pSelReg->Attr.u & fValidAttrMask;
552 }
553}
554
555
556/**
557 * Gets a guest segment register from the VMCS.
558 *
559 * @returns VBox status code.
560 * @param pVmcs Pointer to the virtual VMCS.
561 * @param iSegReg The index of the segment register (X86_SREG_XXX).
562 * @param pSelReg Where to store the segment register (only updated when
563 * VINF_SUCCESS is returned).
564 *
565 * @remarks Warning! This does not validate the contents of the retrieved segment
566 * register.
567 */
568IEM_STATIC int iemVmxVmcsGetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCPUMSELREG pSelReg)
569{
570 Assert(pSelReg);
571 Assert(iSegReg < X86_SREG_COUNT);
572
573 /* Selector. */
574 uint16_t u16Sel;
575 {
576 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_16BIT;
577 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
578 uint8_t const uWidthType = (uWidth << 2) | uType;
579 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCSFIELD_INDEX);
580 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
581 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
582 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
583 uint8_t const *pbField = pbVmcs + offField;
584 u16Sel = *(uint16_t *)pbField;
585 }
586
587 /* Limit. */
588 uint32_t u32Limit;
589 {
590 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_32BIT;
591 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
592 uint8_t const uWidthType = (uWidth << 2) | uType;
593 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCSFIELD_INDEX);
594 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
595 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
596 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
597 uint8_t const *pbField = pbVmcs + offField;
598 u32Limit = *(uint32_t *)pbField;
599 }
600
601 /* Base. */
602 uint64_t u64Base;
603 {
604 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_NATURAL;
605 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
606 uint8_t const uWidthType = (uWidth << 2) | uType;
607 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCSFIELD_INDEX);
608 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
609 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
610 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
611 uint8_t const *pbField = pbVmcs + offField;
612 u64Base = *(uint64_t *)pbField;
613 /** @todo NSTVMX: Should we zero out high bits here for 32-bit virtual CPUs? */
614 }
615
616 /* Attributes. */
617 uint32_t u32Attr;
618 {
619 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_32BIT;
620 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
621 uint8_t const uWidthType = (uWidth << 2) | uType;
622 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCSFIELD_INDEX);
623 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
624 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
625 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
626 uint8_t const *pbField = pbVmcs + offField;
627 u32Attr = *(uint32_t *)pbField;
628 }
629
630 pSelReg->Sel = u16Sel;
631 pSelReg->ValidSel = u16Sel;
632 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
633 pSelReg->u32Limit = u32Limit;
634 pSelReg->u64Base = u64Base;
635 pSelReg->Attr.u = u32Attr;
636 return VINF_SUCCESS;
637}
638
639
640/**
641 * Converts an IEM exception event type to a VMX event type.
642 *
643 * @returns The VMX event type.
644 * @param uVector The interrupt / exception vector.
645 * @param fFlags The IEM event flag (see IEM_XCPT_FLAGS_XXX).
646 */
647DECLINLINE(uint8_t) iemVmxGetEventType(uint32_t uVector, uint32_t fFlags)
648{
649 /* Paranoia (callers may use these interchangeably). */
650 AssertCompile(VMX_EXIT_INT_INFO_TYPE_NMI == VMX_IDT_VECTORING_INFO_TYPE_NMI);
651 AssertCompile(VMX_EXIT_INT_INFO_TYPE_HW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT);
652 AssertCompile(VMX_EXIT_INT_INFO_TYPE_EXT_INT == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
653 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT);
654 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_INT == VMX_IDT_VECTORING_INFO_TYPE_SW_INT);
655 AssertCompile(VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
656 AssertCompile(VMX_EXIT_INT_INFO_TYPE_NMI == VMX_ENTRY_INT_INFO_TYPE_NMI);
657 AssertCompile(VMX_EXIT_INT_INFO_TYPE_HW_XCPT == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT);
658 AssertCompile(VMX_EXIT_INT_INFO_TYPE_EXT_INT == VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
659 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_XCPT == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT);
660 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_INT == VMX_ENTRY_INT_INFO_TYPE_SW_INT);
661 AssertCompile(VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT);
662
663 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
664 {
665 if (uVector == X86_XCPT_NMI)
666 return VMX_EXIT_INT_INFO_TYPE_NMI;
667 return VMX_EXIT_INT_INFO_TYPE_HW_XCPT;
668 }
669
670 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
671 {
672 if (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR))
673 return VMX_EXIT_INT_INFO_TYPE_SW_XCPT;
674 if (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
675 return VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT;
676 return VMX_EXIT_INT_INFO_TYPE_SW_INT;
677 }
678
679 Assert(fFlags & IEM_XCPT_FLAGS_T_EXT_INT);
680 return VMX_EXIT_INT_INFO_TYPE_EXT_INT;
681}
682
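/*
 * Worked examples (illustrative, derived from the flag checks above):
 */
#if 0
    iemVmxGetEventType(X86_XCPT_NMI, IEM_XCPT_FLAGS_T_CPU_XCPT);                              /* -> VMX_EXIT_INT_INFO_TYPE_NMI          */
    iemVmxGetEventType(X86_XCPT_GP,  IEM_XCPT_FLAGS_T_CPU_XCPT);                              /* -> VMX_EXIT_INT_INFO_TYPE_HW_XCPT      */
    iemVmxGetEventType(X86_XCPT_BP,  IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR);    /* -> VMX_EXIT_INT_INFO_TYPE_SW_XCPT      */
    iemVmxGetEventType(X86_XCPT_DB,  IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR); /* -> VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT */
    iemVmxGetEventType(0x30,         IEM_XCPT_FLAGS_T_SOFT_INT);                              /* -> VMX_EXIT_INT_INFO_TYPE_SW_INT       */
    iemVmxGetEventType(0x20,         IEM_XCPT_FLAGS_T_EXT_INT);                               /* -> VMX_EXIT_INT_INFO_TYPE_EXT_INT      */
#endif
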
683
684/**
685 * Determines whether the guest is using PAE paging given the VMCS.
686 *
687 * @returns @c true if PAE paging mode is used, @c false otherwise.
688 * @param pVmcs Pointer to the virtual VMCS.
689 */
690DECL_FORCE_INLINE(bool) iemVmxVmcsIsGuestPaePagingEnabled(PCVMXVVMCS pVmcs)
691{
692 return ( !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST)
693 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE)
694 && (pVmcs->u64GuestCr0.u & X86_CR0_PG));
695}
696
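/*
 * Note (illustrative, per the IA-32 paging-mode rules): PAE paging is in effect when
 * CR0.PG = 1, CR4.PAE = 1 and IA-32e mode is not active; here the "IA-32e mode guest"
 * VM-entry control stands in for the EFER.LMA check.  E.g.:
 *
 *   IA32E_MODE_GUEST   CR4.PAE   CR0.PG    PAE paging?
 *          0              1         1         yes
 *          0              0         1         no  (32-bit paging)
 *          1              1         1         no  (4-level paging)
 */
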
697
698/**
699 * Sets the Exit qualification VMCS field.
700 *
701 * @param pVCpu The cross context virtual CPU structure.
702 * @param u64ExitQual The Exit qualification.
703 */
704DECL_FORCE_INLINE(void) iemVmxVmcsSetExitQual(PVMCPUCC pVCpu, uint64_t u64ExitQual)
705{
706 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64RoExitQual.u = u64ExitQual;
707}
708
709
710/**
711 * Sets the VM-exit interruption information field.
712 *
713 * @param pVCpu The cross context virtual CPU structure.
714 * @param uExitIntInfo The VM-exit interruption information.
715 */
716DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntInfo(PVMCPUCC pVCpu, uint32_t uExitIntInfo)
717{
718 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoExitIntInfo = uExitIntInfo;
719}
720
721
722/**
723 * Sets the VM-exit interruption error code.
724 *
725 * @param pVCpu The cross context virtual CPU structure.
726 * @param uErrCode The error code.
727 */
728DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntErrCode(PVMCPUCC pVCpu, uint32_t uErrCode)
729{
730 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoExitIntErrCode = uErrCode;
731}
732
733
734/**
735 * Sets the IDT-vectoring information field.
736 *
737 * @param pVCpu The cross context virtual CPU structure.
738 * @param uIdtVectorInfo The IDT-vectoring information.
739 */
740DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringInfo(PVMCPUCC pVCpu, uint32_t uIdtVectorInfo)
741{
742 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoIdtVectoringInfo = uIdtVectorInfo;
743}
744
745
746/**
747 * Sets the IDT-vectoring error code field.
748 *
749 * @param pVCpu The cross context virtual CPU structure.
750 * @param uErrCode The error code.
751 */
752DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringErrCode(PVMCPUCC pVCpu, uint32_t uErrCode)
753{
754 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoIdtVectoringErrCode = uErrCode;
755}
756
757
758/**
759 * Sets the VM-exit guest-linear address VMCS field.
760 *
761 * @param pVCpu The cross context virtual CPU structure.
762 * @param uGuestLinearAddr The VM-exit guest-linear address.
763 */
764DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestLinearAddr(PVMCPUCC pVCpu, uint64_t uGuestLinearAddr)
765{
766 /* Bits 63:32 of guest-linear address MBZ if the guest isn't in long mode prior to the VM-exit. */
767 Assert(CPUMIsGuestInLongModeEx(IEM_GET_CTX(pVCpu)) || !(uGuestLinearAddr & UINT64_C(0xffffffff00000000)));
768 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64RoGuestLinearAddr.u = uGuestLinearAddr;
769}
770
771
772/**
773 * Sets the VM-exit guest-physical address VMCS field.
774 *
775 * @param pVCpu The cross context virtual CPU structure.
776 * @param uGuestPhysAddr The VM-exit guest-physical address.
777 */
778DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestPhysAddr(PVMCPUCC pVCpu, uint64_t uGuestPhysAddr)
779{
780 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64RoGuestPhysAddr.u = uGuestPhysAddr;
781}
782
783
784/**
785 * Sets the VM-exit instruction length VMCS field.
786 *
787 * @param pVCpu The cross context virtual CPU structure.
788 * @param cbInstr The VM-exit instruction length in bytes.
789 *
790 * @remarks Callers may clear this field to 0. Hence, this function does not check
791 * the validity of the instruction length.
792 */
793DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrLen(PVMCPUCC pVCpu, uint32_t cbInstr)
794{
795 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoExitInstrLen = cbInstr;
796}
797
798
799/**
800 * Sets the VM-exit instruction info. VMCS field.
801 *
802 * @param pVCpu The cross context virtual CPU structure.
803 * @param uExitInstrInfo The VM-exit instruction information.
804 */
805DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrInfo(PVMCPUCC pVCpu, uint32_t uExitInstrInfo)
806{
807 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoExitInstrInfo = uExitInstrInfo;
808}
809
810
811/**
812 * Sets the guest pending-debug exceptions field.
813 *
814 * @param pVCpu The cross context virtual CPU structure.
815 * @param uGuestPendingDbgXcpts The guest pending-debug exceptions.
816 */
817DECL_FORCE_INLINE(void) iemVmxVmcsSetGuestPendingDbgXcpts(PVMCPUCC pVCpu, uint64_t uGuestPendingDbgXcpts)
818{
819 Assert(!(uGuestPendingDbgXcpts & VMX_VMCS_GUEST_PENDING_DEBUG_VALID_MASK));
820 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64GuestPendingDbgXcpts.u = uGuestPendingDbgXcpts;
821}
822
823
824/**
825 * Implements VMSucceed for VMX instruction success.
826 *
827 * @param pVCpu The cross context virtual CPU structure.
828 */
829DECL_FORCE_INLINE(void) iemVmxVmSucceed(PVMCPUCC pVCpu)
830{
831 return CPUMSetGuestVmxVmSucceed(&pVCpu->cpum.GstCtx);
832}
833
834
835/**
836 * Implements VMFailInvalid for VMX instruction failure.
837 *
838 * @param pVCpu The cross context virtual CPU structure.
839 */
840DECL_FORCE_INLINE(void) iemVmxVmFailInvalid(PVMCPUCC pVCpu)
841{
842 return CPUMSetGuestVmxVmFailInvalid(&pVCpu->cpum.GstCtx);
843}
844
845
846/**
847 * Implements VMFail for VMX instruction failure.
848 *
849 * @param pVCpu The cross context virtual CPU structure.
850 * @param enmInsErr The VM instruction error.
851 */
852DECL_FORCE_INLINE(void) iemVmxVmFail(PVMCPUCC pVCpu, VMXINSTRERR enmInsErr)
853{
854 return CPUMSetGuestVmxVmFail(&pVCpu->cpum.GstCtx, enmInsErr);
855}
856
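/*
 * Background (per the VMX instruction conventions; the CPUM helpers above are assumed to
 * implement them):
 *   VMsucceed:      CF, PF, AF, ZF, SF and OF are all cleared.
 *   VMfailInvalid:  CF is set, the other listed flags are cleared (used when there is no
 *                   current VMCS to hold an error number).
 *   VMfailValid:    ZF is set, the other listed flags are cleared, and the VM-instruction
 *                   error field of the current VMCS receives the VMXINSTRERR value.
 * CPUMSetGuestVmxVmFail presumably selects between the failValid/failInvalid forms depending
 * on whether a current VMCS exists.
 */
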
857
858/**
859 * Checks if the given auto-load/store MSR area count is valid for the
860 * implementation.
861 *
862 * @returns @c true if it's within the valid limit, @c false otherwise.
863 * @param pVCpu The cross context virtual CPU structure.
864 * @param uMsrCount The MSR area count to check.
865 */
866DECL_FORCE_INLINE(bool) iemVmxIsAutoMsrCountValid(PCVMCPU pVCpu, uint32_t uMsrCount)
867{
868 uint64_t const u64VmxMiscMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Misc;
869 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(u64VmxMiscMsr);
870 Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
871 if (uMsrCount <= cMaxSupportedMsrs)
872 return true;
873 return false;
874}
875
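/*
 * Note (sketch, based on the architectural definition of IA32_VMX_MISC bits 27:25): if that
 * field reads N, the recommended maximum number of MSRs in each auto-load/store area is
 * 512 * (N + 1), so even N = 0 allows 512 entries.  VMX_MISC_MAX_MSRS is assumed to evaluate
 * to exactly this; the Assert above merely checks that the limit fits within the fixed
 * VMX_V_AUTOMSR_AREA_SIZE backing area.
 */
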
876
877/**
878 * Flushes the current VMCS contents back to guest memory.
879 *
880 * @returns VBox status code.
881 * @param pVCpu The cross context virtual CPU structure.
882 */
883DECL_FORCE_INLINE(int) iemVmxWriteCurrentVmcsToGstMem(PVMCPUCC pVCpu)
884{
885 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
886 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
887 &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs, sizeof(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs));
888 return rc;
889}
890
891
892/**
893 * Populates the current VMCS contents from guest memory.
894 *
895 * @returns VBox status code.
896 * @param pVCpu The cross context virtual CPU structure.
897 */
898DECL_FORCE_INLINE(int) iemVmxReadCurrentVmcsFromGstMem(PVMCPUCC pVCpu)
899{
900 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
901 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs,
902 IEM_VMX_GET_CURRENT_VMCS(pVCpu), sizeof(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs));
903 return rc;
904}
905
906
907/**
908 * Implements VMSucceed for the VMREAD instruction and increments the guest RIP.
909 *
910 * @param pVCpu The cross context virtual CPU structure.
911 */
912DECL_FORCE_INLINE(void) iemVmxVmreadSuccess(PVMCPUCC pVCpu, uint8_t cbInstr)
913{
914 iemVmxVmSucceed(pVCpu);
915 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
916}
917
918
919/**
920 * Gets the instruction diagnostic for segment base checks during VM-entry of a
921 * nested-guest.
922 *
923 * @param iSegReg The segment index (X86_SREG_XXX).
924 */
925IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBase(unsigned iSegReg)
926{
927 switch (iSegReg)
928 {
929 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseCs;
930 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseDs;
931 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseEs;
932 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseFs;
933 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseGs;
934 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseSs;
935 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_1);
936 }
937}
938
939
940/**
941 * Gets the instruction diagnostic for segment base checks during VM-entry of a
942 * nested-guest that is in Virtual-8086 mode.
943 *
944 * @param iSegReg The segment index (X86_SREG_XXX).
945 */
946IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBaseV86(unsigned iSegReg)
947{
948 switch (iSegReg)
949 {
950 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseV86Cs;
951 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ds;
952 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseV86Es;
953 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseV86Fs;
954 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseV86Gs;
955 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ss;
956 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
957 }
958}
959
960
961/**
962 * Gets the instruction diagnostic for segment limit checks during VM-entry of a
963 * nested-guest that is in Virtual-8086 mode.
964 *
965 * @param iSegReg The segment index (X86_SREG_XXX).
966 */
967IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegLimitV86(unsigned iSegReg)
968{
969 switch (iSegReg)
970 {
971 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegLimitV86Cs;
972 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ds;
973 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegLimitV86Es;
974 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegLimitV86Fs;
975 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegLimitV86Gs;
976 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ss;
977 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_3);
978 }
979}
980
981
982/**
983 * Gets the instruction diagnostic for segment attribute checks during VM-entry of a
984 * nested-guest that is in Virtual-8086 mode.
985 *
986 * @param iSegReg The segment index (X86_SREG_XXX).
987 */
988IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrV86(unsigned iSegReg)
989{
990 switch (iSegReg)
991 {
992 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrV86Cs;
993 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ds;
994 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrV86Es;
995 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrV86Fs;
996 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrV86Gs;
997 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ss;
998 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_4);
999 }
1000}
1001
1002
1003/**
1004 * Gets the instruction diagnostic for segment attributes reserved bits failure
1005 * during VM-entry of a nested-guest.
1006 *
1007 * @param iSegReg The segment index (X86_SREG_XXX).
1008 */
1009IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrRsvd(unsigned iSegReg)
1010{
1011 switch (iSegReg)
1012 {
1013 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdCs;
1014 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdDs;
1015 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrRsvdEs;
1016 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdFs;
1017 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdGs;
1018 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdSs;
1019 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_5);
1020 }
1021}
1022
1023
1024/**
1025 * Gets the instruction diagnostic for segment attributes descriptor-type
1026 * (code/data segment or system) failure during VM-entry of a nested-guest.
1027 *
1028 * @param iSegReg The segment index (X86_SREG_XXX).
1029 */
1030IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDescType(unsigned iSegReg)
1031{
1032 switch (iSegReg)
1033 {
1034 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeCs;
1035 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeDs;
1036 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeEs;
1037 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeFs;
1038 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeGs;
1039 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeSs;
1040 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_6);
1041 }
1042}
1043
1044
1045/**
1046 * Gets the instruction diagnostic for the segment attribute 'present' bit failure
1047 * during VM-entry of a nested-guest.
1048 *
1049 * @param iSegReg The segment index (X86_SREG_XXX).
1050 */
1051IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrPresent(unsigned iSegReg)
1052{
1053 switch (iSegReg)
1054 {
1055 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrPresentCs;
1056 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrPresentDs;
1057 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrPresentEs;
1058 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrPresentFs;
1059 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrPresentGs;
1060 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrPresentSs;
1061 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_7);
1062 }
1063}
1064
1065
1066/**
1067 * Gets the instruction diagnostic for segment attribute granularity failure during
1068 * VM-entry of a nested-guest.
1069 *
1070 * @param iSegReg The segment index (X86_SREG_XXX).
1071 */
1072IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrGran(unsigned iSegReg)
1073{
1074 switch (iSegReg)
1075 {
1076 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrGranCs;
1077 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrGranDs;
1078 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrGranEs;
1079 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrGranFs;
1080 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrGranGs;
1081 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrGranSs;
1082 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_8);
1083 }
1084}
1085
1086/**
1087 * Gets the instruction diagnostic for segment attribute DPL/RPL failure during
1088 * VM-entry of a nested-guest.
1089 *
1090 * @param iSegReg The segment index (X86_SREG_XXX).
1091 */
1092IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDplRpl(unsigned iSegReg)
1093{
1094 switch (iSegReg)
1095 {
1096 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplCs;
1097 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplDs;
1098 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDplRplEs;
1099 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplFs;
1100 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplGs;
1101 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplSs;
1102 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_9);
1103 }
1104}
1105
1106
1107/**
1108 * Gets the instruction diagnostic for segment attribute type accessed failure
1109 * during VM-entry of a nested-guest.
1110 *
1111 * @param iSegReg The segment index (X86_SREG_XXX).
1112 */
1113IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrTypeAcc(unsigned iSegReg)
1114{
1115 switch (iSegReg)
1116 {
1117 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccCs;
1118 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccDs;
1119 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccEs;
1120 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccFs;
1121 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccGs;
1122 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccSs;
1123 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_10);
1124 }
1125}
1126
1127
1128/**
1129 * Saves the guest control registers, debug registers and some MSRs as part of
1130 * VM-exit.
1131 *
1132 * @param pVCpu The cross context virtual CPU structure.
1133 */
1134IEM_STATIC void iemVmxVmexitSaveGuestControlRegsMsrs(PVMCPUCC pVCpu)
1135{
1136 /*
1137 * Saves the guest control registers, debug registers and some MSRs.
1138 * See Intel spec. 27.3.1 "Saving Control Registers, Debug Registers and MSRs".
1139 */
1140 PVMXVVMCS pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1141
1142 /* Save control registers. */
1143 pVmcs->u64GuestCr0.u = pVCpu->cpum.GstCtx.cr0;
1144 pVmcs->u64GuestCr3.u = pVCpu->cpum.GstCtx.cr3;
1145 pVmcs->u64GuestCr4.u = pVCpu->cpum.GstCtx.cr4;
1146
1147 /* Save SYSENTER CS, ESP, EIP. */
1148 pVmcs->u32GuestSysenterCS = pVCpu->cpum.GstCtx.SysEnter.cs;
1149 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1150 {
1151 pVmcs->u64GuestSysenterEsp.u = pVCpu->cpum.GstCtx.SysEnter.esp;
1152 pVmcs->u64GuestSysenterEip.u = pVCpu->cpum.GstCtx.SysEnter.eip;
1153 }
1154 else
1155 {
1156 pVmcs->u64GuestSysenterEsp.s.Lo = pVCpu->cpum.GstCtx.SysEnter.esp;
1157 pVmcs->u64GuestSysenterEip.s.Lo = pVCpu->cpum.GstCtx.SysEnter.eip;
1158 }
1159
1160 /* Save debug registers (DR7 and IA32_DEBUGCTL MSR). */
1161 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_DEBUG)
1162 {
1163 pVmcs->u64GuestDr7.u = pVCpu->cpum.GstCtx.dr[7];
1164 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
1165 }
1166
1167 /* Save PAT MSR. */
1168 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PAT_MSR)
1169 pVmcs->u64GuestPatMsr.u = pVCpu->cpum.GstCtx.msrPAT;
1170
1171 /* Save EFER MSR. */
1172 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_EFER_MSR)
1173 pVmcs->u64GuestEferMsr.u = pVCpu->cpum.GstCtx.msrEFER;
1174
1175 /* We don't support clearing IA32_BNDCFGS MSR yet. */
1176 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR));
1177
1178 /* Nothing to do for SMBASE register - We don't support SMM yet. */
1179}
1180
1181
1182/**
1183 * Saves the guest force-flags in preparation of entering the nested-guest.
1184 *
1185 * @param pVCpu The cross context virtual CPU structure.
1186 */
1187IEM_STATIC void iemVmxVmentrySaveNmiBlockingFF(PVMCPUCC pVCpu)
1188{
1189 /* We shouldn't be called multiple times during VM-entry. */
1190 Assert(pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions == 0);
1191
1192 /* MTF should not be set outside VMX non-root mode. */
1193 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
1194
1195 /*
1196 * Preserve the required force-flags.
1197 *
1198 * We cache and clear force-flags that would affect the execution of the
1199 * nested-guest. Cached flags are then restored while returning to the guest
1200 * if necessary.
1201 *
1202 * - VMCPU_FF_INHIBIT_INTERRUPTS need not be cached as it only affects
1203 * interrupts until the completion of the current VMLAUNCH/VMRESUME
1204 * instruction. Interrupt inhibition for any nested-guest instruction
1205 * is supplied by the guest-interruptibility state VMCS field and will
1206 * be set up as part of loading the guest state.
1207 *
1208 * - VMCPU_FF_BLOCK_NMIS needs to be cached as VM-exits caused before
1209 * successful VM-entry (due to invalid guest-state) need to continue
1210 * blocking NMIs if it was in effect before VM-entry.
1211 *
1212 * - MTF need not be preserved as it's used only in VMX non-root mode and
1213 * is supplied through the VM-execution controls.
1214 *
1215 * The remaining FFs (e.g. timers, APIC updates) can stay in place so that
1216 * we will be able to generate interrupts that may cause VM-exits for
1217 * the nested-guest.
1218 */
1219 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
1220}
1221
1222
1223/**
1224 * Restores the guest force-flags in preparation of exiting the nested-guest.
1225 *
1226 * @param pVCpu The cross context virtual CPU structure.
1227 */
1228IEM_STATIC void iemVmxVmexitRestoreNmiBlockingFF(PVMCPUCC pVCpu)
1229{
1230 if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
1231 {
1232 VMCPU_FF_SET_MASK(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions);
1233 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0;
1234 }
1235}
1236
1237
1238/**
1239 * Performs the VMX transition to/from VMX non-root mode.
1240 *
1241 * @param pVCpu The cross context virtual CPU structure.
1242 */
1243IEM_STATIC int iemVmxTransition(PVMCPUCC pVCpu)
1244{
1245 /*
1246 * Inform PGM about paging mode changes.
1247 * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet,
1248 * see comment in iemMemPageTranslateAndCheckAccess().
1249 */
1250 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
1251 true /* fForce */);
1252 AssertRCReturn(rc, rc);
1253
1254 /* Invalidate IEM TLBs now that we've forced a PGM mode change. */
1255 IEMTlbInvalidateAll(pVCpu, false /*fVmm*/);
1256
1257 /* Inform CPUM (recompiler), can later be removed. */
1258 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1259
1260 /* Re-initialize IEM cache/state after the drastic mode switch. */
1261 iemReInitExec(pVCpu);
1262 return rc;
1263}
1264
1265
1266/**
1267 * Calculates the current VMX-preemption timer value.
1268 *
1269 * @returns The current VMX-preemption timer value.
1270 * @param pVCpu The cross context virtual CPU structure.
1271 */
1272IEM_STATIC uint32_t iemVmxCalcPreemptTimer(PVMCPUCC pVCpu)
1273{
1274 /*
1275 * Assume the following:
1276 * PreemptTimerShift = 5
1277 * VmcsPreemptTimer = 2 (i.e. need to decrement by 1 every VmcsPreemptTimer * RT_BIT(PreemptTimerShift) TSC ticks; 20000 in this illustration)
1278 * EntryTick = 50000 (TSC at time of VM-entry)
1279 *
1280 * CurTick Delta PreemptTimerVal
1281 * ----------------------------------
1282 * 60000 10000 2
1283 * 80000 30000 1
1284 * 90000 40000 0 -> VM-exit.
1285 *
1286 * If Delta >= VmcsPreemptTimer * RT_BIT(PreemptTimerShift), a VMX-preemption timer VM-exit occurs.
1287 * The saved VMX-preemption timer value is calculated as follows:
1288 * PreemptTimerVal = VmcsPreemptTimer - (Delta / (VmcsPreemptTimer * RT_BIT(PreemptTimerShift)))
1289 * E.g. (using integer division):
1290 * Delta = 10000
1291 * Tmp = 10000 / (2 * 10000) = 0
1292 * NewPt = 2 - 0 = 2
1293 * Delta = 30000
1294 * Tmp = 30000 / (2 * 10000) = 1
1295 * NewPt = 2 - 1 = 1
1296 * Delta = 40000
1297 * Tmp = 40000 / 20000 = 2
1298 * NewPt = 2 - 2 = 0
1299 */
1300 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
1301 uint32_t const uVmcsPreemptVal = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PreemptTimer;
1302 if (uVmcsPreemptVal > 0)
1303 {
1304 uint64_t const uCurTick = TMCpuTickGetNoCheck(pVCpu);
1305 uint64_t const uEntryTick = pVCpu->cpum.GstCtx.hwvirt.vmx.uEntryTick;
1306 uint64_t const uDelta = uCurTick - uEntryTick;
1307 uint32_t const uPreemptTimer = uVmcsPreemptVal
1308 - ASMDivU64ByU32RetU32(uDelta, uVmcsPreemptVal * RT_BIT(VMX_V_PREEMPT_TIMER_SHIFT));
1309 return uPreemptTimer;
1310 }
1311 return 0;
1312}
1313
1314
1315/**
1316 * Saves guest segment registers, GDTR, IDTR, LDTR, TR as part of VM-exit.
1317 *
1318 * @param pVCpu The cross context virtual CPU structure.
1319 */
1320IEM_STATIC void iemVmxVmexitSaveGuestSegRegs(PVMCPUCC pVCpu)
1321{
1322 /*
1323 * Save guest segment registers, GDTR, IDTR, LDTR, TR.
1324 * See Intel spec 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
1325 */
1326 /* CS, SS, ES, DS, FS, GS. */
1327 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1328 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
1329 {
1330 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1331 if (!pSelReg->Attr.n.u1Unusable)
1332 iemVmxVmcsSetGuestSegReg(pVmcs, iSegReg, pSelReg);
1333 else
1334 {
1335 /*
1336 * For unusable segments the attributes are undefined except for CS and SS.
1337 * For the rest we don't bother preserving anything but the unusable bit.
1338 */
1339 switch (iSegReg)
1340 {
1341 case X86_SREG_CS:
1342 pVmcs->GuestCs = pSelReg->Sel;
1343 pVmcs->u64GuestCsBase.u = pSelReg->u64Base;
1344 pVmcs->u32GuestCsLimit = pSelReg->u32Limit;
1345 pVmcs->u32GuestCsAttr = pSelReg->Attr.u & ( X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
1346 | X86DESCATTR_UNUSABLE);
1347 break;
1348
1349 case X86_SREG_SS:
1350 pVmcs->GuestSs = pSelReg->Sel;
1351 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1352 pVmcs->u64GuestSsBase.u &= UINT32_C(0xffffffff);
1353 pVmcs->u32GuestSsAttr = pSelReg->Attr.u & (X86DESCATTR_DPL | X86DESCATTR_UNUSABLE);
1354 break;
1355
1356 case X86_SREG_DS:
1357 pVmcs->GuestDs = pSelReg->Sel;
1358 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1359 pVmcs->u64GuestDsBase.u &= UINT32_C(0xffffffff);
1360 pVmcs->u32GuestDsAttr = X86DESCATTR_UNUSABLE;
1361 break;
1362
1363 case X86_SREG_ES:
1364 pVmcs->GuestEs = pSelReg->Sel;
1365 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1366 pVmcs->u64GuestEsBase.u &= UINT32_C(0xffffffff);
1367 pVmcs->u32GuestEsAttr = X86DESCATTR_UNUSABLE;
1368 break;
1369
1370 case X86_SREG_FS:
1371 pVmcs->GuestFs = pSelReg->Sel;
1372 pVmcs->u64GuestFsBase.u = pSelReg->u64Base;
1373 pVmcs->u32GuestFsAttr = X86DESCATTR_UNUSABLE;
1374 break;
1375
1376 case X86_SREG_GS:
1377 pVmcs->GuestGs = pSelReg->Sel;
1378 pVmcs->u64GuestGsBase.u = pSelReg->u64Base;
1379 pVmcs->u32GuestGsAttr = X86DESCATTR_UNUSABLE;
1380 break;
1381 }
1382 }
1383 }
1384
1385 /* Segment attribute bits 31:17 and 11:8 MBZ. */
1386 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
1387 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
1388 | X86DESCATTR_UNUSABLE;
1389 /* LDTR. */
1390 {
1391 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.ldtr;
1392 pVmcs->GuestLdtr = pSelReg->Sel;
1393 pVmcs->u64GuestLdtrBase.u = pSelReg->u64Base;
1394 Assert(X86_IS_CANONICAL(pSelReg->u64Base));
1395 pVmcs->u32GuestLdtrLimit = pSelReg->u32Limit;
1396 pVmcs->u32GuestLdtrAttr = pSelReg->Attr.u & fValidAttrMask;
1397 }
1398
1399 /* TR. */
1400 {
1401 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.tr;
1402 pVmcs->GuestTr = pSelReg->Sel;
1403 pVmcs->u64GuestTrBase.u = pSelReg->u64Base;
1404 pVmcs->u32GuestTrLimit = pSelReg->u32Limit;
1405 pVmcs->u32GuestTrAttr = pSelReg->Attr.u & fValidAttrMask;
1406 }
1407
1408 /* GDTR. */
1409 pVmcs->u64GuestGdtrBase.u = pVCpu->cpum.GstCtx.gdtr.pGdt;
1410 pVmcs->u32GuestGdtrLimit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
1411
1412 /* IDTR. */
1413 pVmcs->u64GuestIdtrBase.u = pVCpu->cpum.GstCtx.idtr.pIdt;
1414 pVmcs->u32GuestIdtrLimit = pVCpu->cpum.GstCtx.idtr.cbIdt;
1415}
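/*
 * Illustrative sketch (not compiled): fValidAttrMask above keeps only the bits defined
 * in the VMCS access-rights format -- type 3:0, S 4, DPL 6:5, P 7, AVL 12, L 13, D/B 14,
 * G 15 and unusable 16 -- so bits 31:17 and 11:8 end up zero. Rebuilding the mask from
 * raw bit positions gives the same value; the function name is illustrative only.
 */
#if 0
static void exampleSegAttrValidMask(void)
{
    uint32_t const fMask = UINT32_C(0x0000000f)  /* 3:0  segment type */
                         | UINT32_C(0x00000010)  /* 4    descriptor type (S) */
                         | UINT32_C(0x00000060)  /* 6:5  DPL */
                         | UINT32_C(0x00000080)  /* 7    present */
                         | UINT32_C(0x00001000)  /* 12   available (AVL) */
                         | UINT32_C(0x00002000)  /* 13   long mode (L) */
                         | UINT32_C(0x00004000)  /* 14   default/big (D/B) */
                         | UINT32_C(0x00008000)  /* 15   granularity (G) */
                         | UINT32_C(0x00010000); /* 16   unusable */
    Assert(fMask == UINT32_C(0x0001f0ff));       /* Bits 31:17 and 11:8 are clear. */
}
#endif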
1416
1417
1418/**
1419 * Saves guest non-register state as part of VM-exit.
1420 *
1421 * @param pVCpu The cross context virtual CPU structure.
1422 * @param uExitReason The VM-exit reason.
1423 */
1424IEM_STATIC void iemVmxVmexitSaveGuestNonRegState(PVMCPUCC pVCpu, uint32_t uExitReason)
1425{
1426 /*
1427 * Save guest non-register state.
1428 * See Intel spec. 27.3.4 "Saving Non-Register State".
1429 */
1430 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1431
1432 /*
1433 * Activity state.
1434 * Most VM-exits will occur in the active state. However, if the first instruction
1435 * following the VM-entry is a HLT instruction, and the MTF VM-execution control is set,
1436 * the VM-exit will be from the HLT activity state.
1437 *
1438 * See Intel spec. 25.5.2 "Monitor Trap Flag".
1439 */
1440 /** @todo NSTVMX: Does triple-fault VM-exit reflect a shutdown activity state or
1441 * not? */
1442 EMSTATE const enmActivityState = EMGetState(pVCpu);
1443 switch (enmActivityState)
1444 {
1445 case EMSTATE_HALTED: pVmcs->u32GuestActivityState = VMX_VMCS_GUEST_ACTIVITY_HLT; break;
1446 default: pVmcs->u32GuestActivityState = VMX_VMCS_GUEST_ACTIVITY_ACTIVE; break;
1447 }
1448
1449 /*
1450 * Interruptibility-state.
1451 */
1452 /* NMI. */
1453 pVmcs->u32GuestIntrState = 0;
1454 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
1455 {
1456 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
1457 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1458 }
1459 else
1460 {
1461 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1462 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1463 }
1464
1465 /* Blocking-by-STI. */
1466 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1467 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
1468 {
1469 /** @todo NSTVMX: We can't distinguish between blocking-by-MovSS and blocking-by-STI
1470 * currently. */
1471 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1472 }
1473 /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */
1474
1475 /*
1476 * Pending debug exceptions.
1477 *
1478 * For VM-exits where it is not applicable, we can safely zero out the field.
1479 * For VM-exits where it is applicable, it's expected to be updated by the caller already.
1480 */
1481 if ( uExitReason != VMX_EXIT_INIT_SIGNAL
1482 && uExitReason != VMX_EXIT_SMI
1483 && uExitReason != VMX_EXIT_ERR_MACHINE_CHECK
1484 && !VMXIsVmexitTrapLike(uExitReason))
1485 {
1486 /** @todo NSTVMX: also must exclude VM-exits caused by debug exceptions when
1487 * block-by-MovSS is in effect. */
1488 pVmcs->u64GuestPendingDbgXcpts.u = 0;
1489 }
1490
1491 /*
1492 * Save the VMX-preemption timer value back into the VMCS if the feature is enabled.
1493 *
1494 * For VMX-preemption timer VM-exits, we have already written 0 back into the VMCS
1495 * (when saving the timer is supported), so there is nothing further to do here.
1496 */
1497 if ( uExitReason != VMX_EXIT_PREEMPT_TIMER
1498 && (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
1499 pVmcs->u32PreemptTimer = iemVmxCalcPreemptTimer(pVCpu);
1500
1501 /*
1502 * PAE PDPTEs.
1503 *
1504 * If EPT is enabled and PAE paging was used at the time of the VM-exit,
1505 * the PDPTEs are saved into the VMCS. Otherwise they're undefined but
1506 * we zero them for consistency.
1507 */
1508 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
1509 {
1510 if ( !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST)
1511 && (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
1512 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
1513 {
1514 pVmcs->u64GuestPdpte0.u = pVCpu->cpum.GstCtx.aPaePdpes[0].u;
1515 pVmcs->u64GuestPdpte1.u = pVCpu->cpum.GstCtx.aPaePdpes[1].u;
1516 pVmcs->u64GuestPdpte2.u = pVCpu->cpum.GstCtx.aPaePdpes[2].u;
1517 pVmcs->u64GuestPdpte3.u = pVCpu->cpum.GstCtx.aPaePdpes[3].u;
1518 }
1519 else
1520 {
1521 pVmcs->u64GuestPdpte0.u = 0;
1522 pVmcs->u64GuestPdpte1.u = 0;
1523 pVmcs->u64GuestPdpte2.u = 0;
1524 pVmcs->u64GuestPdpte3.u = 0;
1525 }
1526
1527 /* Clear PGM's copy of the EPT pointer for added safety. */
1528 PGMSetGuestEptPtr(pVCpu, 0 /* uEptPtr */);
1529 }
1530 else
1531 {
1532 pVmcs->u64GuestPdpte0.u = 0;
1533 pVmcs->u64GuestPdpte1.u = 0;
1534 pVmcs->u64GuestPdpte2.u = 0;
1535 pVmcs->u64GuestPdpte3.u = 0;
1536 }
1537}
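/*
 * Illustrative sketch (not compiled): the condition used above for saving the guest PAE
 * PDPTEs into the VMCS -- EPT enabled and the guest using PAE paging (not IA-32e mode)
 * at the time of the VM-exit. The helper name and parameters are illustrative only.
 */
#if 0
static bool exampleMustSavePaePdptes(bool fEptEnabled, bool fIa32eModeGuest, uint64_t uCr0, uint64_t uCr4)
{
    return fEptEnabled
        && !fIa32eModeGuest
        && (uCr4 & X86_CR4_PAE)
        && (uCr0 & X86_CR0_PG);
}
#endif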
1538
1539
1540/**
1541 * Saves the guest-state as part of VM-exit.
1542 *
1543 * @returns VBox status code.
1544 * @param pVCpu The cross context virtual CPU structure.
1545 * @param uExitReason The VM-exit reason.
1546 */
1547IEM_STATIC void iemVmxVmexitSaveGuestState(PVMCPUCC pVCpu, uint32_t uExitReason)
1548{
1549 iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu);
1550 iemVmxVmexitSaveGuestSegRegs(pVCpu);
1551
1552 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64GuestRip.u = pVCpu->cpum.GstCtx.rip;
1553 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64GuestRsp.u = pVCpu->cpum.GstCtx.rsp;
1554 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u; /** @todo NSTVMX: Check RFLAGS.RF handling. */
1555
1556 iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason);
1557}
1558
1559
1560/**
1561 * Saves the guest MSRs into the VM-exit MSR-store area as part of VM-exit.
1562 *
1563 * @returns VBox status code.
1564 * @param pVCpu The cross context virtual CPU structure.
1565 * @param uExitReason The VM-exit reason (for diagnostic purposes).
1566 */
1567IEM_STATIC int iemVmxVmexitSaveGuestAutoMsrs(PVMCPUCC pVCpu, uint32_t uExitReason)
1568{
1569 /*
1570 * Save guest MSRs.
1571 * See Intel spec. 27.4 "Saving MSRs".
1572 */
1573 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1574 const char * const pszFailure = "VMX-abort";
1575
1576 /*
1577 * The VM-exit MSR-store area address need not be a valid guest-physical address if the
1578 * VM-exit MSR-store count is 0. If this is the case, bail early without reading it.
1579 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
1580 */
1581 uint32_t const cMsrs = RT_MIN(pVmcs->u32ExitMsrStoreCount, RT_ELEMENTS(pVCpu->cpum.GstCtx.hwvirt.vmx.aExitMsrStoreArea));
1582 if (!cMsrs)
1583 return VINF_SUCCESS;
1584
1585 /*
1586 * Verify the MSR auto-store count. Physical CPUs can behave unpredictably if the count
1587 * is exceeded, including possibly raising #MC exceptions during the VMX transition. Our
1588 * implementation causes a VMX-abort followed by a triple-fault.
1589 */
1590 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
1591 if (fIsMsrCountValid)
1592 { /* likely */ }
1593 else
1594 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount);
1595
1596 /*
1597 * Optimization if the nested hypervisor is using the same guest-physical page for both
1598 * the VM-entry MSR-load area and the VM-exit MSR-store area.
1599 */
1600 PVMXAUTOMSR pMsrArea;
1601 RTGCPHYS const GCPhysVmEntryMsrLoadArea = pVmcs->u64AddrEntryMsrLoad.u;
1602 RTGCPHYS const GCPhysVmExitMsrStoreArea = pVmcs->u64AddrExitMsrStore.u;
1603 if (GCPhysVmEntryMsrLoadArea == GCPhysVmExitMsrStoreArea)
1604 pMsrArea = pVCpu->cpum.GstCtx.hwvirt.vmx.aEntryMsrLoadArea;
1605 else
1606 {
1607 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.aExitMsrStoreArea[0],
1608 GCPhysVmExitMsrStoreArea, cMsrs * sizeof(VMXAUTOMSR));
1609 if (RT_SUCCESS(rc))
1610 pMsrArea = pVCpu->cpum.GstCtx.hwvirt.vmx.aExitMsrStoreArea;
1611 else
1612 {
1613 AssertMsgFailed(("VM-exit: Failed to read MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysVmExitMsrStoreArea, rc));
1614 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrReadPhys);
1615 }
1616 }
1617
1618 /*
1619 * Update VM-exit MSR store area.
1620 */
1621 PVMXAUTOMSR pMsr = pMsrArea;
1622 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
1623 {
1624 if ( !pMsr->u32Reserved
1625 && pMsr->u32Msr != MSR_IA32_SMBASE
1626 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
1627 {
1628 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pMsr->u32Msr, &pMsr->u64Value);
1629 if (rcStrict == VINF_SUCCESS)
1630 continue;
1631
1632 /*
1633 * If we're in ring-0, we cannot return to ring-3 at this point and still continue the VM-exit.
1634 * If a nested hypervisor stores an MSR that requires ring-3 handling, we cause a VMX-abort,
1635 * recording the MSR index in the auxiliary info. field and indicating it further with our
1636 * own, specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0
1637 * if possible, or come up with a better, generic solution.
1638 */
1639 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
1640 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_READ
1641 ? kVmxVDiag_Vmexit_MsrStoreRing3
1642 : kVmxVDiag_Vmexit_MsrStore;
1643 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
1644 }
1645 else
1646 {
1647 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
1648 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreRsvd);
1649 }
1650 }
1651
1652 /*
1653 * Commit the VM-exit MSR-store area to guest memory.
1654 */
1655 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmExitMsrStoreArea, pMsrArea, cMsrs * sizeof(VMXAUTOMSR));
1656 if (RT_SUCCESS(rc))
1657 return VINF_SUCCESS;
1658
1659 NOREF(uExitReason);
1660 NOREF(pszFailure);
1661
1662 AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysVmExitMsrStoreArea, rc));
1663 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrWritePhys);
1664}
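/*
 * Illustrative sketch (not compiled): each entry of the VM-exit MSR-store area is a
 * 16-byte VMXAUTOMSR record (MSR index, reserved dword, 64-bit value). The loop above
 * rejects entries with a non-zero reserved field, IA32_SMBASE and anything in the
 * x2APIC MSR range; a stand-alone validation pass over such an area might look like
 * this (helper name is illustrative only).
 */
#if 0
static bool exampleValidateMsrStoreArea(PCVMXAUTOMSR paMsrs, uint32_t cMsrs)
{
    for (uint32_t i = 0; i < cMsrs; i++)
        if (   paMsrs[i].u32Reserved != 0
            || paMsrs[i].u32Msr == MSR_IA32_SMBASE
            || (paMsrs[i].u32Msr >> 8) == (MSR_IA32_X2APIC_START >> 8))
            return false;
    return true;
}
#endif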
1665
1666
1667/**
1668 * Performs a VMX abort (due to a fatal error during VM-exit).
1669 *
1670 * @returns Strict VBox status code.
1671 * @param pVCpu The cross context virtual CPU structure.
1672 * @param enmAbort The VMX abort reason.
1673 */
1674IEM_STATIC VBOXSTRICTRC iemVmxAbort(PVMCPUCC pVCpu, VMXABORT enmAbort)
1675{
1676 /*
1677 * Perform the VMX abort.
1678 * See Intel spec. 27.7 "VMX Aborts".
1679 */
1680 LogFunc(("enmAbort=%u (%s) -> RESET\n", enmAbort, VMXGetAbortDesc(enmAbort)));
1681
1682 /* We don't support SMX yet. */
1683 pVCpu->cpum.GstCtx.hwvirt.vmx.enmAbort = enmAbort;
1684 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1685 {
1686 RTGCPHYS const GCPhysVmcs = IEM_VMX_GET_CURRENT_VMCS(pVCpu);
1687 uint32_t const offVmxAbort = RT_UOFFSETOF(VMXVVMCS, enmVmxAbort);
1688 PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + offVmxAbort, &enmAbort, sizeof(enmAbort));
1689 }
1690
1691 return VINF_EM_TRIPLE_FAULT;
1692}
1693
1694
1695/**
1696 * Loads host control registers, debug registers and MSRs as part of VM-exit.
1697 *
1698 * @param pVCpu The cross context virtual CPU structure.
1699 */
1700IEM_STATIC void iemVmxVmexitLoadHostControlRegsMsrs(PVMCPUCC pVCpu)
1701{
1702 /*
1703 * Load host control registers, debug registers and MSRs.
1704 * See Intel spec. 27.5.1 "Loading Host Control Registers, Debug Registers, MSRs".
1705 */
1706 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1707 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
1708
1709 /* CR0. */
1710 {
1711 /* Bits 63:32, 28:19, 17, 15:6, ET, CD, NW and CR0 fixed bits are not modified. */
1712 uint64_t const uCr0Mb1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
1713 uint64_t const uCr0Mb0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
1714 uint64_t const fCr0IgnMask = VMX_EXIT_HOST_CR0_IGNORE_MASK | uCr0Mb1 | ~uCr0Mb0;
1715 uint64_t const uHostCr0 = pVmcs->u64HostCr0.u;
1716 uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
1717 uint64_t const uValidHostCr0 = (uHostCr0 & ~fCr0IgnMask) | (uGuestCr0 & fCr0IgnMask);
1718
1719 /* Verify we have not modified CR0 fixed bits in VMX non-root operation. */
1720 Assert((uGuestCr0 & uCr0Mb1) == uCr0Mb1);
1721 Assert((uGuestCr0 & ~uCr0Mb0) == 0);
1722 CPUMSetGuestCR0(pVCpu, uValidHostCr0);
1723 }
1724
1725 /* CR4. */
1726 {
1727 /* CR4 fixed bits are not modified. */
1728 uint64_t const uCr4Mb1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
1729 uint64_t const uCr4Mb0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
1730 uint64_t const fCr4IgnMask = uCr4Mb1 | ~uCr4Mb0;
1731 uint64_t const uHostCr4 = pVmcs->u64HostCr4.u;
1732 uint64_t const uGuestCr4 = pVCpu->cpum.GstCtx.cr4;
1733 uint64_t uValidHostCr4 = (uHostCr4 & ~fCr4IgnMask) | (uGuestCr4 & fCr4IgnMask);
1734 if (fHostInLongMode)
1735 uValidHostCr4 |= X86_CR4_PAE;
1736 else
1737 uValidHostCr4 &= ~(uint64_t)X86_CR4_PCIDE;
1738
1739 /* Verify we have not modified CR4 fixed bits in VMX non-root operation. */
1740 Assert((uGuestCr4 & uCr4Mb1) == uCr4Mb1);
1741 Assert((uGuestCr4 & ~uCr4Mb0) == 0);
1742 CPUMSetGuestCR4(pVCpu, uValidHostCr4);
1743 }
1744
1745 /* CR3 (host value validated while checking host-state during VM-entry). */
1746 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64HostCr3.u;
1747
1748 /* DR7. */
1749 pVCpu->cpum.GstCtx.dr[7] = X86_DR7_INIT_VAL;
1750
1751 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
1752
1753 /* Load SYSENTER CS, ESP, EIP (host values validated while checking host-state during VM-entry). */
1754 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64HostSysenterEip.u;
1755 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64HostSysenterEsp.u;
1756 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32HostSysenterCs;
1757
1758 /* FS, GS bases are loaded later while we load host segment registers. */
1759
1760 /* EFER MSR (host value validated while checking host-state during VM-entry). */
1761 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
1762 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64HostEferMsr.u;
1763 else if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1764 {
1765 if (fHostInLongMode)
1766 pVCpu->cpum.GstCtx.msrEFER |= (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
1767 else
1768 pVCpu->cpum.GstCtx.msrEFER &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
1769 }
1770
1771 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
1772
1773 /* PAT MSR (host value is validated while checking host-state during VM-entry). */
1774 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
1775 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64HostPatMsr.u;
1776
1777 /* We don't support IA32_BNDCFGS MSR yet. */
1778}
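/*
 * Illustrative sketch (not compiled): the CR0/CR4 loads above merge the host value from
 * the VMCS with the current guest value -- any bit in the "ignore" mask (fixed bits and
 * bits VM-exit leaves untouched) keeps the guest's copy, everything else takes the
 * host's copy. The helper name is illustrative only.
 */
#if 0
static uint64_t exampleMergeHostCrX(uint64_t uHostVal, uint64_t uGuestVal, uint64_t fIgnMask)
{
    return (uHostVal & ~fIgnMask) | (uGuestVal & fIgnMask);
}
#endif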
1779
1780
1781/**
1782 * Loads host segment registers, GDTR, IDTR, LDTR and TR as part of VM-exit.
1783 *
1784 * @param pVCpu The cross context virtual CPU structure.
1785 */
1786IEM_STATIC void iemVmxVmexitLoadHostSegRegs(PVMCPUCC pVCpu)
1787{
1788 /*
1789 * Load host segment registers, GDTR, IDTR, LDTR and TR.
1790 * See Intel spec. 27.5.2 "Loading Host Segment and Descriptor-Table Registers".
1791 *
1792 * Warning! Be careful to not touch fields that are reserved by VT-x,
1793 * e.g. segment limit high bits stored in segment attributes (in bits 11:8).
1794 */
1795 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1796 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
1797
1798 /* CS, SS, ES, DS, FS, GS. */
1799 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
1800 {
1801 RTSEL const HostSel = iemVmxVmcsGetHostSelReg(pVmcs, iSegReg);
1802 bool const fUnusable = RT_BOOL(HostSel == 0);
1803 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1804
1805 /* Selector. */
1806 pSelReg->Sel = HostSel;
1807 pSelReg->ValidSel = HostSel;
1808 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
1809
1810 /* Limit. */
1811 pSelReg->u32Limit = 0xffffffff;
1812
1813 /* Base. */
1814 pSelReg->u64Base = 0;
1815
1816 /* Attributes. */
1817 if (iSegReg == X86_SREG_CS)
1818 {
1819 pSelReg->Attr.n.u4Type = X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED;
1820 pSelReg->Attr.n.u1DescType = 1;
1821 pSelReg->Attr.n.u2Dpl = 0;
1822 pSelReg->Attr.n.u1Present = 1;
1823 pSelReg->Attr.n.u1Long = fHostInLongMode;
1824 pSelReg->Attr.n.u1DefBig = !fHostInLongMode;
1825 pSelReg->Attr.n.u1Granularity = 1;
1826 Assert(!pSelReg->Attr.n.u1Unusable);
1827 Assert(!fUnusable);
1828 }
1829 else
1830 {
1831 pSelReg->Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
1832 pSelReg->Attr.n.u1DescType = 1;
1833 pSelReg->Attr.n.u2Dpl = 0;
1834 pSelReg->Attr.n.u1Present = 1;
1835 pSelReg->Attr.n.u1DefBig = 1;
1836 pSelReg->Attr.n.u1Granularity = 1;
1837 pSelReg->Attr.n.u1Unusable = fUnusable;
1838 }
1839 }
1840
1841 /* FS base. */
1842 if ( !pVCpu->cpum.GstCtx.fs.Attr.n.u1Unusable
1843 || fHostInLongMode)
1844 {
1845 Assert(X86_IS_CANONICAL(pVmcs->u64HostFsBase.u));
1846 pVCpu->cpum.GstCtx.fs.u64Base = pVmcs->u64HostFsBase.u;
1847 }
1848
1849 /* GS base. */
1850 if ( !pVCpu->cpum.GstCtx.gs.Attr.n.u1Unusable
1851 || fHostInLongMode)
1852 {
1853 Assert(X86_IS_CANONICAL(pVmcs->u64HostGsBase.u));
1854 pVCpu->cpum.GstCtx.gs.u64Base = pVmcs->u64HostGsBase.u;
1855 }
1856
1857 /* TR. */
1858 Assert(X86_IS_CANONICAL(pVmcs->u64HostTrBase.u));
1859 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1Unusable);
1860 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->HostTr;
1861 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->HostTr;
1862 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
1863 pVCpu->cpum.GstCtx.tr.u32Limit = X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN;
1864 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64HostTrBase.u;
1865 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
1866 pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType = 0;
1867 pVCpu->cpum.GstCtx.tr.Attr.n.u2Dpl = 0;
1868 pVCpu->cpum.GstCtx.tr.Attr.n.u1Present = 1;
1869 pVCpu->cpum.GstCtx.tr.Attr.n.u1DefBig = 0;
1870 pVCpu->cpum.GstCtx.tr.Attr.n.u1Granularity = 0;
1871
1872 /* LDTR (Warning! do not touch the base and limits here). */
1873 pVCpu->cpum.GstCtx.ldtr.Sel = 0;
1874 pVCpu->cpum.GstCtx.ldtr.ValidSel = 0;
1875 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
1876 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
1877
1878 /* GDTR. */
1879 Assert(X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u));
1880 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64HostGdtrBase.u;
1881 pVCpu->cpum.GstCtx.gdtr.cbGdt = 0xffff;
1882
1883 /* IDTR.*/
1884 Assert(X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u));
1885 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64HostIdtrBase.u;
1886 pVCpu->cpum.GstCtx.idtr.cbIdt = 0xffff;
1887}
1888
1889
1890/**
1891 * Loads the host MSRs from the VM-exit MSR-load area as part of VM-exit.
1892 *
1893 * @returns VBox status code.
1894 * @param pVCpu The cross context virtual CPU structure.
1895 * @param uExitReason The VM-exit reason (for logging purposes).
1896 */
1897IEM_STATIC int iemVmxVmexitLoadHostAutoMsrs(PVMCPUCC pVCpu, uint32_t uExitReason)
1898{
1899 /*
1900 * Load host MSRs.
1901 * See Intel spec. 27.6 "Loading MSRs".
1902 */
1903 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1904 const char * const pszFailure = "VMX-abort";
1905
1906 /*
1907 * The VM-exit MSR-load area address need not be a valid guest-physical address if the
1908 * VM-exit MSR load count is 0. If this is the case, bail early without reading it.
1909 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
1910 */
1911 uint32_t const cMsrs = RT_MIN(pVmcs->u32ExitMsrLoadCount, RT_ELEMENTS(pVCpu->cpum.GstCtx.hwvirt.vmx.aExitMsrLoadArea));
1912 if (!cMsrs)
1913 return VINF_SUCCESS;
1914
1915 /*
1916 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count
1917 * is exceeded, including possibly raising #MC exceptions during the VMX transition. Our
1918 * implementation causes a VMX-abort followed by a triple-fault.
1919 */
1920 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
1921 if (fIsMsrCountValid)
1922 { /* likely */ }
1923 else
1924 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadCount);
1925
1926 RTGCPHYS const GCPhysVmExitMsrLoadArea = pVmcs->u64AddrExitMsrLoad.u;
1927 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.aExitMsrLoadArea[0],
1928 GCPhysVmExitMsrLoadArea, cMsrs * sizeof(VMXAUTOMSR));
1929 if (RT_SUCCESS(rc))
1930 {
1931 PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.aExitMsrLoadArea;
1932 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
1933 {
1934 if ( !pMsr->u32Reserved
1935 && pMsr->u32Msr != MSR_K8_FS_BASE
1936 && pMsr->u32Msr != MSR_K8_GS_BASE
1937 && pMsr->u32Msr != MSR_K6_EFER
1938 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
1939 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
1940 {
1941 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
1942 if (rcStrict == VINF_SUCCESS)
1943 continue;
1944
1945 /*
1946 * If we're in ring-0, we cannot return to ring-3 at this point and still continue the VM-exit.
1947 * If a nested hypervisor loads an MSR that requires ring-3 handling, we cause a VMX-abort,
1948 * recording the MSR index in the auxiliary info. field and indicating it further with our
1949 * own, specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0
1950 * if possible, or come up with a better, generic solution.
1951 */
1952 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
1953 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
1954 ? kVmxVDiag_Vmexit_MsrLoadRing3
1955 : kVmxVDiag_Vmexit_MsrLoad;
1956 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
1957 }
1958 else
1959 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadRsvd);
1960 }
1961 }
1962 else
1963 {
1964 AssertMsgFailed(("VM-exit: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", GCPhysVmExitMsrLoadArea, rc));
1965 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadPtrReadPhys);
1966 }
1967
1968 NOREF(uExitReason);
1969 NOREF(pszFailure);
1970 return VINF_SUCCESS;
1971}
1972
1973
1974/**
1975 * Loads the host state as part of VM-exit.
1976 *
1977 * @returns Strict VBox status code.
1978 * @param pVCpu The cross context virtual CPU structure.
1979 * @param uExitReason The VM-exit reason (for logging purposes).
1980 */
1981IEM_STATIC VBOXSTRICTRC iemVmxVmexitLoadHostState(PVMCPUCC pVCpu, uint32_t uExitReason)
1982{
1983 /*
1984 * Load host state.
1985 * See Intel spec. 27.5 "Loading Host State".
1986 */
1987 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
1988 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
1989
1990 /* We cannot return from a long-mode guest to a host that is not in long mode. */
1991 if ( CPUMIsGuestInLongMode(pVCpu)
1992 && !fHostInLongMode)
1993 {
1994 Log(("VM-exit from long-mode guest to host not in long-mode -> VMX-Abort\n"));
1995 return iemVmxAbort(pVCpu, VMXABORT_HOST_NOT_IN_LONG_MODE);
1996 }
1997
1998 /*
1999 * Check host PAE PDPTEs prior to loading the host state.
2000 * See Intel spec. 26.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
2001 */
2002 if ( (pVmcs->u64HostCr4.u & X86_CR4_PAE)
2003 && !fHostInLongMode
2004 && ( !CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx)
2005 || pVmcs->u64HostCr3.u != pVCpu->cpum.GstCtx.cr3))
2006 {
2007 int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64HostCr3.u);
2008 if (RT_SUCCESS(rc))
2009 { /* likely*/ }
2010 else
2011 {
2012 IEM_VMX_VMEXIT_FAILED(pVCpu, uExitReason, "VMX-abort", kVmxVDiag_Vmexit_HostPdpte);
2013 return iemVmxAbort(pVCpu, VMXBOART_HOST_PDPTE);
2014 }
2015 }
2016
2017 iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);
2018 iemVmxVmexitLoadHostSegRegs(pVCpu);
2019
2020 /*
2021 * Load host RIP, RSP and RFLAGS.
2022 * See Intel spec. 27.5.3 "Loading Host RIP, RSP and RFLAGS"
2023 */
2024 pVCpu->cpum.GstCtx.rip = pVmcs->u64HostRip.u;
2025 pVCpu->cpum.GstCtx.rsp = pVmcs->u64HostRsp.u;
2026 pVCpu->cpum.GstCtx.rflags.u = X86_EFL_1;
2027
2028 /* Clear address range monitoring. */
2029 EMMonitorWaitClear(pVCpu);
2030
2031 /* Perform the VMX transition (PGM updates). */
2032 VBOXSTRICTRC rcStrict = iemVmxTransition(pVCpu);
2033 if (rcStrict == VINF_SUCCESS)
2034 { /* likely */ }
2035 else if (RT_SUCCESS(rcStrict))
2036 {
2037 Log3(("VM-exit: iemVmxTransition returns %Rrc (uExitReason=%u) -> Setting passup status\n", VBOXSTRICTRC_VAL(rcStrict),
2038 uExitReason));
2039 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2040 }
2041 else
2042 {
2043 Log3(("VM-exit: iemVmxTransition failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
2044 return VBOXSTRICTRC_VAL(rcStrict);
2045 }
2046
2047 Assert(rcStrict == VINF_SUCCESS);
2048
2049 /* Load MSRs from the VM-exit auto-load MSR area. */
2050 int rc = iemVmxVmexitLoadHostAutoMsrs(pVCpu, uExitReason);
2051 if (RT_FAILURE(rc))
2052 {
2053 Log(("VM-exit failed while loading host MSRs -> VMX-Abort\n"));
2054 return iemVmxAbort(pVCpu, VMXABORT_LOAD_HOST_MSR);
2055 }
2056 return VINF_SUCCESS;
2057}
2058
2059
2060/**
2061 * Gets VM-exit instruction information along with any displacement for an
2062 * instruction VM-exit.
2063 *
2064 * @returns The VM-exit instruction information.
2065 * @param pVCpu The cross context virtual CPU structure.
2066 * @param uExitReason The VM-exit reason.
2067 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_XXX).
2068 * @param pGCPtrDisp Where to store the displacement field. Optional, can be
2069 * NULL.
2070 */
2071IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPUCC pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, PRTGCPTR pGCPtrDisp)
2072{
2073 RTGCPTR GCPtrDisp;
2074 VMXEXITINSTRINFO ExitInstrInfo;
2075 ExitInstrInfo.u = 0;
2076
2077 /*
2078 * Get and parse the ModR/M byte from our decoded opcodes.
2079 */
2080 uint8_t bRm;
2081 uint8_t const offModRm = pVCpu->iem.s.offModRm;
2082 IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
2083 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2084 {
2085 /*
2086 * ModR/M indicates register addressing.
2087 *
2088 * The primary/secondary register operands are reported in the iReg1 or iReg2
2089 * fields depending on whether it is a read/write form.
2090 */
2091 uint8_t idxReg1;
2092 uint8_t idxReg2;
2093 if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
2094 {
2095 idxReg1 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
2096 idxReg2 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
2097 }
2098 else
2099 {
2100 idxReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
2101 idxReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
2102 }
2103 ExitInstrInfo.All.u2Scaling = 0;
2104 ExitInstrInfo.All.iReg1 = idxReg1;
2105 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
2106 ExitInstrInfo.All.fIsRegOperand = 1;
2107 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
2108 ExitInstrInfo.All.iSegReg = 0;
2109 ExitInstrInfo.All.iIdxReg = 0;
2110 ExitInstrInfo.All.fIdxRegInvalid = 1;
2111 ExitInstrInfo.All.iBaseReg = 0;
2112 ExitInstrInfo.All.fBaseRegInvalid = 1;
2113 ExitInstrInfo.All.iReg2 = idxReg2;
2114
2115 /* Displacement not applicable for register addressing. */
2116 GCPtrDisp = 0;
2117 }
2118 else
2119 {
2120 /*
2121 * ModR/M indicates memory addressing.
2122 */
2123 uint8_t uScale = 0;
2124 bool fBaseRegValid = false;
2125 bool fIdxRegValid = false;
2126 uint8_t iBaseReg = 0;
2127 uint8_t iIdxReg = 0;
2128 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
2129 {
2130 /*
2131 * Parse the ModR/M, displacement for 16-bit addressing mode.
2132 * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
2133 */
2134 uint16_t u16Disp = 0;
2135 uint8_t const offDisp = offModRm + sizeof(bRm);
2136 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
2137 {
2138 /* Displacement without any registers. */
2139 IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
2140 }
2141 else
2142 {
2143 /* Register (index and base). */
2144 switch (bRm & X86_MODRM_RM_MASK)
2145 {
2146 case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
2147 case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
2148 case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
2149 case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
2150 case 4: fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
2151 case 5: fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
2152 case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
2153 case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
2154 }
2155
2156 /* Register + displacement. */
2157 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2158 {
2159 case 0: break;
2160 case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
2161 case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp); break;
2162 default:
2163 {
2164 /* Register addressing, handled at the beginning. */
2165 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
2166 break;
2167 }
2168 }
2169 }
2170
2171 Assert(!uScale); /* There's no scaling/SIB byte for 16-bit addressing. */
2172 GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
2173 }
2174 else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
2175 {
2176 /*
2177 * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
2178 * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
2179 */
2180 uint32_t u32Disp = 0;
2181 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
2182 {
2183 /* Displacement without any registers. */
2184 uint8_t const offDisp = offModRm + sizeof(bRm);
2185 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
2186 }
2187 else
2188 {
2189 /* Register (and perhaps scale, index and base). */
2190 uint8_t offDisp = offModRm + sizeof(bRm);
2191 iBaseReg = (bRm & X86_MODRM_RM_MASK);
2192 if (iBaseReg == 4)
2193 {
2194 /* An SIB byte follows the ModR/M byte, parse it. */
2195 uint8_t bSib;
2196 uint8_t const offSib = offModRm + sizeof(bRm);
2197 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
2198
2199 /* A displacement may follow SIB, update its offset. */
2200 offDisp += sizeof(bSib);
2201
2202 /* Get the scale. */
2203 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
2204
2205 /* Get the index register. */
2206 iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
2207 fIdxRegValid = RT_BOOL(iIdxReg != 4);
2208
2209 /* Get the base register. */
2210 iBaseReg = bSib & X86_SIB_BASE_MASK;
2211 fBaseRegValid = true;
2212 if (iBaseReg == 5)
2213 {
2214 if ((bRm & X86_MODRM_MOD_MASK) == 0)
2215 {
2216 /* Mod is 0 implies a 32-bit displacement with no base. */
2217 fBaseRegValid = false;
2218 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
2219 }
2220 else
2221 {
2222 /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
2223 iBaseReg = X86_GREG_xBP;
2224 }
2225 }
2226 }
2227
2228 /* Register + displacement. */
2229 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2230 {
2231 case 0: /* Handled above */ break;
2232 case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
2233 case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp); break;
2234 default:
2235 {
2236 /* Register addressing, handled at the beginning. */
2237 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
2238 break;
2239 }
2240 }
2241 }
2242
2243 GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
2244 }
2245 else
2246 {
2247 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
2248
2249 /*
2250 * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
2251 * See Intel instruction spec. 2.2 "IA-32e Mode".
2252 */
2253 uint64_t u64Disp = 0;
2254 bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
2255 if (fRipRelativeAddr)
2256 {
2257 /*
2258 * RIP-relative addressing mode.
2259 *
2260 * The displacement is 32-bit signed implying an offset range of +/-2G.
2261 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
2262 */
2263 uint8_t const offDisp = offModRm + sizeof(bRm);
2264 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
2265 }
2266 else
2267 {
2268 uint8_t offDisp = offModRm + sizeof(bRm);
2269
2270 /*
2271 * Register (and perhaps scale, index and base).
2272 *
2273 * REX.B extends the most-significant bit of the base register. However, REX.B
2274 * is ignored while determining whether an SIB follows the opcode. Hence, we
2275 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
2276 *
2277 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
2278 */
2279 iBaseReg = (bRm & X86_MODRM_RM_MASK);
2280 if (iBaseReg == 4)
2281 {
2282 /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
2283 uint8_t bSib;
2284 uint8_t const offSib = offModRm + sizeof(bRm);
2285 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
2286
2287 /* Displacement may follow SIB, update its offset. */
2288 offDisp += sizeof(bSib);
2289
2290 /* Get the scale. */
2291 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
2292
2293 /* Get the index. */
2294 iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
2295 fIdxRegValid = RT_BOOL(iIdxReg != 4); /* R12 -can- be used as an index register. */
2296
2297 /* Get the base. */
2298 iBaseReg = (bSib & X86_SIB_BASE_MASK);
2299 fBaseRegValid = true;
2300 if (iBaseReg == 5)
2301 {
2302 if ((bRm & X86_MODRM_MOD_MASK) == 0)
2303 {
2304 /* Mod is 0 implies a signed 32-bit displacement with no base. */
2305 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
2306 }
2307 else
2308 {
2309 /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
2310 iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
2311 }
2312 }
2313 }
2314 iBaseReg |= pVCpu->iem.s.uRexB;
2315
2316 /* Register + displacement. */
2317 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2318 {
2319 case 0: /* Handled above */ break;
2320 case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp); break;
2321 case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
2322 default:
2323 {
2324 /* Register addressing, handled at the beginning. */
2325 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
2326 break;
2327 }
2328 }
2329 }
2330
2331 GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
2332 }
2333
2334 /*
2335 * The primary or secondary register operand is reported in iReg2 depending
2336 * on whether the primary operand is in read/write form.
2337 */
2338 uint8_t idxReg2;
2339 if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
2340 {
2341 idxReg2 = bRm & X86_MODRM_RM_MASK;
2342 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
2343 idxReg2 |= pVCpu->iem.s.uRexB;
2344 }
2345 else
2346 {
2347 idxReg2 = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
2348 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
2349 idxReg2 |= pVCpu->iem.s.uRexReg;
2350 }
2351 ExitInstrInfo.All.u2Scaling = uScale;
2352 ExitInstrInfo.All.iReg1 = 0; /* Not applicable for memory addressing. */
2353 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
2354 ExitInstrInfo.All.fIsRegOperand = 0;
2355 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
2356 ExitInstrInfo.All.iSegReg = pVCpu->iem.s.iEffSeg;
2357 ExitInstrInfo.All.iIdxReg = iIdxReg;
2358 ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
2359 ExitInstrInfo.All.iBaseReg = iBaseReg;
2360 ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
2361 ExitInstrInfo.All.iReg2 = idxReg2;
2362 }
2363
2364 /*
2365 * Handle exceptions to the norm for certain instructions.
2366 * (e.g. some instructions convey an instruction identity in place of iReg2).
2367 */
2368 switch (uExitReason)
2369 {
2370 case VMX_EXIT_GDTR_IDTR_ACCESS:
2371 {
2372 Assert(VMXINSTRID_IS_VALID(uInstrId));
2373 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
2374 ExitInstrInfo.GdtIdt.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
2375 ExitInstrInfo.GdtIdt.u2Undef0 = 0;
2376 break;
2377 }
2378
2379 case VMX_EXIT_LDTR_TR_ACCESS:
2380 {
2381 Assert(VMXINSTRID_IS_VALID(uInstrId));
2382 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
2383 ExitInstrInfo.LdtTr.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
2384 ExitInstrInfo.LdtTr.u2Undef0 = 0;
2385 break;
2386 }
2387
2388 case VMX_EXIT_RDRAND:
2389 case VMX_EXIT_RDSEED:
2390 {
2391 Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
2392 break;
2393 }
2394 }
2395
2396 /* Update displacement and return the constructed VM-exit instruction information field. */
2397 if (pGCPtrDisp)
2398 *pGCPtrDisp = GCPtrDisp;
2399
2400 return ExitInstrInfo.u;
2401}
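/*
 * Illustrative sketch (not compiled): the decoding above ultimately splits the ModR/M
 * byte into mod (bits 7:6), reg (5:3) and r/m (2:0), and the optional SIB byte into
 * scale (7:6), index (5:3) and base (2:0), with REX bits extending reg/index/base where
 * applicable. The structure and function names are illustrative only.
 */
#if 0
typedef struct EXAMPLEMODRMSIB
{
    uint8_t uMod, uReg, uRm;
    uint8_t uScale, uIndex, uBase;
} EXAMPLEMODRMSIB;

static void exampleDecodeModRmSib(uint8_t bRm, uint8_t bSib, EXAMPLEMODRMSIB *pOut)
{
    pOut->uMod   = (bRm  >> 6) & 0x3;
    pOut->uReg   = (bRm  >> 3) & 0x7;  /* Extended by REX.R where applicable. */
    pOut->uRm    =  bRm        & 0x7;  /* Extended by REX.B; r/m == 4 means an SIB byte follows. */
    pOut->uScale = (bSib >> 6) & 0x3;
    pOut->uIndex = (bSib >> 3) & 0x7;  /* Extended by REX.X; index 4 without REX.X means no index. */
    pOut->uBase  =  bSib       & 0x7;  /* Extended by REX.B; base 5 with mod 0 means disp32, no base. */
}
#endif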
2402
2403
2404/**
2405 * VMX VM-exit handler.
2406 *
2407 * @returns Strict VBox status code.
2408 * @retval VINF_VMX_VMEXIT when the VM-exit is successful.
2409 * @retval VINF_EM_TRIPLE_FAULT when VM-exit is unsuccessful and leads to a
2410 * triple-fault.
2411 *
2412 * @param pVCpu The cross context virtual CPU structure.
2413 * @param uExitReason The VM-exit reason.
2414 * @param u64ExitQual The Exit qualification.
2415 */
2416IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
2417{
2418# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
2419 RT_NOREF3(pVCpu, uExitReason, u64ExitQual);
2420 AssertMsgFailed(("VM-exit should only be invoked from ring-3 when nested-guest executes only in ring-3!\n"));
2421 return VERR_IEM_IPE_7;
2422# else
2423 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
2424
2425 /*
2426 * Import all the guest-CPU state.
2427 *
2428 * HM, on returning to guest execution, would have to re-set up a whole lot of state
2429 * anyway (e.g. VM-entry/VM-exit controls), and we never import only a part of
2430 * the state and flag reloading the entire state on re-entry. So import the entire
2431 * state here, see HMNotifyVmxNstGstVmexit() for more comments.
2432 */
2433 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL);
2434
2435 /*
2436 * Ensure VM-entry interruption information valid bit is cleared.
2437 *
2438 * We do it here on every VM-exit so that even premature VM-exits (e.g. those caused
2439 * by invalid-guest state or machine-check exceptions) also clear this bit.
2440 *
2441 * See Intel spec. 27.2 "Recording VM-exit Information And Updating VM-entry control fields".
2442 */
2443 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
2444 pVmcs->u32EntryIntInfo &= ~VMX_ENTRY_INT_INFO_VALID;
2445
2446 /*
2447 * Update the VM-exit reason and Exit qualification.
2448 * Other VMCS read-only data fields are expected to be updated by the caller already.
2449 */
2450 pVmcs->u32RoExitReason = uExitReason;
2451 pVmcs->u64RoExitQual.u = u64ExitQual;
2452
2453 Log3(("vmexit: reason=%#RX32 qual=%#RX64 cs:rip=%04x:%#RX64 cr0=%#RX64 cr3=%#RX64 cr4=%#RX64\n", uExitReason,
2454 pVmcs->u64RoExitQual.u, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.cr0,
2455 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4));
2456
2457 /*
2458 * Update the IDT-vectoring information fields if the VM-exit is triggered during delivery of an event.
2459 * See Intel spec. 27.2.4 "Information for VM Exits During Event Delivery".
2460 */
2461 {
2462 uint8_t uVector;
2463 uint32_t fFlags;
2464 uint32_t uErrCode;
2465 bool const fInEventDelivery = IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErrCode, NULL /* puCr2 */);
2466 if (fInEventDelivery)
2467 {
2468 /*
2469 * A VM-exit is not considered to occur during event delivery when the VM-exit is
2470 * caused by a triple-fault or the original event results in a double-fault that
2471 * causes the VM exit directly (exception bitmap). Therefore, we must not set the
2472 * original event information into the IDT-vectoring information fields.
2473 *
2474 * See Intel spec. 27.2.4 "Information for VM Exits During Event Delivery".
2475 */
2476 if ( uExitReason != VMX_EXIT_TRIPLE_FAULT
2477 && ( uExitReason != VMX_EXIT_XCPT_OR_NMI
2478 || !VMX_EXIT_INT_INFO_IS_XCPT_DF(pVmcs->u32RoExitIntInfo)))
2479 {
2480 uint8_t const uIdtVectoringType = iemVmxGetEventType(uVector, fFlags);
2481 uint8_t const fErrCodeValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
2482 uint32_t const uIdtVectoringInfo = RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_VECTOR, uVector)
2483 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_TYPE, uIdtVectoringType)
2484 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_ERR_CODE_VALID, fErrCodeValid)
2485 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_VALID, 1);
2486 iemVmxVmcsSetIdtVectoringInfo(pVCpu, uIdtVectoringInfo);
2487 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, uErrCode);
2488 LogFlow(("vmexit: idt_info=%#RX32 idt_err_code=%#RX32 cr2=%#RX64\n", uIdtVectoringInfo, uErrCode,
2489 pVCpu->cpum.GstCtx.cr2));
2490 }
2491 }
2492 }
2493
2494 /* The following VMCS fields should always be zero since we don't support injecting SMIs into a guest. */
2495 Assert(pVmcs->u64RoIoRcx.u == 0);
2496 Assert(pVmcs->u64RoIoRsi.u == 0);
2497 Assert(pVmcs->u64RoIoRdi.u == 0);
2498 Assert(pVmcs->u64RoIoRip.u == 0);
2499
2500 /* We should not cause an NMI-window/interrupt-window VM-exit when injecting events as part of VM-entry. */
2501 if (!CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx))
2502 {
2503 Assert(uExitReason != VMX_EXIT_NMI_WINDOW);
2504 Assert(uExitReason != VMX_EXIT_INT_WINDOW);
2505 }
2506
2507 /* For exception or NMI VM-exits the VM-exit interruption info. field must be valid. */
2508 Assert(uExitReason != VMX_EXIT_XCPT_OR_NMI || VMX_EXIT_INT_INFO_IS_VALID(pVmcs->u32RoExitIntInfo));
2509
2510 /*
2511 * Save the guest state back into the VMCS.
2512 * We only need to save the state when the VM-entry was successful.
2513 */
2514 bool const fVmentryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
2515 if (!fVmentryFailed)
2516 {
2517 /*
2518 * If we support storing EFER.LMA into IA32e-mode guest field on VM-exit, we need to do that now.
2519 * See Intel spec. 27.2 "Recording VM-exit Information And Updating VM-entry Control".
2520 *
2521 * It is not clear from the Intel spec. if this is done only when VM-entry succeeds.
2522 * If a VM-exit happens before loading guest EFER, we risk restoring the host EFER.LMA
2523 * as the guest-CPU state would not have been modified. Hence for now, we do this only when
2524 * the VM-entry succeeded.
2525 */
2526 /** @todo r=ramshankar: Figure out if this bit gets set to host EFER.LMA on real
2527 * hardware when VM-exit fails during VM-entry (e.g. VERR_VMX_INVALID_GUEST_STATE). */
2528 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxExitSaveEferLma)
2529 {
2530 if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
2531 pVmcs->u32EntryCtls |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
2532 else
2533 pVmcs->u32EntryCtls &= ~VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
2534 }
2535
2536 /*
2537 * The rest of the high bits of the VM-exit reason are only relevant when the VM-exit
2538 * occurs in enclave mode/SMM which we don't support yet.
2539 *
2540 * If we ever add support for it, we can pass just the lower bits to the functions
2541 * below, till then an assert should suffice.
2542 */
2543 Assert(!RT_HI_U16(uExitReason));
2544
2545 /* Save the guest state into the VMCS and restore guest MSRs from the auto-store guest MSR area. */
2546 iemVmxVmexitSaveGuestState(pVCpu, uExitReason);
2547 int rc = iemVmxVmexitSaveGuestAutoMsrs(pVCpu, uExitReason);
2548 if (RT_SUCCESS(rc))
2549 { /* likely */ }
2550 else
2551 return iemVmxAbort(pVCpu, VMXABORT_SAVE_GUEST_MSRS);
2552
2553 /* Clear any saved NMI-blocking state so we don't assert on next VM-entry (if it was in effect on the previous one). */
2554 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions &= ~VMCPU_FF_BLOCK_NMIS;
2555 }
2556 else
2557 {
2558 /* Restore the NMI-blocking state if VM-entry failed due to invalid guest state or while loading MSRs. */
2559 uint32_t const uExitReasonBasic = VMX_EXIT_REASON_BASIC(uExitReason);
2560 if ( uExitReasonBasic == VMX_EXIT_ERR_INVALID_GUEST_STATE
2561 || uExitReasonBasic == VMX_EXIT_ERR_MSR_LOAD)
2562 iemVmxVmexitRestoreNmiBlockingFF(pVCpu);
2563 }
2564
2565 /*
2566 * Stop any running VMX-preemption timer if necessary.
2567 */
2568 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
2569 CPUMStopGuestVmxPremptTimer(pVCpu);
2570
2571 /*
2572 * Clear any pending VMX nested-guest force-flags.
2573 * These force-flags have no effect on (outer) guest execution and will
2574 * be re-evaluated and set up on the next nested-guest VM-entry.
2575 */
2576 VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_VMX_ALL_MASK);
2577
2578 /*
2579 * We're no longer in nested-guest execution mode.
2580 *
2581 * It is important to do this prior to loading the host state because
2582 * PGM looks at fInVmxNonRootMode to determine if it needs to perform
2583 * second-level address translation while switching to host CR3.
2584 */
2585 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
2586
2587 /* Restore the host (outer guest) state. */
2588 VBOXSTRICTRC rcStrict = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
2589 if (RT_SUCCESS(rcStrict))
2590 {
2591 Assert(rcStrict == VINF_SUCCESS);
2592 rcStrict = VINF_VMX_VMEXIT;
2593 }
2594 else
2595 Log3(("vmexit: Loading host-state failed. uExitReason=%u rc=%Rrc\n", uExitReason, VBOXSTRICTRC_VAL(rcStrict)));
2596
2597 /* Notify HM that the current VMCS fields have been modified. */
2598 HMNotifyVmxNstGstCurrentVmcsChanged(pVCpu);
2599
2600 /* Notify HM that we've completed the VM-exit. */
2601 HMNotifyVmxNstGstVmexit(pVCpu);
2602
2603# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
2604 /* Revert any IEM-only nested-guest execution policy, otherwise return rcStrict. */
2605 Log(("vmexit: Disabling IEM-only EM execution policy!\n"));
2606 int rcSched = EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
2607 if (rcSched != VINF_SUCCESS)
2608 iemSetPassUpStatus(pVCpu, rcSched);
2609# endif
2610 return rcStrict;
2611# endif
2612}
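/*
 * Illustrative sketch (not compiled): the IDT-vectoring information field constructed
 * above with RT_BF_MAKE packs the vector into bits 7:0, the event type into bits 10:8,
 * the error-code-valid flag into bit 11 and the valid flag into bit 31. The helper name
 * is illustrative only.
 */
#if 0
static uint32_t examplePackIdtVectoringInfo(uint8_t uVector, uint8_t uType, bool fErrCodeValid)
{
    return (uint32_t)uVector                          /*  7:0  vector */
         | ((uint32_t)(uType & 0x7)           <<  8)  /* 10:8  type */
         | ((uint32_t)(fErrCodeValid ? 1 : 0) << 11)  /* 11    error-code valid */
         | RT_BIT_32(31);                             /* 31    valid */
}
#endif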
2613
2614
2615/**
2616 * VMX VM-exit handler for VM-exits due to instruction execution.
2617 *
2618 * This is intended for instructions where the caller provides all the relevant
2619 * VM-exit information.
2620 *
2621 * @returns Strict VBox status code.
2622 * @param pVCpu The cross context virtual CPU structure.
2623 * @param pExitInfo Pointer to the VM-exit information.
2624 */
2625IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
2626{
2627 /*
2628 * For instructions where any of the following fields are not applicable:
2629 * - Exit qualification must be cleared.
2630 * - VM-exit instruction info. is undefined.
2631 * - Guest-linear address is undefined.
2632 * - Guest-physical address is undefined.
2633 *
2634 * The VM-exit instruction length is mandatory for all VM-exits that are caused by
2635 * instruction execution. For VM-exits that are not due to instruction execution this
2636 * field is undefined.
2637 *
2638 * In our implementation in IEM, all undefined fields are generally cleared. However,
2639 * if the caller supplies information (from say the physical CPU directly) it is
2640 * then possible that the undefined fields are not cleared.
2641 *
2642 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2643 * See Intel spec. 27.2.4 "Information for VM Exits Due to Instruction Execution".
2644 */
2645 Assert(pExitInfo);
2646 AssertMsg(pExitInfo->uReason <= VMX_EXIT_MAX, ("uReason=%u\n", pExitInfo->uReason));
2647 AssertMsg(pExitInfo->cbInstr >= 1 && pExitInfo->cbInstr <= 15,
2648 ("uReason=%u cbInstr=%u\n", pExitInfo->uReason, pExitInfo->cbInstr));
2649
2650 /* Update all the relevant fields from the VM-exit instruction information struct. */
2651 iemVmxVmcsSetExitInstrInfo(pVCpu, pExitInfo->InstrInfo.u);
2652 iemVmxVmcsSetExitGuestLinearAddr(pVCpu, pExitInfo->u64GuestLinearAddr);
2653 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, pExitInfo->u64GuestPhysAddr);
2654 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
2655
2656 /* Perform the VM-exit. */
2657 return iemVmxVmexit(pVCpu, pExitInfo->uReason, pExitInfo->u64Qual);
2658}
2659
2660
2661/**
2662 * VMX VM-exit handler for VM-exits due to instruction execution.
2663 *
2664 * This is intended for instructions that only provide the VM-exit instruction
2665 * length.
2666 *
2667 * @param pVCpu The cross context virtual CPU structure.
2668 * @param uExitReason The VM-exit reason.
2669 * @param cbInstr The instruction length in bytes.
2670 */
2671IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr)
2672{
2673 VMXVEXITINFO ExitInfo;
2674 RT_ZERO(ExitInfo);
2675 ExitInfo.uReason = uExitReason;
2676 ExitInfo.cbInstr = cbInstr;
2677
2678#ifdef VBOX_STRICT
2679 /*
2680 * To prevent us from shooting ourselves in the foot.
2681 * The following instructions should convey more than just the instruction length.
2682 */
2683 switch (uExitReason)
2684 {
2685 case VMX_EXIT_INVEPT:
2686 case VMX_EXIT_INVPCID:
2687 case VMX_EXIT_INVVPID:
2688 case VMX_EXIT_LDTR_TR_ACCESS:
2689 case VMX_EXIT_GDTR_IDTR_ACCESS:
2690 case VMX_EXIT_VMCLEAR:
2691 case VMX_EXIT_VMPTRLD:
2692 case VMX_EXIT_VMPTRST:
2693 case VMX_EXIT_VMREAD:
2694 case VMX_EXIT_VMWRITE:
2695 case VMX_EXIT_VMXON:
2696 case VMX_EXIT_XRSTORS:
2697 case VMX_EXIT_XSAVES:
2698 case VMX_EXIT_RDRAND:
2699 case VMX_EXIT_RDSEED:
2700 case VMX_EXIT_IO_INSTR:
2701 AssertMsgFailedReturn(("Use iemVmxVmexitInstrNeedsInfo for uExitReason=%u\n", uExitReason), VERR_IEM_IPE_5);
2702 break;
2703 }
2704#endif
2705
2706 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2707}
2708
2709
2710/**
2711 * VMX VM-exit handler for VM-exits due to instruction execution.
2712 *
2713 * This is intended for instructions that have a ModR/M byte and update the VM-exit
2714 * instruction information and Exit qualification fields.
2715 *
2716 * @param pVCpu The cross context virtual CPU structure.
2717 * @param uExitReason The VM-exit reason.
2718 * @param uInstrId The instruction identity (VMXINSTRID_XXX).
2719 * @param cbInstr The instruction length in bytes.
2720 *
2721 * @remarks Do not use this for INS/OUTS instructions.
2722 */
2723IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrNeedsInfo(PVMCPUCC pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr)
2724{
2725 VMXVEXITINFO ExitInfo;
2726 RT_ZERO(ExitInfo);
2727 ExitInfo.uReason = uExitReason;
2728 ExitInfo.cbInstr = cbInstr;
2729
2730 /*
2731 * Update the Exit qualification field with displacement bytes.
2732 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2733 */
2734 switch (uExitReason)
2735 {
2736 case VMX_EXIT_INVEPT:
2737 case VMX_EXIT_INVPCID:
2738 case VMX_EXIT_INVVPID:
2739 case VMX_EXIT_LDTR_TR_ACCESS:
2740 case VMX_EXIT_GDTR_IDTR_ACCESS:
2741 case VMX_EXIT_VMCLEAR:
2742 case VMX_EXIT_VMPTRLD:
2743 case VMX_EXIT_VMPTRST:
2744 case VMX_EXIT_VMREAD:
2745 case VMX_EXIT_VMWRITE:
2746 case VMX_EXIT_VMXON:
2747 case VMX_EXIT_XRSTORS:
2748 case VMX_EXIT_XSAVES:
2749 case VMX_EXIT_RDRAND:
2750 case VMX_EXIT_RDSEED:
2751 {
2752 /* Construct the VM-exit instruction information. */
2753 RTGCPTR GCPtrDisp;
2754 uint32_t const uInstrInfo = iemVmxGetExitInstrInfo(pVCpu, uExitReason, uInstrId, &GCPtrDisp);
2755
2756 /* Update the VM-exit instruction information. */
2757 ExitInfo.InstrInfo.u = uInstrInfo;
2758
2759 /* Update the Exit qualification. */
2760 ExitInfo.u64Qual = GCPtrDisp;
2761 break;
2762 }
2763
2764 default:
2765 AssertMsgFailedReturn(("Use instruction-specific handler\n"), VERR_IEM_IPE_5);
2766 break;
2767 }
2768
2769 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2770}
2771
2772
2773/**
2774 * VMX VM-exit handler for VM-exits due to INVLPG.
2775 *
2776 * @returns Strict VBox status code.
2777 * @param pVCpu The cross context virtual CPU structure.
2778 * @param GCPtrPage The guest-linear address of the page being invalidated.
2779 * @param cbInstr The instruction length in bytes.
2780 */
2781IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrInvlpg(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr)
2782{
2783 VMXVEXITINFO ExitInfo;
2784 RT_ZERO(ExitInfo);
2785 ExitInfo.uReason = VMX_EXIT_INVLPG;
2786 ExitInfo.cbInstr = cbInstr;
2787 ExitInfo.u64Qual = GCPtrPage;
2788 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(ExitInfo.u64Qual));
2789
2790 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2791}
2792
2793
2794/**
2795 * VMX VM-exit handler for VM-exits due to LMSW.
2796 *
2797 * @returns Strict VBox status code.
2798 * @param pVCpu The cross context virtual CPU structure.
2799 * @param uGuestCr0 The current guest CR0.
2800 * @param pu16NewMsw The machine-status word specified in LMSW's source
2801 * operand. This will be updated depending on the VMX
2802 * guest/host CR0 mask if LMSW is not intercepted.
2803 * @param GCPtrEffDst The guest-linear address of the source operand in case
2804 * of a memory operand. For register operand, pass
2805 * NIL_RTGCPTR.
2806 * @param cbInstr The instruction length in bytes.
2807 */
2808IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrLmsw(PVMCPUCC pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw, RTGCPTR GCPtrEffDst,
2809 uint8_t cbInstr)
2810{
2811 Assert(pu16NewMsw);
2812
2813 uint16_t const uNewMsw = *pu16NewMsw;
2814 if (CPUMIsGuestVmxLmswInterceptSet(&pVCpu->cpum.GstCtx, uNewMsw))
2815 {
2816 Log2(("lmsw: Guest intercept -> VM-exit\n"));
2817
2818 VMXVEXITINFO ExitInfo;
2819 RT_ZERO(ExitInfo);
2820 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
2821 ExitInfo.cbInstr = cbInstr;
2822
2823 bool const fMemOperand = RT_BOOL(GCPtrEffDst != NIL_RTGCPTR);
2824 if (fMemOperand)
2825 {
2826 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(GCPtrEffDst));
2827 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
2828 }
2829
2830 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
2831 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_LMSW)
2832 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_OP, fMemOperand)
2833 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_DATA, uNewMsw);
2834
2835 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2836 }
2837
2838 /*
2839 * If LMSW did not cause a VM-exit, any CR0 bits in the range 0:3 that is set in the
2840 * CR0 guest/host mask must be left unmodified.
2841 *
2842 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
2843 */
2844 uint32_t const fGstHostMask = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u;
2845 uint32_t const fGstHostLmswMask = fGstHostMask & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
2846 *pu16NewMsw = (uGuestCr0 & fGstHostLmswMask) | (uNewMsw & ~fGstHostLmswMask);
2847
2848 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
2849}
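/*
 * Illustrative sketch (not compiled): when LMSW is not intercepted, the low CR0 bits
 * (PE, MP, EM, TS) that are owned by the host per the CR0 guest/host mask keep their
 * current guest values; only the remaining bits take the value from the LMSW source
 * operand, exactly as done above. The helper name is illustrative only.
 */
#if 0
static uint16_t exampleApplyLmswMask(uint32_t uGuestCr0, uint16_t uNewMsw, uint32_t fCr0GstHostMask)
{
    uint32_t const fLmswMask = fCr0GstHostMask & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
    return (uint16_t)((uGuestCr0 & fLmswMask) | (uNewMsw & ~fLmswMask));
}
#endif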
2850
2851
2852/**
2853 * VMX VM-exit handler for VM-exits due to CLTS.
2854 *
2855 * @returns Strict VBox status code.
2856 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the CLTS instruction did not cause a
2857 * VM-exit but must not modify the guest CR0.TS bit.
2858 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the CLTS instruction did not cause a
2859 * VM-exit and modification to the guest CR0.TS bit is allowed (subject to
2860 * CR0 fixed bits in VMX operation).
2861 * @param pVCpu The cross context virtual CPU structure.
2862 * @param cbInstr The instruction length in bytes.
2863 */
2864IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrClts(PVMCPUCC pVCpu, uint8_t cbInstr)
2865{
2866 uint32_t const fGstHostMask = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u;
2867 uint32_t const fReadShadow = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u;
2868
2869 /*
2870 * If CR0.TS is owned by the host:
2871 * - If CR0.TS is set in the read-shadow, we must cause a VM-exit.
2872 * - If CR0.TS is cleared in the read-shadow, no VM-exit is caused and the
2873 * CLTS instruction completes without clearing CR0.TS.
2874 *
2875 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2876 */
2877 if (fGstHostMask & X86_CR0_TS)
2878 {
2879 if (fReadShadow & X86_CR0_TS)
2880 {
2881 Log2(("clts: Guest intercept -> VM-exit\n"));
2882
2883 VMXVEXITINFO ExitInfo;
2884 RT_ZERO(ExitInfo);
2885 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
2886 ExitInfo.cbInstr = cbInstr;
2887 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
2888 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_CLTS);
2889 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2890 }
2891
2892 return VINF_VMX_MODIFIES_BEHAVIOR;
2893 }
2894
2895 /*
2896 * If CR0.TS is not owned by the host, the CLTS instruction operates normally
2897 * and may modify CR0.TS (subject to CR0 fixed bits in VMX operation).
2898 */
2899 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
2900}
2901
2902
2903/**
2904 * VMX VM-exit handler for VM-exits due to 'Mov CR0,GReg' and 'Mov CR4,GReg'
2905 * (CR0/CR4 write).
2906 *
2907 * @returns Strict VBox status code.
2908 * @param pVCpu The cross context virtual CPU structure.
2909 * @param iCrReg The control register (either CR0 or CR4).
2911 * @param puNewCrX Pointer to the new CR0/CR4 value. Will be updated if no
2912 * VM-exit is caused.
2913 * @param iGReg The general register from which the CR0/CR4 value is being
2914 * loaded.
2915 * @param cbInstr The instruction length in bytes.
2916 */
2917IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr0Cr4(PVMCPUCC pVCpu, uint8_t iCrReg, uint64_t *puNewCrX, uint8_t iGReg,
2918 uint8_t cbInstr)
2919{
2920 Assert(puNewCrX);
2921 Assert(iCrReg == 0 || iCrReg == 4);
2922 Assert(iGReg < X86_GREG_COUNT);
2923
2924 uint64_t const uNewCrX = *puNewCrX;
2925 if (CPUMIsGuestVmxMovToCr0Cr4InterceptSet(&pVCpu->cpum.GstCtx, iCrReg, uNewCrX))
2926 {
2927 Log2(("mov_Cr_Rd: (CR%u) Guest intercept -> VM-exit\n", iCrReg));
2928
2929 VMXVEXITINFO ExitInfo;
2930 RT_ZERO(ExitInfo);
2931 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
2932 ExitInfo.cbInstr = cbInstr;
2933 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, iCrReg)
2934 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
2935 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
2936 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2937 }
2938
2939 /*
2940 * If the Mov-to-CR0/CR4 did not cause a VM-exit, any bits owned by the host
2941 * must not be modified by the instruction.
2942 *
2943 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
2944 */
2945 uint64_t uGuestCrX;
2946 uint64_t fGstHostMask;
2947 if (iCrReg == 0)
2948 {
2949 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2950 uGuestCrX = pVCpu->cpum.GstCtx.cr0;
2951 fGstHostMask = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr0Mask.u;
2952 }
2953 else
2954 {
2955 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2956 uGuestCrX = pVCpu->cpum.GstCtx.cr4;
2957 fGstHostMask = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64Cr4Mask.u;
2958 }
2959
2960 *puNewCrX = (uGuestCrX & fGstHostMask) | (*puNewCrX & ~fGstHostMask);
2961 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
2962}
2963
2964
2965/**
2966 * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR3' (CR3 read).
2967 *
2968 * @returns VBox strict status code.
2969 * @param pVCpu The cross context virtual CPU structure.
2970 * @param iGReg The general register to which the CR3 value is being stored.
2971 * @param cbInstr The instruction length in bytes.
2972 */
2973IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr)
2974{
2975 Assert(iGReg < X86_GREG_COUNT);
2976 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
2977
2978 /*
2979 * If the CR3-store exiting control is set, we must cause a VM-exit.
2980 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2981 */
2982 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT)
2983 {
2984 Log2(("mov_Rd_Cr: (CR3) Guest intercept -> VM-exit\n"));
2985
2986 VMXVEXITINFO ExitInfo;
2987 RT_ZERO(ExitInfo);
2988 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
2989 ExitInfo.cbInstr = cbInstr;
2990 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
2991 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ)
2992 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
2993 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2994 }
2995
2996 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
2997}
2998
2999
3000/**
3001 * VMX VM-exit handler for VM-exits due to 'Mov CR3,GReg' (CR3 write).
3002 *
3003 * @returns VBox strict status code.
3004 * @param pVCpu The cross context virtual CPU structure.
3005 * @param uNewCr3 The new CR3 value.
3006 * @param iGReg The general register from which the CR3 value is being
3007 * loaded.
3008 * @param cbInstr The instruction length in bytes.
3009 */
3010IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr3(PVMCPUCC pVCpu, uint64_t uNewCr3, uint8_t iGReg, uint8_t cbInstr)
3011{
3012 Assert(iGReg < X86_GREG_COUNT);
3013
3014 /*
3015 * If the CR3-load exiting control is set and the new CR3 value does not
3016 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
3017 *
3018 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3019 */
3020 if (CPUMIsGuestVmxMovToCr3InterceptSet(pVCpu, uNewCr3))
3021 {
3022 Log2(("mov_Cr_Rd: (CR3) Guest intercept -> VM-exit\n"));
3023
3024 VMXVEXITINFO ExitInfo;
3025 RT_ZERO(ExitInfo);
3026 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3027 ExitInfo.cbInstr = cbInstr;
3028 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
3029 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3030 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3031 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3032 }
3033
3034 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3035}
3036
3037
3038/**
3039 * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR8' (CR8 read).
3040 *
3041 * @returns VBox strict status code.
3042 * @param pVCpu The cross context virtual CPU structure.
3043 * @param iGReg The general register to which the CR8 value is being stored.
3044 * @param cbInstr The instruction length in bytes.
3045 */
3046IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr)
3047{
3048 Assert(iGReg < X86_GREG_COUNT);
3049
3050 /*
3051 * If the CR8-store exiting control is set, we must cause a VM-exit.
3052 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3053 */
3054 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_CR8_STORE_EXIT)
3055 {
3056 Log2(("mov_Rd_Cr: (CR8) Guest intercept -> VM-exit\n"));
3057
3058 VMXVEXITINFO ExitInfo;
3059 RT_ZERO(ExitInfo);
3060 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3061 ExitInfo.cbInstr = cbInstr;
3062 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
3063 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ)
3064 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3065 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3066 }
3067
3068 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3069}
3070
3071
3072/**
3073 * VMX VM-exit handler for VM-exits due to 'Mov CR8,GReg' (CR8 write).
3074 *
3075 * @returns VBox strict status code.
3076 * @param pVCpu The cross context virtual CPU structure.
3077 * @param iGReg The general register from which the CR8 value is being
3078 * loaded.
3079 * @param cbInstr The instruction length in bytes.
3080 */
3081IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr)
3082{
3083 Assert(iGReg < X86_GREG_COUNT);
3084
3085 /*
3086 * If the CR8-load exiting control is set, we must cause a VM-exit.
3087 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3088 */
3089 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_CR8_LOAD_EXIT)
3090 {
3091 Log2(("mov_Cr_Rd: (CR8) Guest intercept -> VM-exit\n"));
3092
3093 VMXVEXITINFO ExitInfo;
3094 RT_ZERO(ExitInfo);
3095 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3096 ExitInfo.cbInstr = cbInstr;
3097 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
3098 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3099 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3100 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3101 }
3102
3103 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3104}
3105
3106
3107/**
3108 * VMX VM-exit handler for VM-exits due to 'Mov DRx,GReg' (DRx write) and 'Mov
3109 * GReg,DRx' (DRx read).
3110 *
3111 * @returns VBox strict status code.
3112 * @param pVCpu The cross context virtual CPU structure.
3113 * @param uInstrId The instruction identity (VMXINSTRID_MOV_TO_DRX or
3114 * VMXINSTRID_MOV_FROM_DRX).
3115 * @param iDrReg The debug register being accessed.
3116 * @param iGReg The general register to/from which the DRx value is being
3117 * stored/loaded.
3118 * @param cbInstr The instruction length in bytes.
3119 */
3120IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovDrX(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint8_t iDrReg, uint8_t iGReg,
3121 uint8_t cbInstr)
3122{
3123 Assert(iDrReg <= 7);
3124 Assert(uInstrId == VMXINSTRID_MOV_TO_DRX || uInstrId == VMXINSTRID_MOV_FROM_DRX);
3125 Assert(iGReg < X86_GREG_COUNT);
3126
3127 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT)
3128 {
3129 uint32_t const uDirection = uInstrId == VMXINSTRID_MOV_TO_DRX ? VMX_EXIT_QUAL_DRX_DIRECTION_WRITE
3130 : VMX_EXIT_QUAL_DRX_DIRECTION_READ;
3131 VMXVEXITINFO ExitInfo;
3132 RT_ZERO(ExitInfo);
3133 ExitInfo.uReason = VMX_EXIT_MOV_DRX;
3134 ExitInfo.cbInstr = cbInstr;
3135 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_REGISTER, iDrReg)
3136 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_DIRECTION, uDirection)
3137 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_GENREG, iGReg);
3138 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3139 }
3140
3141 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3142}
3143
3144
3145/**
3146 * VMX VM-exit handler for VM-exits due to I/O instructions (IN and OUT).
3147 *
3148 * @returns VBox strict status code.
3149 * @param pVCpu The cross context virtual CPU structure.
3150 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_IO_IN or
3151 * VMXINSTRID_IO_OUT).
3152 * @param u16Port The I/O port being accessed.
3153 * @param fImm Whether the I/O port was encoded using an immediate operand
3154 * or the implicit DX register.
3155 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3156 * @param cbInstr The instruction length in bytes.
3157 */
3158IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, bool fImm, uint8_t cbAccess,
3159 uint8_t cbInstr)
3160{
3161 Assert(uInstrId == VMXINSTRID_IO_IN || uInstrId == VMXINSTRID_IO_OUT);
3162 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3163
3164 bool const fIntercept = CPUMIsGuestVmxIoInterceptSet(pVCpu, u16Port, cbAccess);
3165 if (fIntercept)
3166 {
3167 uint32_t const uDirection = uInstrId == VMXINSTRID_IO_IN ? VMX_EXIT_QUAL_IO_DIRECTION_IN
3168 : VMX_EXIT_QUAL_IO_DIRECTION_OUT;
3169 VMXVEXITINFO ExitInfo;
3170 RT_ZERO(ExitInfo);
3171 ExitInfo.uReason = VMX_EXIT_IO_INSTR;
3172 ExitInfo.cbInstr = cbInstr;
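        /* Note: the width field below encodes the access size minus one (1 -> 0, 2 -> 1,
           4 -> 3), which lines up with the I/O-instruction Exit qualification encoding. */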
3173 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_WIDTH, cbAccess - 1)
3174 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_DIRECTION, uDirection)
3175 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_ENCODING, fImm)
3176 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_PORT, u16Port);
3177 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3178 }
3179
3180 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3181}
3182
3183
3184/**
3185 * VMX VM-exit handler for VM-exits due to string I/O instructions (INS and OUTS).
3186 *
3187 * @returns VBox strict status code.
3188 * @param pVCpu The cross context virtual CPU structure.
3189 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_IO_INS or
3190 * VMXINSTRID_IO_OUTS).
3191 * @param u16Port The I/O port being accessed.
3192 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3193 * @param fRep Whether the instruction has a REP prefix or not.
3194 * @param ExitInstrInfo The VM-exit instruction info. field.
3195 * @param cbInstr The instruction length in bytes.
3196 */
3197IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrStrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, uint8_t cbAccess, bool fRep,
3198 VMXEXITINSTRINFO ExitInstrInfo, uint8_t cbInstr)
3199{
3200 Assert(uInstrId == VMXINSTRID_IO_INS || uInstrId == VMXINSTRID_IO_OUTS);
3201 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3202 Assert(ExitInstrInfo.StrIo.iSegReg < X86_SREG_COUNT);
3203 Assert(ExitInstrInfo.StrIo.u3AddrSize == 0 || ExitInstrInfo.StrIo.u3AddrSize == 1 || ExitInstrInfo.StrIo.u3AddrSize == 2);
3204 Assert(uInstrId != VMXINSTRID_IO_INS || ExitInstrInfo.StrIo.iSegReg == X86_SREG_ES);
3205
3206 bool const fIntercept = CPUMIsGuestVmxIoInterceptSet(pVCpu, u16Port, cbAccess);
3207 if (fIntercept)
3208 {
3209 /*
3210 * Figure out the guest-linear address and the direction bit (INS/OUTS).
3211 */
3212 /** @todo r=ramshankar: Is there something in IEM that already does this? */
3213 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
3214 uint8_t const iSegReg = ExitInstrInfo.StrIo.iSegReg;
3215 uint8_t const uAddrSize = ExitInstrInfo.StrIo.u3AddrSize;
3216 uint64_t const uAddrSizeMask = s_auAddrSizeMasks[uAddrSize];
3217
3218 uint32_t uDirection;
3219 uint64_t uGuestLinearAddr;
3220 if (uInstrId == VMXINSTRID_IO_INS)
3221 {
3222 uDirection = VMX_EXIT_QUAL_IO_DIRECTION_IN;
3223 uGuestLinearAddr = pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base + (pVCpu->cpum.GstCtx.rdi & uAddrSizeMask);
3224 }
3225 else
3226 {
3227 uDirection = VMX_EXIT_QUAL_IO_DIRECTION_OUT;
3228 uGuestLinearAddr = pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base + (pVCpu->cpum.GstCtx.rsi & uAddrSizeMask);
3229 }
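        /*
         * Illustrative example (hypothetical guest state): a 16-bit "rep outsb" using DS:SI
         * yields uAddrSizeMask = 0xffff and uGuestLinearAddr = DS.base + (rsi & 0xffff),
         * i.e. only the low 16 bits of RSI contribute to the reported linear address.
         */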
3230
3231 /*
3232 * If the segment is unusable, the guest-linear address is undefined.
3233 * We shall clear it for consistency.
3234 *
3235 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
3236 */
3237 if (pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable)
3238 uGuestLinearAddr = 0;
3239
3240 VMXVEXITINFO ExitInfo;
3241 RT_ZERO(ExitInfo);
3242 ExitInfo.uReason = VMX_EXIT_IO_INSTR;
3243 ExitInfo.cbInstr = cbInstr;
3244 ExitInfo.u64GuestLinearAddr = uGuestLinearAddr;
3245 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_WIDTH, cbAccess - 1)
3246 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_DIRECTION, uDirection)
3247 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_IS_STRING, 1)
3248 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_IS_REP, fRep)
3249 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_ENCODING, VMX_EXIT_QUAL_IO_ENCODING_DX)
3250 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_PORT, u16Port);
3251 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxInsOutInfo)
3252 ExitInfo.InstrInfo = ExitInstrInfo;
3253 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3254 }
3255
3256 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3257}
3258
3259
3260/**
3261 * VMX VM-exit handler for VM-exits due to MWAIT.
3262 *
3263 * @returns VBox strict status code.
3264 * @param pVCpu The cross context virtual CPU structure.
3265 * @param fMonitorHwArmed Whether the address-range monitor hardware is armed.
3266 * @param cbInstr The instruction length in bytes.
3267 */
3268IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMwait(PVMCPUCC pVCpu, bool fMonitorHwArmed, uint8_t cbInstr)
3269{
3270 VMXVEXITINFO ExitInfo;
3271 RT_ZERO(ExitInfo);
3272 ExitInfo.uReason = VMX_EXIT_MWAIT;
3273 ExitInfo.cbInstr = cbInstr;
3274 ExitInfo.u64Qual = fMonitorHwArmed;
3275 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3276}
3277
3278
3279/**
3280 * VMX VM-exit handler for VM-exits due to PAUSE.
3281 *
3282 * @returns VBox strict status code.
3283 * @param pVCpu The cross context virtual CPU structure.
3284 * @param cbInstr The instruction length in bytes.
3285 */
3286IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrPause(PVMCPUCC pVCpu, uint8_t cbInstr)
3287{
3288 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
3289
3290 /*
3291 * The PAUSE VM-exit is controlled by the "PAUSE exiting" control and the
3292 * "PAUSE-loop exiting" control.
3293 *
3294 * The PLE-Gap is the maximum number of TSC ticks allowed between two successive
3295 * executions of the PAUSE instruction for them to be considered part of the same
3296 * pause loop. The PLE-Window is the maximum number of TSC ticks the guest is
3297 * allowed to spend in such a pause loop before we must cause a VM-exit.
3298 *
3299 * See Intel spec. 24.6.13 "Controls for PAUSE-Loop Exiting".
3300 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3301 */
3302 bool fIntercept = false;
3303 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_PAUSE_EXIT)
3304 fIntercept = true;
3305 else if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
3306 && pVCpu->iem.s.uCpl == 0)
3307 {
3308 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
3309
3310 /*
3311 * A previous-PAUSE-tick value of 0 is used to identify the first execution
3312 * of a PAUSE instruction after VM-entry at CPL 0. We must consider this to
3313 * be the first execution of PAUSE in a loop according to the Intel spec.
3314 *
3315 * For all subsequent recordings of the previous-PAUSE-tick, we ensure it
3316 * cannot be zero by OR'ing in 1, which also rules out the TSC wrap-around
3317 * case at 0.
3318 */
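        /*
         * Illustrative timeline (hypothetical values): with uPleGap = 128 and
         * uPleWindow = 4096, a PAUSE at tick 10000 starts a new loop (previous tick 0),
         * a PAUSE at tick 10100 stays in the same loop (delta 100 <= gap), and once
         * uTick - uFirstPauseLoopTick exceeds 4096 the next PAUSE triggers the VM-exit.
         */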
3319 uint64_t *puFirstPauseLoopTick = &pVCpu->cpum.GstCtx.hwvirt.vmx.uFirstPauseLoopTick;
3320 uint64_t *puPrevPauseTick = &pVCpu->cpum.GstCtx.hwvirt.vmx.uPrevPauseTick;
3321 uint64_t const uTick = TMCpuTickGet(pVCpu);
3322 uint32_t const uPleGap = pVmcs->u32PleGap;
3323 uint32_t const uPleWindow = pVmcs->u32PleWindow;
3324 if ( *puPrevPauseTick == 0
3325 || uTick - *puPrevPauseTick > uPleGap)
3326 *puFirstPauseLoopTick = uTick;
3327 else if (uTick - *puFirstPauseLoopTick > uPleWindow)
3328 fIntercept = true;
3329
3330 *puPrevPauseTick = uTick | 1;
3331 }
3332
3333 if (fIntercept)
3334 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_PAUSE, cbInstr);
3335
3336 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3337}
3338
3339
3340/**
3341 * VMX VM-exit handler for VM-exits due to task switches.
3342 *
3343 * @returns VBox strict status code.
3344 * @param pVCpu The cross context virtual CPU structure.
3345 * @param enmTaskSwitch The cause of the task switch.
3346 * @param SelNewTss The selector of the new TSS.
3347 * @param cbInstr The instruction length in bytes.
3348 */
3349IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr)
3350{
3351 /*
3352 * Task-switch VM-exits are unconditional and provide the Exit qualification.
3353 *
3354 * If the cause of the task switch is execution of the CALL, IRET or JMP instruction,
3355 * or if delivery of an exception generated by one of these instructions leads to a
3356 * task switch through a task gate in the IDT, we need to provide the VM-exit
3357 * instruction length. Any other means of invoking a task-switch VM-exit leaves
3358 * the VM-exit instruction length field undefined.
3359 *
3360 * See Intel spec. 25.2 "Other Causes Of VM Exits".
3361 * See Intel spec. 27.2.4 "Information for VM Exits Due to Instruction Execution".
3362 */
3363 Assert(cbInstr <= 15);
3364
3365 uint8_t uType;
3366 switch (enmTaskSwitch)
3367 {
3368 case IEMTASKSWITCH_CALL: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_CALL; break;
3369 case IEMTASKSWITCH_IRET: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IRET; break;
3370 case IEMTASKSWITCH_JUMP: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_JMP; break;
3371 case IEMTASKSWITCH_INT_XCPT: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT; break;
3372 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3373 }
3374
3375 uint64_t const u64ExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_NEW_TSS, SelNewTss)
3376 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_SOURCE, uType);
3377 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
3378 return iemVmxVmexit(pVCpu, VMX_EXIT_TASK_SWITCH, u64ExitQual);
3379}
3380
3381
3382/**
3383 * VMX VM-exit handler for trap-like VM-exits.
3384 *
3385 * @returns VBox strict status code.
3386 * @param pVCpu The cross context virtual CPU structure.
3387 * @param pExitInfo Pointer to the VM-exit information.
3389 */
3390IEM_STATIC VBOXSTRICTRC iemVmxVmexitTrapLikeWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
3391{
3392 Assert(VMXIsVmexitTrapLike(pExitInfo->uReason));
3393 iemVmxVmcsSetGuestPendingDbgXcpts(pVCpu, pExitInfo->u64GuestPendingDbgXcpts);
3394 return iemVmxVmexit(pVCpu, pExitInfo->uReason, pExitInfo->u64Qual);
3395}
3396
3397
3398/**
3399 * VMX VM-exit handler for VM-exits due to task switches.
3400 *
3401 * This is intended for task switches where the caller provides all the relevant
3402 * VM-exit information.
3403 *
3404 * @returns VBox strict status code.
3405 * @param pVCpu The cross context virtual CPU structure.
3406 * @param pExitInfo Pointer to the VM-exit information.
3407 * @param pExitEventInfo Pointer to the VM-exit event information.
3408 */
3409IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitchWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo,
3410 PCVMXVEXITEVENTINFO pExitEventInfo)
3411{
3412 Assert(pExitInfo->uReason == VMX_EXIT_TASK_SWITCH);
3413 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
3414 iemVmxVmcsSetIdtVectoringInfo(pVCpu, pExitEventInfo->uIdtVectoringInfo);
3415 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode);
3416 return iemVmxVmexit(pVCpu, VMX_EXIT_TASK_SWITCH, pExitInfo->u64Qual);
3417}
3418
3419
3420/**
3421 * VMX VM-exit handler for VM-exits due to expiring of the preemption timer.
3422 *
3423 * @returns VBox strict status code.
3424 * @param pVCpu The cross context virtual CPU structure.
3425 */
3426IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPUCC pVCpu)
3427{
3428 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
3429 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER);
3430
3431 /* Import the hardware virtualization state (for nested-guest VM-entry TSC-tick). */
3432 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
3433
3434 /* Save the VMX-preemption timer value (of 0) back in to the VMCS if the CPU supports this feature. */
3435 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER)
3436 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PreemptTimer = 0;
3437
3438 /* Cause the VMX-preemption timer VM-exit. The Exit qualification MBZ. */
3439 return iemVmxVmexit(pVCpu, VMX_EXIT_PREEMPT_TIMER, 0 /* u64ExitQual */);
3440}
3441
3442
3443/**
3444 * VMX VM-exit handler for VM-exits due to external interrupts.
3445 *
3446 * @returns VBox strict status code.
3447 * @param pVCpu The cross context virtual CPU structure.
3448 * @param uVector The external interrupt vector (pass 0 if the interrupt
3449 * is still pending since we typically won't know the
3450 * vector).
3451 * @param fIntPending Whether the external interrupt is pending or
3452 * acknowledged in the interrupt controller.
3453 */
3454IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending)
3455{
3456 Assert(!fIntPending || uVector == 0);
3457
3458 /* The VM-exit is subject to "External interrupt exiting" being set. */
3459 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT)
3460 {
3461 if (fIntPending)
3462 {
3463 /*
3464 * If the interrupt is pending and we don't need to acknowledge the
3465 * interrupt on VM-exit, cause the VM-exit immediately.
3466 *
3467 * See Intel spec 25.2 "Other Causes Of VM Exits".
3468 */
3469 if (!(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT))
3470 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT, 0 /* u64ExitQual */);
3471
3472 /*
3473 * If the interrupt is pending and we -do- need to acknowledge the interrupt
3474 * on VM-exit, postpone the VM-exit until after the interrupt controller has
3475 * acknowledged that the interrupt has been consumed. Callers would then have to
3476 * call us again after getting the vector (and, of course, with fIntPending set to false).
3477 */
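            /*
             * In other words (informal summary of the flow described above): the caller
             * first invokes this function with fIntPending = true; if acknowledge-on-exit
             * is required we return VINF_VMX_INTERCEPT_NOT_ACTIVE, the caller fetches the
             * vector from the interrupt controller and calls us again with fIntPending = false,
             * at which point the vector is recorded and the VM-exit is actually performed.
             */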
3478 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3479 }
3480
3481 /*
3482 * If the interrupt is no longer pending (i.e. it has been acknowledged) and the
3483 * "External interrupt exiting" and "Acknowledge interrupt on VM-exit" controls are
3484 * all set, we need to record the vector of the external interrupt in the
3485 * VM-exit interruption information field. Otherwise, mark this field as invalid.
3486 *
3487 * See Intel spec. 27.2.2 "Information for VM Exits Due to Vectored Events".
3488 */
3489 uint32_t uExitIntInfo;
3490 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
3491 {
3492 bool const fNmiUnblocking = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret;
3493 uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, uVector)
3494 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_EXT_INT)
3495 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, fNmiUnblocking)
3496 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
3497 }
3498 else
3499 uExitIntInfo = 0;
3500 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
3501
3502 /*
3503 * Cause the VM-exit whether or not the vector has been stored
3504 * in the VM-exit interruption-information field.
3505 */
3506 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT, 0 /* u64ExitQual */);
3507 }
3508
3509 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3510}
3511
3512
3513/**
3514 * VMX VM-exit handler for VM-exits due to a double fault caused during delivery of
3515 * an event.
3516 *
3517 * @returns VBox strict status code.
3518 * @param pVCpu The cross context virtual CPU structure.
3519 */
3520IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu)
3521{
3522 uint32_t const fXcptBitmap = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32XcptBitmap;
3523 if (fXcptBitmap & RT_BIT(X86_XCPT_DF))
3524 {
3525 /*
3526 * The NMI-unblocking due to IRET field need not be set for double faults.
3527 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
3528 */
3529 uint32_t const uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_DF)
3530 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3531 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_ERR_CODE_VALID, 1)
3532 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, 0)
3533 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
3534 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
3535 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI, 0 /* u64ExitQual */);
3536 }
3537
3538 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3539}
3540
3541
3542/**
3543 * VMX VM-exit handler for VM-exits due to delivery of an event.
3544 *
3545 * This is intended for VM-exit due to exceptions or NMIs where the caller provides
3546 * all the relevant VM-exit information.
3547 *
3548 * @returns VBox strict status code.
3549 * @param pVCpu The cross context virtual CPU structure.
3550 * @param pExitInfo Pointer to the VM-exit information.
3551 * @param pExitEventInfo Pointer to the VM-exit event information.
3552 */
3553IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
3554{
3555 Assert(pExitInfo);
3556 Assert(pExitEventInfo);
3557 Assert(pExitInfo->uReason == VMX_EXIT_XCPT_OR_NMI);
3558 Assert(VMX_EXIT_INT_INFO_IS_VALID(pExitEventInfo->uExitIntInfo));
3559
3560 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
3561 iemVmxVmcsSetExitIntInfo(pVCpu, pExitEventInfo->uExitIntInfo);
3562 iemVmxVmcsSetExitIntErrCode(pVCpu, pExitEventInfo->uExitIntErrCode);
3563 iemVmxVmcsSetIdtVectoringInfo(pVCpu, pExitEventInfo->uIdtVectoringInfo);
3564 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode);
3565 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI, pExitInfo->u64Qual);
3566}
3567
3568
3569/**
3570 * VMX VM-exit handler for VM-exits due to delivery of an event.
3571 *
3572 * @returns VBox strict status code.
3573 * @param pVCpu The cross context virtual CPU structure.
3574 * @param uVector The interrupt / exception vector.
3575 * @param fFlags The flags (see IEM_XCPT_FLAGS_XXX).
3576 * @param uErrCode The error code associated with the event.
3577 * @param uCr2 The CR2 value in case of a \#PF exception.
3578 * @param cbInstr The instruction length in bytes.
3579 */
3580IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2,
3581 uint8_t cbInstr)
3582{
3583 /*
3584 * If the event is being injected as part of VM-entry, it is -not- subject to event
3585 * intercepts in the nested-guest. However, secondary exceptions that occur during
3586 * injection of any event -are- subject to event interception.
3587 *
3588 * See Intel spec. 26.5.1.2 "VM Exits During Event Injection".
3589 */
3590 if (!CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx))
3591 {
3592 /*
3593 * If the event is a virtual NMI (i.e. an NMI being injected during VM-entry),
3594 * virtual-NMI blocking must be set in effect rather than physical NMI blocking.
3595 *
3596 * See Intel spec. 24.6.1 "Pin-Based VM-Execution Controls".
3597 */
3598 if ( uVector == X86_XCPT_NMI
3599 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3600 && (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
3601 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = true;
3602 else
3603 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking);
3604
3605 CPUMSetGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx, true);
3606 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3607 }
3608
3609 /*
3610 * We are injecting an external interrupt; check whether we need to cause a VM-exit now.
3611 * If not, the caller will continue delivery of the external interrupt as it would
3612 * normally. The interrupt is no longer pending in the interrupt controller at this
3613 * point.
3614 */
3615 if (fFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3616 {
3617 Assert(!VMX_IDT_VECTORING_INFO_IS_VALID(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32RoIdtVectoringInfo));
3618 return iemVmxVmexitExtInt(pVCpu, uVector, false /* fIntPending */);
3619 }
3620
3621 /*
3622 * Evaluate intercepts for hardware exceptions, software exceptions (#BP, #OF),
3623 * and privileged software exceptions (#DB generated by INT1/ICEBP) and software
3624 * interrupts.
3625 */
3626 Assert(fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_SOFT_INT));
3627 bool fIntercept;
3628 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3629 || (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR)))
3630 fIntercept = CPUMIsGuestVmxXcptInterceptSet(&pVCpu->cpum.GstCtx, uVector, uErrCode);
3631 else
3632 {
3633 /* Software interrupts cannot be intercepted and therefore do not cause a VM-exit. */
3634 fIntercept = false;
3635 }
3636
3637 /*
3638 * Now that we've determined whether the event causes a VM-exit, we need to construct the
3639 * relevant VM-exit information and cause the VM-exit.
3640 */
3641 if (fIntercept)
3642 {
3643 Assert(!(fFlags & IEM_XCPT_FLAGS_T_EXT_INT));
3644
3645 /* Construct the rest of the event related information fields and cause the VM-exit. */
3646 uint64_t u64ExitQual;
3647 if (uVector == X86_XCPT_PF)
3648 {
3649 Assert(fFlags & IEM_XCPT_FLAGS_CR2);
3650 u64ExitQual = uCr2;
3651 }
3652 else if (uVector == X86_XCPT_DB)
3653 {
3654 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
3655 u64ExitQual = pVCpu->cpum.GstCtx.dr[6] & VMX_VMCS_EXIT_QUAL_VALID_MASK;
3656 }
3657 else
3658 u64ExitQual = 0;
3659
3660 uint8_t const fNmiUnblocking = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret;
3661 bool const fErrCodeValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
3662 uint8_t const uIntInfoType = iemVmxGetEventType(uVector, fFlags);
3663 uint32_t const uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, uVector)
3664 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, uIntInfoType)
3665 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_ERR_CODE_VALID, fErrCodeValid)
3666 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, fNmiUnblocking)
3667 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
3668 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
3669 iemVmxVmcsSetExitIntErrCode(pVCpu, uErrCode);
3670
3671 /*
3672 * For VM-exits due to software exceptions (those generated by INT3 or INTO) or privileged
3673 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
3674 * length.
3675 */
3676 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3677 || (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR)))
3678 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
3679 else
3680 iemVmxVmcsSetExitInstrLen(pVCpu, 0);
3681
3682 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI, u64ExitQual);
3683 }
3684
3685 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3686}
3687
3688
3689/**
3690 * VMX VM-exit handler for EPT misconfiguration.
3691 *
3692 * @param pVCpu The cross context virtual CPU structure.
3693 * @param GCPhysAddr The physical address causing the EPT misconfiguration. This
3694 * must be page aligned.
3695 */
3696IEM_STATIC VBOXSTRICTRC iemVmxVmexitEptMisconfig(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr)
3697{
3698 Assert(!(GCPhysAddr & PAGE_OFFSET_MASK));
3699 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, GCPhysAddr);
3700 return iemVmxVmexit(pVCpu, VMX_EXIT_EPT_MISCONFIG, 0 /* u64ExitQual */);
3701}
3702
3703
3704/**
3705 * VMX VM-exit handler for EPT violation.
3706 *
3707 * @param pVCpu The cross context virtual CPU structure.
3708 * @param fAccess The access causing the EPT violation, IEM_ACCESS_XXX.
3709 * @param fSlatFail The SLAT failure info, IEM_SLAT_FAIL_XXX.
3710 * @param fEptAccess The EPT paging structure bits.
3711 * @param GCPhysAddr The physical address causing the EPT violation. This
3712 * must be page aligned.
3713 * @param fLinearAddrValid Whether translation of a linear address caused this
3714 * EPT violation. If @c false, GCPtrAddr must be 0.
3715 * @param GCPtrAddr The linear address causing the EPT violation.
3716 * @param cbInstr The VM-exit instruction length.
3717 */
3718IEM_STATIC VBOXSTRICTRC iemVmxVmexitEptViolation(PVMCPUCC pVCpu, uint32_t fAccess, uint32_t fSlatFail, uint64_t fEptAccess,
3719 RTGCPHYS GCPhysAddr, bool fLinearAddrValid, uint64_t GCPtrAddr, uint8_t cbInstr)
3720{
3721 /*
3722 * If the linear address isn't valid (can happen when loading PDPTEs
3723 * as part of MOV CR execution), the linear address field is undefined.
3724 * While we can leave it this way, it's preferable to zero it for consistency.
3725 */
3726 Assert(fLinearAddrValid || GCPtrAddr == 0);
3727 Assert(!(GCPhysAddr & PAGE_OFFSET_MASK));
3728
3729 uint64_t const fCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps;
3730 uint8_t const fSupportsAccessDirty = fCaps & MSR_IA32_VMX_EPT_VPID_CAP_ACCESS_DIRTY;
3731
3732 uint8_t const fDataRead = ((fAccess & IEM_ACCESS_DATA_R) == IEM_ACCESS_DATA_R) | fSupportsAccessDirty;
3733 uint8_t const fDataWrite = ((fAccess & IEM_ACCESS_DATA_RW) == IEM_ACCESS_DATA_RW) | fSupportsAccessDirty;
3734 uint8_t const fInstrFetch = (fAccess & IEM_ACCESS_INSTRUCTION) == IEM_ACCESS_INSTRUCTION;
3735 bool const fEptRead = RT_BOOL(fEptAccess & EPT_E_READ);
3736 bool const fEptWrite = RT_BOOL(fEptAccess & EPT_E_WRITE);
3737 bool const fEptExec = RT_BOOL(fEptAccess & EPT_E_EXECUTE);
3738 bool const fNmiUnblocking = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret;
3739 bool const fLinearToPhysAddr = fLinearAddrValid & RT_BOOL(fSlatFail & IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR);
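    /*
     * For illustration: fEptRead/fEptWrite/fEptExec simply mirror the R/W/X permission
     * bits of the offending EPT paging-structure entry, while fDataRead/fDataWrite/
     * fInstrFetch describe the attempted guest access (note the fSupportsAccessDirty
     * OR above). E.g. a data write to a mapping that only grants read access is
     * reported with fEptRead set and fEptWrite clear in the Exit qualification below.
     */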
3740
3741 uint64_t const u64ExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_ACCESS_READ, fDataRead)
3742 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_ACCESS_WRITE, fDataWrite)
3743 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_ACCESS_INSTR_FETCH, fInstrFetch)
3744 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_ENTRY_READ, fEptRead)
3745 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_ENTRY_WRITE, fEptWrite)
3746 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_ENTRY_EXECUTE, fEptExec)
3747 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_LINEAR_ADDR_VALID, fLinearAddrValid)
3748 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_LINEAR_TO_PHYS_ADDR, fLinearToPhysAddr)
3749 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_EPT_NMI_UNBLOCK_IRET, fNmiUnblocking);
3750
3751#ifdef VBOX_STRICT
3752 uint64_t const fMiscCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Misc;
3753 uint32_t const fProcCtls2 = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2;
3754 Assert(!(fCaps & MSR_IA32_VMX_EPT_VPID_CAP_ADVEXITINFO_EPT_VIOLATION)); /* Advanced VM-exit info. not supported */
3755 Assert(!(fCaps & MSR_IA32_VMX_EPT_VPID_CAP_SUPER_SHW_STACK)); /* Supervisor shadow stack control not supported. */
3756 Assert(!(RT_BF_GET(fMiscCaps, VMX_BF_MISC_INTEL_PT))); /* Intel PT not supported. */
3757 Assert(!(fProcCtls2 & VMX_PROC_CTLS2_MODE_BASED_EPT_PERM)); /* Mode-based execute control not supported. */
3758#endif
3759
3760 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, GCPhysAddr);
3761 iemVmxVmcsSetExitGuestLinearAddr(pVCpu, GCPtrAddr);
3762 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
3763
3764 return iemVmxVmexit(pVCpu, VMX_EXIT_EPT_VIOLATION, u64ExitQual);
3765}
3766
3767
3768/**
3769 * VMX VM-exit handler for EPT-induced VM-exits.
3770 *
3771 * @param pVCpu The cross context virtual CPU structure.
3772 * @param pWalk The page walk info.
3773 * @param fAccess The access causing the EPT event, IEM_ACCESS_XXX.
3774 * @param fSlatFail Additional SLAT info, IEM_SLAT_FAIL_XXX.
3775 * @param cbInstr The VM-exit instruction length if applicable. Pass 0 if not
3776 * applicable.
3777 */
3778IEM_STATIC VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail,
3779 uint8_t cbInstr)
3780{
3781 Assert(pWalk->fIsSlat);
3782 Assert(pWalk->fFailed & PGM_WALKFAIL_EPT);
3783 Assert(!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEptXcptVe); /* #VE exceptions not supported. */
3784 Assert(!(pWalk->fFailed & PGM_WALKFAIL_EPT_VIOLATION_CONVERTIBLE)); /* Without #VE, convertible violations not possible. */
3785
3786 if (pWalk->fFailed & PGM_WALKFAIL_EPT_VIOLATION)
3787 {
3788 uint64_t const fEptAccess = (pWalk->fEffective & PGM_PTATTRS_EPT_MASK) >> PGM_PTATTRS_EPT_SHIFT;
3789 return iemVmxVmexitEptViolation(pVCpu, fAccess, fSlatFail, fEptAccess, pWalk->GCPhysNested, pWalk->fIsLinearAddrValid,
3790 pWalk->GCPtr, cbInstr);
3791 }
3792
3793 Assert(pWalk->fFailed & PGM_WALKFAIL_EPT_MISCONFIG);
3794 return iemVmxVmexitEptMisconfig(pVCpu, pWalk->GCPhysNested);
3795}
3796
3797
3798/**
3799 * VMX VM-exit handler for APIC accesses.
3800 *
3801 * @param pVCpu The cross context virtual CPU structure.
3802 * @param offAccess The offset of the register being accessed.
3803 * @param fAccess The type of access (must contain IEM_ACCESS_TYPE_READ or
3804 * IEM_ACCESS_TYPE_WRITE or IEM_ACCESS_INSTRUCTION).
3805 */
3806IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPUCC pVCpu, uint16_t offAccess, uint32_t fAccess)
3807{
3808 Assert((fAccess & IEM_ACCESS_TYPE_READ) || (fAccess & IEM_ACCESS_TYPE_WRITE) || (fAccess & IEM_ACCESS_INSTRUCTION));
3809
3810 VMXAPICACCESS enmAccess;
3811 bool const fInEventDelivery = IEMGetCurrentXcpt(pVCpu, NULL, NULL, NULL, NULL);
3812 if (fInEventDelivery)
3813 enmAccess = VMXAPICACCESS_LINEAR_EVENT_DELIVERY;
3814 else if (fAccess & IEM_ACCESS_INSTRUCTION)
3815 enmAccess = VMXAPICACCESS_LINEAR_INSTR_FETCH;
3816 else if (fAccess & IEM_ACCESS_TYPE_WRITE)
3817 enmAccess = VMXAPICACCESS_LINEAR_WRITE;
3818 else
3819 enmAccess = VMXAPICACCESS_LINEAR_READ;
3820
3821 uint64_t const u64ExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_OFFSET, offAccess)
3822 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_TYPE, enmAccess);
3823 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_ACCESS, u64ExitQual);
3824}
3825
3826
3827/**
3828 * VMX VM-exit handler for APIC accesses.
3829 *
3830 * This is intended for APIC accesses where the caller provides all the
3831 * relevant VM-exit information.
3832 *
3833 * @returns VBox strict status code.
3834 * @param pVCpu The cross context virtual CPU structure.
3835 * @param pExitInfo Pointer to the VM-exit information.
3836 * @param pExitEventInfo Pointer to the VM-exit event information.
3837 */
3838IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccessWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo,
3839 PCVMXVEXITEVENTINFO pExitEventInfo)
3840{
3841 /* VM-exit interruption information should not be valid for APIC-access VM-exits. */
3842 Assert(!VMX_EXIT_INT_INFO_IS_VALID(pExitEventInfo->uExitIntInfo));
3843 Assert(pExitInfo->uReason == VMX_EXIT_APIC_ACCESS);
3844 iemVmxVmcsSetExitIntInfo(pVCpu, 0);
3845 iemVmxVmcsSetExitIntErrCode(pVCpu, 0);
3846 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
3847 iemVmxVmcsSetIdtVectoringInfo(pVCpu, pExitEventInfo->uIdtVectoringInfo);
3848 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode);
3849 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_ACCESS, pExitInfo->u64Qual);
3850}
3851
3852
3853/**
3854 * VMX VM-exit handler for APIC-write VM-exits.
3855 *
3856 * @param pVCpu The cross context virtual CPU structure.
3857 * @param offApic The write to the virtual-APIC page offset that caused this
3858 * VM-exit.
3859 */
3860IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicWrite(PVMCPUCC pVCpu, uint16_t offApic)
3861{
3862 Assert(offApic < XAPIC_OFF_END + 4);
3863 /* Write only bits 11:0 of the APIC offset into the Exit qualification field. */
3864 offApic &= UINT16_C(0xfff);
3865 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_WRITE, offApic);
3866}
3867
3868
3869/**
3870 * Sets virtual-APIC write emulation as pending.
3871 *
3872 * @param pVCpu The cross context virtual CPU structure.
3873 * @param offApic The offset in the virtual-APIC page that was written.
3874 */
3875DECLINLINE(void) iemVmxVirtApicSetPendingWrite(PVMCPUCC pVCpu, uint16_t offApic)
3876{
3877 Assert(offApic < XAPIC_OFF_END + 4);
3878
3879 /*
3880 * Record the currently updated APIC offset, as we need this later for figuring
3881 * out whether to perform TPR, EOI or self-IPI virtualization, as well as for
3882 * supplying the exit qualification when causing an APIC-write VM-exit.
3883 */
3884 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
3885
3886 /*
3887 * Flag that we need to perform virtual-APIC write emulation (TPR/PPR/EOI/Self-IPI
3888 * virtualization or APIC-write emulation).
3889 */
3890 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3891 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
3892}
3893
3894
3895/**
3896 * Clears any pending virtual-APIC write emulation.
3897 *
3898 * @returns The virtual-APIC offset that was written before clearing it.
3899 * @param pVCpu The cross context virtual CPU structure.
3900 */
3901DECLINLINE(uint16_t) iemVmxVirtApicClearPendingWrite(PVMCPUCC pVCpu)
3902{
3903 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
3904 uint8_t const offVirtApicWrite = pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite;
3905 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = 0;
3906 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
3907 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
3908 return offVirtApicWrite;
3909}
3910
3911
3912/**
3913 * Reads a 32-bit register from the virtual-APIC page at the given offset.
3914 *
3915 * @returns The register from the virtual-APIC page.
3916 * @param pVCpu The cross context virtual CPU structure.
3917 * @param offReg The offset of the register being read.
3918 */
3919IEM_STATIC uint32_t iemVmxVirtApicReadRaw32(PVMCPUCC pVCpu, uint16_t offReg)
3920{
3921 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
3922
3923 uint32_t uReg = 0;
3924 RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u;
3925 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg, sizeof(uReg));
3926 AssertMsgStmt(RT_SUCCESS(rc),
3927 ("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp: %Rrc\n",
3928 sizeof(uReg), offReg, GCPhysVirtApic, rc),
3929 uReg = 0);
3930 return uReg;
3931}
3932
3933
3934/**
3935 * Reads a 64-bit register from the virtual-APIC page at the given offset.
3936 *
3937 * @returns The register from the virtual-APIC page.
3938 * @param pVCpu The cross context virtual CPU structure.
3939 * @param offReg The offset of the register being read.
3940 */
3941IEM_STATIC uint64_t iemVmxVirtApicReadRaw64(PVMCPUCC pVCpu, uint16_t offReg)
3942{
3943 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
3944
3945 uint64_t uReg = 0;
3946 RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u;
3947 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg, sizeof(uReg));
3948 AssertMsgStmt(RT_SUCCESS(rc),
3949 ("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp: %Rrc\n",
3950 sizeof(uReg), offReg, GCPhysVirtApic, rc),
3951 uReg = 0);
3952 return uReg;
3953}
3954
3955
3956/**
3957 * Writes a 32-bit register to the virtual-APIC page at the given offset.
3958 *
3959 * @param pVCpu The cross context virtual CPU structure.
3960 * @param offReg The offset of the register being written.
3961 * @param uReg The register value to write.
3962 */
3963IEM_STATIC void iemVmxVirtApicWriteRaw32(PVMCPUCC pVCpu, uint16_t offReg, uint32_t uReg)
3964{
3965 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
3966
3967 RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u;
3968 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg, &uReg, sizeof(uReg));
3969 AssertMsgRC(rc, ("Failed to write %u bytes at offset %#x of the virtual-APIC page at %#RGp: %Rrc\n",
3970 sizeof(uReg), offReg, GCPhysVirtApic, rc));
3971}
3972
3973
3974/**
3975 * Writes a 64-bit register to the virtual-APIC page at the given offset.
3976 *
3977 * @param pVCpu The cross context virtual CPU structure.
3978 * @param offReg The offset of the register being written.
3979 * @param uReg The register value to write.
3980 */
3981IEM_STATIC void iemVmxVirtApicWriteRaw64(PVMCPUCC pVCpu, uint16_t offReg, uint64_t uReg)
3982{
3983 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
3984
3985 RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u;
3986 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg, &uReg, sizeof(uReg));
3987 AssertMsgRC(rc, ("Failed to write %u bytes at offset %#x of the virtual-APIC page at %#RGp: %Rrc\n",
3988 sizeof(uReg), offReg, GCPhysVirtApic, rc));
3989}
3990
3991
3992/**
3993 * Sets the vector in a virtual-APIC 256-bit sparse register.
3994 *
3995 * @param pVCpu The cross context virtual CPU structure.
3996 * @param offReg The offset of the 256-bit sparse register.
3997 * @param uVector The vector to set.
3998 *
3999 * @remarks This is based on our APIC device code.
4000 */
4001IEM_STATIC void iemVmxVirtApicSetVectorInReg(PVMCPUCC pVCpu, uint16_t offReg, uint8_t uVector)
4002{
4003 /* Determine the vector offset within the chunk. */
4004 uint16_t const offVector = (uVector & UINT32_C(0xe0)) >> 1;
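    /* Worked example (hypothetical vector 0x41): offVector = (0x41 & 0xe0) >> 1 = 0x20 and
       idxVectorBit = 0x41 & 0x1f = 1 below, i.e. the vector maps to bit 1 of the 32-bit
       chunk at offReg + 0x20, matching the 16-byte stride of the 256-bit sparse registers. */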
4005
4006 /* Read the chunk at the offset. */
4007 uint32_t uReg;
4008 RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u;
4009 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg + offVector, sizeof(uReg));
4010 if (RT_SUCCESS(rc))
4011 {
4012 /* Modify the chunk. */
4013 uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
4014 uReg |= RT_BIT(idxVectorBit);
4015
4016 /* Write the chunk. */
4017 rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg + offVector, &uReg, sizeof(uReg));
4018 AssertMsgRC(rc, ("Failed to set vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp: %Rrc\n",
4019 uVector, offReg, GCPhysVirtApic, rc));
4020 }
4021 else
4022 AssertMsgFailed(("Failed to get vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp: %Rrc\n",
4023 uVector, offReg, GCPhysVirtApic, rc));
4024}
4025
4026
4027/**
4028 * Clears the vector in a virtual-APIC 256-bit sparse register.
4029 *
4030 * @param pVCpu The cross context virtual CPU structure.
4031 * @param offReg The offset of the 256-bit sparse register.
4032 * @param uVector The vector to clear.
4033 *
4034 * @remarks This is based on our APIC device code.
4035 */
4036IEM_STATIC void iemVmxVirtApicClearVectorInReg(PVMCPUCC pVCpu, uint16_t offReg, uint8_t uVector)
4037{
4038 /* Determine the vector offset within the chunk. */
4039 uint16_t const offVector = (uVector & UINT32_C(0xe0)) >> 1;
4040
4041 /* Read the chunk at the offset. */
4042 uint32_t uReg;
4043 RTGCPHYS const GCPhysVirtApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrVirtApic.u;
4044 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg + offVector, sizeof(uReg));
4045 if (RT_SUCCESS(rc))
4046 {
4047 /* Modify the chunk. */
4048 uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
4049 uReg &= ~RT_BIT(idxVectorBit);
4050
4051 /* Write the chunk. */
4052 rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg + offVector, &uReg, sizeof(uReg));
4053 AssertMsgRC(rc, ("Failed to clear vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp: %Rrc\n",
4054 uVector, offReg, GCPhysVirtApic, rc));
4055 }
4056 else
4057 AssertMsgFailed(("Failed to get vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp: %Rrc\n",
4058 uVector, offReg, GCPhysVirtApic, rc));
4059}
4060
4061
4062/**
4063 * Checks if a memory access to the APIC-access page must cause an APIC-access
4064 * VM-exit.
4065 *
4066 * @param pVCpu The cross context virtual CPU structure.
4067 * @param offAccess The offset of the register being accessed.
4068 * @param cbAccess The size of the access in bytes.
4069 * @param fAccess The type of access (must be IEM_ACCESS_TYPE_READ or
4070 * IEM_ACCESS_TYPE_WRITE).
4071 *
4072 * @remarks This must not be used for MSR-based APIC-access page accesses!
4073 * @sa iemVmxVirtApicAccessMsrWrite, iemVmxVirtApicAccessMsrRead.
4074 */
4075IEM_STATIC bool iemVmxVirtApicIsMemAccessIntercepted(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, uint32_t fAccess)
4076{
4077 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4078 Assert(fAccess == IEM_ACCESS_TYPE_READ || fAccess == IEM_ACCESS_TYPE_WRITE);
4079
4080 /*
4081 * We must cause a VM-exit if any of the following are true:
4082 * - TPR shadowing isn't active.
4083 * - The access size exceeds 32-bits.
4084 * - The access is not contained within the low 4 bytes of a 16-byte aligned offset.
4085 *
4086 * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
4087 * See Intel spec. 29.4.3.1 "Determining Whether a Write Access is Virtualized".
4088 */
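    /*
     * For instance (hypothetical accesses): a 4-byte read at offset 0x80 (TPR) gives
     * (0x80 + 4 - 1) & 0xc = 0 and so stays within the low 4 bytes of its 16-byte
     * aligned region, whereas a 2-byte access at offset 0x83 gives (0x83 + 2 - 1) & 0xc = 4
     * and therefore falls into the "must VM-exit" case below.
     */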
4089 if ( !(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4090 || cbAccess > sizeof(uint32_t)
4091 || ((offAccess + cbAccess - 1) & 0xc)
4092 || offAccess >= XAPIC_OFF_END + 4)
4093 return true;
4094
4095 /*
4096 * If the access is part of an operation where we have already
4097 * virtualized a virtual-APIC write, we must cause a VM-exit.
4098 */
4099 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4100 return true;
4101
4102 /*
4103 * Check write accesses to the APIC-access page that cause VM-exits.
4104 */
4105 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4106 {
4107 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4108 {
4109 /*
4110 * With APIC-register virtualization, a write access to any of the
4111 * following registers is virtualized. Accessing any other register
4112 * causes a VM-exit.
4113 */
4114 uint16_t const offAlignedAccess = offAccess & 0xfffc;
4115 switch (offAlignedAccess)
4116 {
4117 case XAPIC_OFF_ID:
4118 case XAPIC_OFF_TPR:
4119 case XAPIC_OFF_EOI:
4120 case XAPIC_OFF_LDR:
4121 case XAPIC_OFF_DFR:
4122 case XAPIC_OFF_SVR:
4123 case XAPIC_OFF_ESR:
4124 case XAPIC_OFF_ICR_LO:
4125 case XAPIC_OFF_ICR_HI:
4126 case XAPIC_OFF_LVT_TIMER:
4127 case XAPIC_OFF_LVT_THERMAL:
4128 case XAPIC_OFF_LVT_PERF:
4129 case XAPIC_OFF_LVT_LINT0:
4130 case XAPIC_OFF_LVT_LINT1:
4131 case XAPIC_OFF_LVT_ERROR:
4132 case XAPIC_OFF_TIMER_ICR:
4133 case XAPIC_OFF_TIMER_DCR:
4134 break;
4135 default:
4136 return true;
4137 }
4138 }
4139 else if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4140 {
4141 /*
4142 * With virtual-interrupt delivery, a write access to any of the
4143 * following registers is virtualized. Accessing any other register
4144 * causes a VM-exit.
4145 *
4146 * Note! Unlike read accesses, the specification does not allow writes to
4147 * offsets in-between these registers (e.g. TPR + 1 byte).
4148 */
4149 switch (offAccess)
4150 {
4151 case XAPIC_OFF_TPR:
4152 case XAPIC_OFF_EOI:
4153 case XAPIC_OFF_ICR_LO:
4154 break;
4155 default:
4156 return true;
4157 }
4158 }
4159 else
4160 {
4161 /*
4162 * Without APIC-register virtualization or virtual-interrupt delivery,
4163 * only TPR accesses are virtualized.
4164 */
4165 if (offAccess == XAPIC_OFF_TPR)
4166 { /* likely */ }
4167 else
4168 return true;
4169 }
4170 }
4171 else
4172 {
4173 /*
4174 * Check read accesses to the APIC-access page that cause VM-exits.
4175 */
4176 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4177 {
4178 /*
4179 * With APIC-register virtualization, a read access to any of the
4180 * following registers is virtualized. Accessing any other register
4181 * causes a VM-exit.
4182 */
4183 uint16_t const offAlignedAccess = offAccess & 0xfffc;
4184 switch (offAlignedAccess)
4185 {
4186 /** @todo r=ramshankar: What about XAPIC_OFF_LVT_CMCI? */
4187 case XAPIC_OFF_ID:
4188 case XAPIC_OFF_VERSION:
4189 case XAPIC_OFF_TPR:
4190 case XAPIC_OFF_EOI:
4191 case XAPIC_OFF_LDR:
4192 case XAPIC_OFF_DFR:
4193 case XAPIC_OFF_SVR:
4194 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
4195 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
4196 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
4197 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
4198 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
4199 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
4200 case XAPIC_OFF_ESR:
4201 case XAPIC_OFF_ICR_LO:
4202 case XAPIC_OFF_ICR_HI:
4203 case XAPIC_OFF_LVT_TIMER:
4204 case XAPIC_OFF_LVT_THERMAL:
4205 case XAPIC_OFF_LVT_PERF:
4206 case XAPIC_OFF_LVT_LINT0:
4207 case XAPIC_OFF_LVT_LINT1:
4208 case XAPIC_OFF_LVT_ERROR:
4209 case XAPIC_OFF_TIMER_ICR:
4210 case XAPIC_OFF_TIMER_DCR:
4211 break;
4212 default:
4213 return true;
4214 }
4215 }
4216 else
4217 {
4218 /* Without APIC-register virtualization, only TPR accesses are virtualized. */
4219 if (offAccess == XAPIC_OFF_TPR)
4220 { /* likely */ }
4221 else
4222 return true;
4223 }
4224 }
4225
4226 /* The APIC access is virtualized and does not cause a VM-exit. */
4227 return false;
4228}
4229
4230
4231/**
4232 * Virtualizes a memory-based APIC access where the address is not used to access
4233 * memory.
4234 *
4235 * This is for instructions like MONITOR, CLFLUSH, CLFLUSHOPT, ENTER which may cause
4236 * page-faults but do not use the address to access memory.
4237 *
4238 * @param pVCpu The cross context virtual CPU structure.
4239 * @param pGCPhysAccess Pointer to the guest-physical address used.
4240 */
4241IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPUCC pVCpu, PRTGCPHYS pGCPhysAccess)
4242{
4243 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
4244 Assert(pGCPhysAccess);
4245
4246 RTGCPHYS const GCPhysAccess = *pGCPhysAccess & ~(RTGCPHYS)PAGE_OFFSET_MASK;
4247 RTGCPHYS const GCPhysApic = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u64AddrApicAccess.u;
4248 Assert(!(GCPhysApic & PAGE_OFFSET_MASK));
4249
4250 if (GCPhysAccess == GCPhysApic)
4251 {
4252 uint16_t const offAccess = *pGCPhysAccess & PAGE_OFFSET_MASK;
4253 uint32_t const fAccess = IEM_ACCESS_TYPE_READ;
4254 uint16_t const cbAccess = 1;
4255 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess);
4256 if (fIntercept)
4257 return iemVmxVmexitApicAccess(pVCpu, offAccess, fAccess);
4258
4259 *pGCPhysAccess = GCPhysApic | offAccess;
4260 return VINF_VMX_MODIFIES_BEHAVIOR;
4261 }
4262
4263 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4264}
4265
4266
4267/**
4268 * Virtualizes a memory-based APIC access.
4269 *
4270 * @returns VBox strict status code.
4271 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the access was virtualized.
4272 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
4273 *
4274 * @param pVCpu The cross context virtual CPU structure.
4275 * @param offAccess The offset of the register being accessed (within the
4276 * APIC-access page).
4277 * @param cbAccess The size of the access in bytes.
4278 * @param pvData Pointer to the data being written or where to store the data
4279 * being read.
4280 * @param fAccess The type of access (must contain IEM_ACCESS_TYPE_READ or
4281 * IEM_ACCESS_TYPE_WRITE or IEM_ACCESS_INSTRUCTION).
4282 */
4283IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData,
4284 uint32_t fAccess)
4285{
4286 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
4287 Assert(pvData);
4288 Assert( (fAccess & IEM_ACCESS_TYPE_READ)
4289 || (fAccess & IEM_ACCESS_TYPE_WRITE)
4290 || (fAccess & IEM_ACCESS_INSTRUCTION));
4291
4292 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess);
4293 if (fIntercept)
4294 return iemVmxVmexitApicAccess(pVCpu, offAccess, fAccess);
4295
4296 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4297 {
4298 /*
4299 * A write access to the APIC-access page that is virtualized (rather than
4300 * causing a VM-exit) writes data to the virtual-APIC page.
4301 */
4302 uint32_t const u32Data = *(uint32_t *)pvData;
4303 iemVmxVirtApicWriteRaw32(pVCpu, offAccess, u32Data);
4304
4305 /*
4306 * Record the currently updated APIC offset, as we need this later for figuring
4307 * out whether to perform TPR, EOI or self-IPI virtualization, as well as for
4308 * supplying the exit qualification when causing an APIC-write VM-exit.
4309 *
4310 * After completion of the current operation, we need to perform TPR virtualization,
4311 * EOI virtualization or APIC-write VM-exit depending on which register was written.
4312 *
4313 * The current operation may be a REP-prefixed string instruction, execution of any
4314 * other instruction, or delivery of an event through the IDT.
4315 *
4316 * Thus things like clearing bytes 3:1 of the VTPR, clearing VEOI are not to be
4317 * performed now but later after completion of the current operation.
4318 *
4319 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4320 */
4321 iemVmxVirtApicSetPendingWrite(pVCpu, offAccess);
4322 }
4323 else
4324 {
4325 /*
4326 * A read access from the APIC-access page that is virtualized (rather than
4327 * causing a VM-exit) returns data from the virtual-APIC page.
4328 *
4329 * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
4330 */
4331 Assert(cbAccess <= 4);
4332 Assert(offAccess < XAPIC_OFF_END + 4);
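        /* The mask table keeps only the low cbAccess bytes of the 32-bit register value,
           e.g. a 2-byte read uses the mask 0xffff. */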
4333 static uint32_t const s_auAccessSizeMasks[] = { 0, 0xff, 0xffff, 0xffffff, 0xffffffff };
4334
4335 uint32_t u32Data = iemVmxVirtApicReadRaw32(pVCpu, offAccess);
4336 u32Data &= s_auAccessSizeMasks[cbAccess];
4337 *(uint32_t *)pvData = u32Data;
4338 }
4339
4340 return VINF_VMX_MODIFIES_BEHAVIOR;
4341}
4342
4343
4344/**
4345 * Virtualizes an MSR-based APIC read access.
4346 *
4347 * @returns VBox strict status code.
4348 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR read was virtualized.
4349 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR read access must be
4350 * handled by the x2APIC device.
4351 * @retval VERR_OUT_OF_RANGE if the MSR read was supposed to be virtualized but the MSR
4352 * is not within the range of valid MSRs; the caller must raise \#GP(0).
4353 * @param pVCpu The cross context virtual CPU structure.
4354 * @param idMsr The x2APIC MSR being read.
4355 * @param pu64Value Where to store the read x2APIC MSR value (only valid when
4356 * VINF_VMX_MODIFIES_BEHAVIOR is returned).
4357 */
4358IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value)
4359{
4360 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
4361 Assert(pu64Value);
4362
4363 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4364 {
4365 if ( idMsr >= MSR_IA32_X2APIC_START
4366 && idMsr <= MSR_IA32_X2APIC_END)
4367 {
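            /* Each x2APIC MSR maps to the xAPIC register at offset (MSR index & 0xff) * 16,
               e.g. MSR 0x808 (TPR) maps to virtual-APIC page offset 0x80. */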
4368 uint16_t const offReg = (idMsr & 0xff) << 4;
4369 uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
4370 *pu64Value = u64Value;
4371 return VINF_VMX_MODIFIES_BEHAVIOR;
4372 }
4373 return VERR_OUT_OF_RANGE;
4374 }
4375
4376 if (idMsr == MSR_IA32_X2APIC_TPR)
4377 {
4378 uint16_t const offReg = (idMsr & 0xff) << 4;
4379 uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
4380 *pu64Value = u64Value;
4381 return VINF_VMX_MODIFIES_BEHAVIOR;
4382 }
4383
4384 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4385}
4386
4387
4388/**
4389 * Virtualizes an MSR-based APIC write access.
4390 *
4391 * @returns VBox strict status code.
4392 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR write was virtualized.
4393 * @retval VERR_OUT_OF_RANGE if the MSR write was supposed to be virtualized but the MSR
4394 * is not within the range of valid MSRs; the caller must raise \#GP(0).
4395 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR must be written normally.
4396 *
4397 * @param pVCpu The cross context virtual CPU structure.
4398 * @param idMsr The x2APIC MSR being written.
4399 * @param u64Value The value of the x2APIC MSR being written.
4400 */
4401IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Value)
4402{
4403 /*
4404 * Check if the access is to be virtualized.
4405 * See Intel spec. 29.5 "Virtualizing MSR-based APIC Accesses".
4406 */
4407 if ( idMsr == MSR_IA32_X2APIC_TPR
4408 || ( (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4409 && ( idMsr == MSR_IA32_X2APIC_EOI
4410 || idMsr == MSR_IA32_X2APIC_SELF_IPI)))
4411 {
4412 /* Validate the MSR write depending on the register. */
4413 switch (idMsr)
4414 {
4415 case MSR_IA32_X2APIC_TPR:
4416 case MSR_IA32_X2APIC_SELF_IPI:
4417 {
4418 if (u64Value & UINT64_C(0xffffffffffffff00))
4419 return VERR_OUT_OF_RANGE;
4420 break;
4421 }
4422 case MSR_IA32_X2APIC_EOI:
4423 {
4424 if (u64Value != 0)
4425 return VERR_OUT_OF_RANGE;
4426 break;
4427 }
4428 }
4429
4430 /* Write the MSR to the virtual-APIC page. */
4431 uint16_t const offReg = (idMsr & 0xff) << 4;
4432 iemVmxVirtApicWriteRaw64(pVCpu, offReg, u64Value);
4433
4434 /*
4435 * Record the currently updated APIC offset, as we need this later for figuring
4436 * out whether to perform TPR, EOI or self-IPI virtualization, as well as for
4437 * supplying the exit qualification when causing an APIC-write VM-exit.
4438 */
4439 iemVmxVirtApicSetPendingWrite(pVCpu, offReg);
4440
4441 return VINF_VMX_MODIFIES_BEHAVIOR;
4442 }
4443
4444 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4445}
4446
4447
4448/**
4449 * Finds the most significant set bit in a virtual-APIC 256-bit sparse register.
4450 *
4451 * @returns VBox status code.
4452 * @retval VINF_SUCCESS when the highest set bit is found.
4453 * @retval VERR_NOT_FOUND when no bit is set.
4454 *
4455 * @param pVCpu The cross context virtual CPU structure.
4456 * @param offReg The offset of the APIC 256-bit sparse register.
4457 * @param pidxHighestBit Where to store the highest bit (most significant bit)
4458 * set in the register. Only valid when VINF_SUCCESS is
4459 * returned.
4460 *
4461 * @remarks The format of the 256-bit sparse register here mirrors that found in
4462 * real APIC hardware.
4463 */
4464static int iemVmxVirtApicGetHighestSetBitInReg(PVMCPUCC pVCpu, uint16_t offReg, uint8_t *pidxHighestBit)
4465{
4466 Assert(offReg < XAPIC_OFF_END + 4);
4467 Assert(pidxHighestBit);
4468
4469 /*
4470 * There are 8 contiguous fragments (of 16-bytes each) in the sparse register.
4471 * However, in each fragment only the first 4 bytes are used.
4472 */
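    /* E.g. for the ISR, the eight fragments are read at XAPIC_OFF_ISR0 + 0x00, 0x10, ..., 0x70. */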
4473 uint8_t const cFrags = 8;
4474 for (int8_t iFrag = cFrags - 1; iFrag >= 0; iFrag--)
4475 {
4476 uint16_t const offFrag = iFrag * 16;
4477 uint32_t const u32Frag = iemVmxVirtApicReadRaw32(pVCpu, offReg + offFrag);
4478 if (!u32Frag)
4479 continue;
4480
4481 unsigned idxHighestBit = ASMBitLastSetU32(u32Frag);
4482 Assert(idxHighestBit > 0);
4483 --idxHighestBit;
4484 Assert(idxHighestBit <= UINT8_MAX);
4485 *pidxHighestBit = idxHighestBit;
4486 return VINF_SUCCESS;
4487 }
4488 return VERR_NOT_FOUND;
4489}
4490
4491
4492/**
4493 * Evaluates pending virtual interrupts.
4494 *
4495 * @param pVCpu The cross context virtual CPU structure.
4496 */
4497IEM_STATIC void iemVmxEvalPendingVirtIntrs(PVMCPUCC pVCpu)
4498{
4499 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4500
4501 if (!(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4502 {
4503 uint8_t const uRvi = RT_LO_U8(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u16GuestIntStatus);
4504 uint8_t const uPpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_PPR);
4505
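        /* Only the priority classes (bits 7:4) are compared; a pending virtual interrupt is
           signalled when RVI's priority class exceeds PPR's. */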
4506 if ((uRvi >> 4) > (uPpr >> 4))
4507 {
4508 Log2(("eval_virt_intrs: uRvi=%#x uPpr=%#x - Signalling pending interrupt\n", uRvi, uPpr));
4509 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
4510 }
4511 else
4512 Log2(("eval_virt_intrs: uRvi=%#x uPpr=%#x - Nothing to do\n", uRvi, uPpr));
4513 }
4514}
4515
4516
4517/**
4518 * Performs PPR virtualization.
4519 *
4521 * @param pVCpu The cross context virtual CPU structure.
4522 */
4523IEM_STATIC void iemVmxPprVirtualization(PVMCPUCC pVCpu)
4524{
4525 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4526 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4527
4528 /*
4529 * PPR virtualization is caused in response to a VM-entry, TPR-virtualization,
4530 * or EOI-virtualization.
4531 *
4532 * See Intel spec. 29.1.3 "PPR Virtualization".
4533 */
4534 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4535 uint32_t const uSvi = RT_HI_U8(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u16GuestIntStatus);
4536
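    /* VPPR = VTPR[7:0] if VTPR's priority class (bits 7:4) >= SVI's class, else SVI & 0xf0. */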
4537 uint32_t uPpr;
4538 if (((uTpr >> 4) & 0xf) >= ((uSvi >> 4) & 0xf))
4539 uPpr = uTpr & 0xff;
4540 else
4541 uPpr = uSvi & 0xf0;
4542
4543 Log2(("ppr_virt: uTpr=%#x uSvi=%#x uPpr=%#x\n", uTpr, uSvi, uPpr));
4544 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_PPR, uPpr);
4545}
4546
4547
4548/**
4549 * Performs VMX TPR virtualization.
4550 *
4551 * @returns VBox strict status code.
4552 * @param pVCpu The cross context virtual CPU structure.
4553 */
4554IEM_STATIC VBOXSTRICTRC iemVmxTprVirtualization(PVMCPUCC pVCpu)
4555{
4556 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4557
4558 /*
4559 * We should have already performed the virtual-APIC write to the TPR offset
4560 * in the virtual-APIC page. We now perform TPR virtualization.
4561 *
4562 * See Intel spec. 29.1.2 "TPR Virtualization".
4563 */
4564 if (!(pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
4565 {
4566 uint32_t const uTprThreshold = pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32TprThreshold;
4567 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4568
4569 /*
4570 * If the VTPR falls below the TPR threshold, we must cause a VM-exit.
4571 * See Intel spec. 29.1.2 "TPR Virtualization".
4572 */
4573 if (((uTpr >> 4) & 0xf) < uTprThreshold)
4574 {
4575 Log2(("tpr_virt: uTpr=%u uTprThreshold=%u -> VM-exit\n", uTpr, uTprThreshold));
4576 return iemVmxVmexit(pVCpu, VMX_EXIT_TPR_BELOW_THRESHOLD, 0 /* u64ExitQual */);
4577 }
4578 }
4579 else
4580 {
4581 iemVmxPprVirtualization(pVCpu);
4582 iemVmxEvalPendingVirtIntrs(pVCpu);
4583 }
4584
4585 return VINF_SUCCESS;
4586}
4587
4588
4589/**
4590 * Checks whether an EOI write for the given interrupt vector causes a VM-exit or
4591 * not.
4592 *
4593 * @returns @c true if the EOI write is intercepted, @c false otherwise.
4594 * @param pVCpu The cross context virtual CPU structure.
4595 * @param uVector The interrupt that was acknowledged using an EOI.
4596 */
4597IEM_STATIC bool iemVmxIsEoiInterceptSet(PCVMCPU pVCpu, uint8_t uVector)
4598{
4599 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4600 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4601
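    /* The four 64-bit EOI-exit bitmaps together cover vectors 0..255; bit (uVector % 64) of
       bitmap (uVector / 64) determines whether the EOI write is intercepted. */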
4602 if (uVector < 64)
4603 return RT_BOOL(pVmcs->u64EoiExitBitmap0.u & RT_BIT_64(uVector));
4604 if (uVector < 128)
4605 return RT_BOOL(pVmcs->u64EoiExitBitmap1.u & RT_BIT_64(uVector));
4606 if (uVector < 192)
4607 return RT_BOOL(pVmcs->u64EoiExitBitmap2.u & RT_BIT_64(uVector));
4608 return RT_BOOL(pVmcs->u64EoiExitBitmap3.u & RT_BIT_64(uVector));
4609}
4610
4611
4612/**
4613 * Performs EOI virtualization.
4614 *
4615 * @returns VBox strict status code.
4616 * @param pVCpu The cross context virtual CPU structure.
4617 */
4618IEM_STATIC VBOXSTRICTRC iemVmxEoiVirtualization(PVMCPUCC pVCpu)
4619{
4620 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4621 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4622
4623 /*
4624 * Mark the interrupt indicated by SVI as no longer in-service by clearing it in the ISR,
4625 * and get the next guest interrupt that's in-service (if any).
4626 *
4627 * See Intel spec. 29.1.4 "EOI Virtualization".
4628 */
4629 uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
4630 uint8_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
4631 Log2(("eoi_virt: uRvi=%#x uSvi=%#x\n", uRvi, uSvi));
4632
4633 uint8_t uVector = uSvi;
4634 iemVmxVirtApicClearVectorInReg(pVCpu, XAPIC_OFF_ISR0, uVector);
4635
4636 uVector = 0;
4637 iemVmxVirtApicGetHighestSetBitInReg(pVCpu, XAPIC_OFF_ISR0, &uVector);
4638
4639 if (uVector)
4640 Log2(("eoi_virt: next interrupt %#x\n", uVector));
4641 else
4642 Log2(("eoi_virt: no interrupt pending in ISR\n"));
4643
4644 /* Update guest-interrupt status SVI (leave RVI portion as it is) in the VMCS. */
4645 pVmcs->u16GuestIntStatus = RT_MAKE_U16(uRvi, uVector);
4646
4647 iemVmxPprVirtualization(pVCpu);
4648 if (iemVmxIsEoiInterceptSet(pVCpu, uVector))
4649 return iemVmxVmexit(pVCpu, VMX_EXIT_VIRTUALIZED_EOI, uVector);
4650 iemVmxEvalPendingVirtIntrs(pVCpu);
4651 return VINF_SUCCESS;
4652}
4653
4654
4655/**
4656 * Performs self-IPI virtualization.
4657 *
4658 * @returns VBox strict status code.
4659 * @param pVCpu The cross context virtual CPU structure.
4660 */
4661IEM_STATIC VBOXSTRICTRC iemVmxSelfIpiVirtualization(PVMCPUCC pVCpu)
4662{
4663 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4664 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4665
4666 /*
4667 * We should have already performed the virtual-APIC write to the self-IPI offset
4668 * in the virtual-APIC page. We now perform self-IPI virtualization.
4669 *
4670 * See Intel spec. 29.1.5 "Self-IPI Virtualization".
4671 */
4672 uint8_t const uVector = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_LO);
4673 Log2(("self_ipi_virt: uVector=%#x\n", uVector));
4674 iemVmxVirtApicSetVectorInReg(pVCpu, XAPIC_OFF_IRR0, uVector);
4675 uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
4676 uint8_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
4677 if (uVector > uRvi)
4678 pVmcs->u16GuestIntStatus = RT_MAKE_U16(uVector, uSvi);
4679 iemVmxEvalPendingVirtIntrs(pVCpu);
4680 return VINF_SUCCESS;
4681}
4682
4683
4684/**
4685 * Performs VMX APIC-write emulation.
4686 *
4687 * @returns VBox strict status code.
4688 * @param pVCpu The cross context virtual CPU structure.
4689 */
4690IEM_STATIC VBOXSTRICTRC iemVmxApicWriteEmulation(PVMCPUCC pVCpu)
4691{
4692 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4693
4694 /* Import the virtual-APIC write offset (part of the hardware-virtualization state). */
4695 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
4696
4697 /*
4698 * Perform APIC-write emulation based on the virtual-APIC register written.
4699 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4700 */
4701 uint16_t const offApicWrite = iemVmxVirtApicClearPendingWrite(pVCpu);
4702 VBOXSTRICTRC rcStrict;
4703 switch (offApicWrite)
4704 {
4705 case XAPIC_OFF_TPR:
4706 {
4707 /* Clear bytes 3:1 of the VTPR and perform TPR virtualization. */
4708 uint32_t uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4709 uTpr &= UINT32_C(0x000000ff);
4710 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_TPR, uTpr);
4711 Log2(("iemVmxApicWriteEmulation: TPR write %#x\n", uTpr));
4712 rcStrict = iemVmxTprVirtualization(pVCpu);
4713 break;
4714 }
4715
4716 case XAPIC_OFF_EOI:
4717 {
4718 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4719 {
4720 /* Clear VEOI and perform EOI virtualization. */
4721 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_EOI, 0);
4722 Log2(("iemVmxApicWriteEmulation: EOI write\n"));
4723 rcStrict = iemVmxEoiVirtualization(pVCpu);
4724 }
4725 else
4726 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4727 break;
4728 }
4729
4730 case XAPIC_OFF_ICR_LO:
4731 {
4732 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4733 {
4734 /* If the ICR_LO value just written is valid, perform self-IPI virtualization. */
4735 uint32_t const uIcrLo = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_LO);
4736 uint32_t const fIcrLoMb0 = UINT32_C(0xfffbb700);
4737 uint32_t const fIcrLoMb1 = UINT32_C(0x000000f0);
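                    /* Roughly: all bits in fIcrLoMb0 (reserved bits, unsupported delivery modes,
                       level trigger, etc.) must be zero and the vector's upper nibble (fIcrLoMb1)
                       must be non-zero, i.e. the vector must be 0x10 or higher. */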
4738 if ( !(uIcrLo & fIcrLoMb0)
4739 && (uIcrLo & fIcrLoMb1))
4740 {
4741 Log2(("iemVmxApicWriteEmulation: Self-IPI virtualization with vector %#x\n", (uIcrLo & 0xff)));
4742 rcStrict = iemVmxSelfIpiVirtualization(pVCpu);
4743 }
4744 else
4745 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4746 }
4747 else
4748 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4749 break;
4750 }
4751
4752 case XAPIC_OFF_ICR_HI:
4753 {
4754 /* Clear bytes 2:0 of VICR_HI; no other virtualization is performed and no VM-exit occurs. */
4755 uint32_t uIcrHi = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_HI);
4756 uIcrHi &= UINT32_C(0xff000000);
4757 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_ICR_HI, uIcrHi);
4758 rcStrict = VINF_SUCCESS;
4759 break;
4760 }
4761
4762 default:
4763 {
4764 /* Writes to any other virtual-APIC register causes an APIC-write VM-exit. */
4765 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4766 break;
4767 }
4768 }
4769
4770 return rcStrict;
4771}
4772
4773
4774/**
4775 * Checks guest control registers, debug registers and MSRs as part of VM-entry.
4776 *
4777 * @param pVCpu The cross context virtual CPU structure.
4778 * @param pszInstr The VMX instruction name (for logging purposes).
4779 */
4780DECLINLINE(int) iemVmxVmentryCheckGuestControlRegsMsrs(PVMCPUCC pVCpu, const char *pszInstr)
4781{
4782 /*
4783 * Guest Control Registers, Debug Registers, and MSRs.
4784 * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs".
4785 */
4786 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4787 const char * const pszFailure = "VM-exit";
4788 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
4789
4790 /* CR0 reserved bits. */
4791 {
4792 /* CR0 MB1 bits. */
4793 uint64_t u64Cr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
4794 Assert(!(u64Cr0Fixed0 & (X86_CR0_NW | X86_CR0_CD)));
4795 if (fUnrestrictedGuest)
4796 u64Cr0Fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
4797 if ((pVmcs->u64GuestCr0.u & u64Cr0Fixed0) == u64Cr0Fixed0)
4798 { /* likely */ }
4799 else
4800 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed0);
4801
4802 /* CR0 MBZ bits. */
4803 uint64_t const u64Cr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
4804 if (!(pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1))
4805 { /* likely */ }
4806 else
4807 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed1);
4808
4809 /* Without unrestricted guest support, VT-x does not support unpaged protected mode. */
4810 if ( !fUnrestrictedGuest
4811 && (pVmcs->u64GuestCr0.u & X86_CR0_PG)
4812 && !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
4813 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0PgPe);
4814 }
4815
4816 /* CR4 reserved bits. */
4817 {
4818 /* CR4 MB1 bits. */
4819 uint64_t const u64Cr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
4820 if ((pVmcs->u64GuestCr4.u & u64Cr4Fixed0) == u64Cr4Fixed0)
4821 { /* likely */ }
4822 else
4823 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed0);
4824
4825 /* CR4 MBZ bits. */
4826 uint64_t const u64Cr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
4827 if (!(pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1))
4828 { /* likely */ }
4829 else
4830 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed1);
4831 }
4832
4833 /* DEBUGCTL MSR. */
4834 if ( !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4835 || !(pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
4836 { /* likely */ }
4837 else
4838 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDebugCtl);
4839
4840 /* 64-bit CPU checks. */
4841 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4842 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4843 {
4844 if (fGstInLongMode)
4845 {
4846 /* PAE must be set. */
4847 if ( (pVmcs->u64GuestCr0.u & X86_CR0_PG)
4848 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE))
4849 { /* likely */ }
4850 else
4851 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPae);
4852 }
4853 else
4854 {
4855 /* PCIDE should not be set. */
4856 if (!(pVmcs->u64GuestCr4.u & X86_CR4_PCIDE))
4857 { /* likely */ }
4858 else
4859 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPcide);
4860 }
4861
4862 /* CR3. */
4863 if (!(pVmcs->u64GuestCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
4864 { /* likely */ }
4865 else
4866 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr3);
4867
4868 /* DR7. */
4869 if ( !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4870 || !(pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
4871 { /* likely */ }
4872 else
4873 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDr7);
4874
4875 /* SYSENTER ESP and SYSENTER EIP. */
4876 if ( X86_IS_CANONICAL(pVmcs->u64GuestSysenterEsp.u)
4877 && X86_IS_CANONICAL(pVmcs->u64GuestSysenterEip.u))
4878 { /* likely */ }
4879 else
4880 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSysenterEspEip);
4881 }
4882
4883 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
4884 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
4885
4886 /* PAT MSR. */
4887 if ( !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4888 || CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
4889 { /* likely */ }
4890 else
4891 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPatMsr);
4892
4893 /* EFER MSR. */
4894 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4895 {
4896 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
4897 if (!(pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
4898 { /* likely */ }
4899 else
4900 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsrRsvd);
4901
4902 bool const fGstLma = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_LMA);
4903 bool const fGstLme = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_LME);
4904 if ( fGstLma == fGstInLongMode
4905 && ( !(pVmcs->u64GuestCr0.u & X86_CR0_PG)
4906 || fGstLma == fGstLme))
4907 { /* likely */ }
4908 else
4909 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsr);
4910 }
4911
4912 /* We don't support IA32_BNDCFGS MSR yet. */
4913 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
4914
4915 NOREF(pszInstr);
4916 NOREF(pszFailure);
4917 return VINF_SUCCESS;
4918}
4919
4920
4921/**
4922 * Checks guest segment registers, LDTR and TR as part of VM-entry.
4923 *
4924 * @param pVCpu The cross context virtual CPU structure.
4925 * @param pszInstr The VMX instruction name (for logging purposes).
4926 */
4927DECLINLINE(int) iemVmxVmentryCheckGuestSegRegs(PVMCPUCC pVCpu, const char *pszInstr)
4928{
4929 /*
4930 * Segment registers.
4931 * See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
4932 */
4933 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
4934 const char * const pszFailure = "VM-exit";
4935 bool const fGstInV86Mode = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);
4936 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
4937 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4938
4939 /* Selectors. */
4940 if ( !fGstInV86Mode
4941 && !fUnrestrictedGuest
4942 && (pVmcs->GuestSs & X86_SEL_RPL) != (pVmcs->GuestCs & X86_SEL_RPL))
4943 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelCsSsRpl);
4944
4945 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
4946 {
4947 CPUMSELREG SelReg;
4948 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &SelReg);
4949 if (RT_LIKELY(rc == VINF_SUCCESS))
4950 { /* likely */ }
4951 else
4952 return rc;
4953
4954 /*
4955 * Virtual-8086 mode checks.
4956 */
4957 if (fGstInV86Mode)
4958 {
4959 /* Base address. */
4960 if (SelReg.u64Base == (uint64_t)SelReg.Sel << 4)
4961 { /* likely */ }
4962 else
4963 {
4964 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBaseV86(iSegReg);
4965 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4966 }
4967
4968 /* Limit. */
4969 if (SelReg.u32Limit == 0xffff)
4970 { /* likely */ }
4971 else
4972 {
4973 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegLimitV86(iSegReg);
4974 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4975 }
4976
4977 /* Attribute. */
4978 if (SelReg.Attr.u == 0xf3)
4979 { /* likely */ }
4980 else
4981 {
4982 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrV86(iSegReg);
4983 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4984 }
4985
4986 /* We're done; move to checking the next segment. */
4987 continue;
4988 }
4989
4990 /* Checks done by 64-bit CPUs. */
4991 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4992 {
4993 /* Base address. */
4994 if ( iSegReg == X86_SREG_FS
4995 || iSegReg == X86_SREG_GS)
4996 {
4997 if (X86_IS_CANONICAL(SelReg.u64Base))
4998 { /* likely */ }
4999 else
5000 {
5001 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
5002 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5003 }
5004 }
5005 else if (iSegReg == X86_SREG_CS)
5006 {
5007 if (!RT_HI_U32(SelReg.u64Base))
5008 { /* likely */ }
5009 else
5010 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseCs);
5011 }
5012 else
5013 {
5014 if ( SelReg.Attr.n.u1Unusable
5015 || !RT_HI_U32(SelReg.u64Base))
5016 { /* likely */ }
5017 else
5018 {
5019 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
5020 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5021 }
5022 }
5023 }
5024
5025 /*
5026 * Checks outside Virtual-8086 mode.
5027 */
5028 uint8_t const uSegType = SelReg.Attr.n.u4Type;
5029 uint8_t const fCodeDataSeg = SelReg.Attr.n.u1DescType;
5030 uint8_t const fUsable = !SelReg.Attr.n.u1Unusable;
5031 uint8_t const uDpl = SelReg.Attr.n.u2Dpl;
5032 uint8_t const fPresent = SelReg.Attr.n.u1Present;
5033 uint8_t const uGranularity = SelReg.Attr.n.u1Granularity;
5034 uint8_t const uDefBig = SelReg.Attr.n.u1DefBig;
5035 uint8_t const fSegLong = SelReg.Attr.n.u1Long;
5036
5037 /* Code or usable segment. */
5038 if ( iSegReg == X86_SREG_CS
5039 || fUsable)
5040 {
5041 /* Reserved bits (bits 31:17 and bits 11:8). */
5042 if (!(SelReg.Attr.u & 0xfffe0f00))
5043 { /* likely */ }
5044 else
5045 {
5046 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrRsvd(iSegReg);
5047 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5048 }
5049
5050 /* Descriptor type. */
5051 if (fCodeDataSeg)
5052 { /* likely */ }
5053 else
5054 {
5055 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDescType(iSegReg);
5056 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5057 }
5058
5059 /* Present. */
5060 if (fPresent)
5061 { /* likely */ }
5062 else
5063 {
5064 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrPresent(iSegReg);
5065 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5066 }
5067
5068 /* Granularity. */
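            /* If any of limit bits 11:0 is zero the granularity bit must be clear; if any of
               limit bits 31:20 is set it must be set. */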
5069 if ( ((SelReg.u32Limit & 0x00000fff) == 0x00000fff || !uGranularity)
5070 && ((SelReg.u32Limit & 0xfff00000) == 0x00000000 || uGranularity))
5071 { /* likely */ }
5072 else
5073 {
5074 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrGran(iSegReg);
5075 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5076 }
5077 }
5078
5079 if (iSegReg == X86_SREG_CS)
5080 {
5081 /* Segment Type and DPL. */
5082 if ( uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
5083 && fUnrestrictedGuest)
5084 {
5085 if (uDpl == 0)
5086 { /* likely */ }
5087 else
5088 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplZero);
5089 }
5090 else if ( uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_ACCESSED)
5091 || uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
5092 {
5093 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5094 if (uDpl == AttrSs.n.u2Dpl)
5095 { /* likely */ }
5096 else
5097 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplEqSs);
5098 }
5099 else if ((uSegType & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
5100 == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
5101 {
5102 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5103 if (uDpl <= AttrSs.n.u2Dpl)
5104 { /* likely */ }
5105 else
5106 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplLtSs);
5107 }
5108 else
5109 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsType);
5110
5111 /* Def/Big. */
5112 if ( fGstInLongMode
5113 && fSegLong)
5114 {
5115 if (uDefBig == 0)
5116 { /* likely */ }
5117 else
5118 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDefBig);
5119 }
5120 }
5121 else if (iSegReg == X86_SREG_SS)
5122 {
5123 /* Segment Type. */
5124 if ( !fUsable
5125 || uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
5126 || uSegType == (X86_SEL_TYPE_DOWN | X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED))
5127 { /* likely */ }
5128 else
5129 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsType);
5130
5131 /* DPL. */
5132 if (!fUnrestrictedGuest)
5133 {
5134 if (uDpl == (SelReg.Sel & X86_SEL_RPL))
5135 { /* likely */ }
5136 else
5137 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplEqRpl);
5138 }
5139 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
5140 if ( AttrCs.n.u4Type == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
5141 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
5142 {
5143 if (uDpl == 0)
5144 { /* likely */ }
5145 else
5146 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplZero);
5147 }
5148 }
5149 else
5150 {
5151 /* DS, ES, FS, GS. */
5152 if (fUsable)
5153 {
5154 /* Segment type. */
5155 if (uSegType & X86_SEL_TYPE_ACCESSED)
5156 { /* likely */ }
5157 else
5158 {
5159 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrTypeAcc(iSegReg);
5160 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5161 }
5162
5163 if ( !(uSegType & X86_SEL_TYPE_CODE)
5164 || (uSegType & X86_SEL_TYPE_READ))
5165 { /* likely */ }
5166 else
5167 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsTypeRead);
5168
5169 /* DPL. */
5170 if ( !fUnrestrictedGuest
5171 && uSegType <= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
5172 {
5173 if (uDpl >= (SelReg.Sel & X86_SEL_RPL))
5174 { /* likely */ }
5175 else
5176 {
5177 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDplRpl(iSegReg);
5178 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5179 }
5180 }
5181 }
5182 }
5183 }
5184
5185 /*
5186 * LDTR.
5187 */
5188 {
5189 CPUMSELREG Ldtr;
5190 Ldtr.Sel = pVmcs->GuestLdtr;
5191 Ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
5192 Ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
5193 Ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
5194
5195 if (!Ldtr.Attr.n.u1Unusable)
5196 {
5197 /* Selector. */
5198 if (!(Ldtr.Sel & X86_SEL_LDT))
5199 { /* likely */ }
5200 else
5201 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelLdtr);
5202
5203 /* Base. */
5204 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5205 {
5206 if (X86_IS_CANONICAL(Ldtr.u64Base))
5207 { /* likely */ }
5208 else
5209 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseLdtr);
5210 }
5211
5212 /* Attributes. */
5213 /* Reserved bits (bits 31:17 and bits 11:8). */
5214 if (!(Ldtr.Attr.u & 0xfffe0f00))
5215 { /* likely */ }
5216 else
5217 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrRsvd);
5218
5219 if (Ldtr.Attr.n.u4Type == X86_SEL_TYPE_SYS_LDT)
5220 { /* likely */ }
5221 else
5222 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrType);
5223
5224 if (!Ldtr.Attr.n.u1DescType)
5225 { /* likely */ }
5226 else
5227 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrDescType);
5228
5229 if (Ldtr.Attr.n.u1Present)
5230 { /* likely */ }
5231 else
5232 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrPresent);
5233
5234 if ( ((Ldtr.u32Limit & 0x00000fff) == 0x00000fff || !Ldtr.Attr.n.u1Granularity)
5235 && ((Ldtr.u32Limit & 0xfff00000) == 0x00000000 || Ldtr.Attr.n.u1Granularity))
5236 { /* likely */ }
5237 else
5238 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrGran);
5239 }
5240 }
5241
5242 /*
5243 * TR.
5244 */
5245 {
5246 CPUMSELREG Tr;
5247 Tr.Sel = pVmcs->GuestTr;
5248 Tr.u32Limit = pVmcs->u32GuestTrLimit;
5249 Tr.u64Base = pVmcs->u64GuestTrBase.u;
5250 Tr.Attr.u = pVmcs->u32GuestTrAttr;
5251
5252 /* Selector. */
5253 if (!(Tr.Sel & X86_SEL_LDT))
5254 { /* likely */ }
5255 else
5256 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelTr);
5257
5258 /* Base. */
5259 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5260 {
5261 if (X86_IS_CANONICAL(Tr.u64Base))
5262 { /* likely */ }
5263 else
5264 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseTr);
5265 }
5266
5267 /* Attributes. */
5268 /* Reserved bits (bits 31:17 and bits 11:8). */
5269 if (!(Tr.Attr.u & 0xfffe0f00))
5270 { /* likely */ }
5271 else
5272 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrRsvd);
5273
5274 if (!Tr.Attr.n.u1Unusable)
5275 { /* likely */ }
5276 else
5277 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrUnusable);
5278
5279 if ( Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY
5280 || ( !fGstInLongMode
5281 && Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY))
5282 { /* likely */ }
5283 else
5284 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrType);
5285
5286 if (!Tr.Attr.n.u1DescType)
5287 { /* likely */ }
5288 else
5289 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrDescType);
5290
5291 if (Tr.Attr.n.u1Present)
5292 { /* likely */ }
5293 else
5294 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrPresent);
5295
5296 if ( ((Tr.u32Limit & 0x00000fff) == 0x00000fff || !Tr.Attr.n.u1Granularity)
5297 && ((Tr.u32Limit & 0xfff00000) == 0x00000000 || Tr.Attr.n.u1Granularity))
5298 { /* likely */ }
5299 else
5300 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrGran);
5301 }
5302
5303 NOREF(pszInstr);
5304 NOREF(pszFailure);
5305 return VINF_SUCCESS;
5306}
5307
5308
5309/**
5310 * Checks guest GDTR and IDTR as part of VM-entry.
5311 *
5312 * @param pVCpu The cross context virtual CPU structure.
5313 * @param pszInstr The VMX instruction name (for logging purposes).
5314 */
5315DECLINLINE(int) iemVmxVmentryCheckGuestGdtrIdtr(PVMCPUCC pVCpu, const char *pszInstr)
5316{
5317 /*
5318 * GDTR and IDTR.
5319 * See Intel spec. 26.3.1.3 "Checks on Guest Descriptor-Table Registers".
5320 */
5321 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5322 const char *const pszFailure = "VM-exit";
5323
5324 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5325 {
5326 /* Base. */
5327 if (X86_IS_CANONICAL(pVmcs->u64GuestGdtrBase.u))
5328 { /* likely */ }
5329 else
5330 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrBase);
5331
5332 if (X86_IS_CANONICAL(pVmcs->u64GuestIdtrBase.u))
5333 { /* likely */ }
5334 else
5335 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrBase);
5336 }
5337
5338 /* Limit. */
5339 if (!RT_HI_U16(pVmcs->u32GuestGdtrLimit))
5340 { /* likely */ }
5341 else
5342 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrLimit);
5343
5344 if (!RT_HI_U16(pVmcs->u32GuestIdtrLimit))
5345 { /* likely */ }
5346 else
5347 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrLimit);
5348
5349 NOREF(pszInstr);
5350 NOREF(pszFailure);
5351 return VINF_SUCCESS;
5352}
5353
5354
5355/**
5356 * Checks guest RIP and RFLAGS as part of VM-entry.
5357 *
5358 * @param pVCpu The cross context virtual CPU structure.
5359 * @param pszInstr The VMX instruction name (for logging purposes).
5360 */
5361DECLINLINE(int) iemVmxVmentryCheckGuestRipRFlags(PVMCPUCC pVCpu, const char *pszInstr)
5362{
5363 /*
5364 * RIP and RFLAGS.
5365 * See Intel spec. 26.3.1.4 "Checks on Guest RIP and RFLAGS".
5366 */
5367 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5368 const char *const pszFailure = "VM-exit";
5369 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5370
5371 /* RIP. */
5372 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5373 {
5374 X86DESCATTR AttrCs;
5375 AttrCs.u = pVmcs->u32GuestCsAttr;
5376 if ( !fGstInLongMode
5377 || !AttrCs.n.u1Long)
5378 {
5379 if (!RT_HI_U32(pVmcs->u64GuestRip.u))
5380 { /* likely */ }
5381 else
5382 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRipRsvd);
5383 }
5384
5385 if ( fGstInLongMode
5386 && AttrCs.n.u1Long)
5387 {
5388 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth == 48); /* Canonical. */
5389 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth < 64
5390 && X86_IS_CANONICAL(pVmcs->u64GuestRip.u))
5391 { /* likely */ }
5392 else
5393 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRip);
5394 }
5395 }
5396
5397 /* RFLAGS (bits 63:22 (or 31:22), bits 15, 5, 3 are reserved, bit 1 MB1). */
5398 uint64_t const uGuestRFlags = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode ? pVmcs->u64GuestRFlags.u
5399 : pVmcs->u64GuestRFlags.s.Lo;
5400 if ( !(uGuestRFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK))
5401 && (uGuestRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK)
5402 { /* likely */ }
5403 else
5404 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsRsvd);
5405
5406 if ( fGstInLongMode
5407 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
5408 {
5409 if (!(uGuestRFlags & X86_EFL_VM))
5410 { /* likely */ }
5411 else
5412 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsVm);
5413 }
5414
5415 if (VMX_ENTRY_INT_INFO_IS_EXT_INT(pVmcs->u32EntryIntInfo))
5416 {
5417 if (uGuestRFlags & X86_EFL_IF)
5418 { /* likely */ }
5419 else
5420 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsIf);
5421 }
5422
5423 NOREF(pszInstr);
5424 NOREF(pszFailure);
5425 return VINF_SUCCESS;
5426}
5427
5428
5429/**
5430 * Checks guest non-register state as part of VM-entry.
5431 *
5432 * @param pVCpu The cross context virtual CPU structure.
5433 * @param pszInstr The VMX instruction name (for logging purposes).
5434 */
5435DECLINLINE(int) iemVmxVmentryCheckGuestNonRegState(PVMCPUCC pVCpu, const char *pszInstr)
5436{
5437 /*
5438 * Guest non-register state.
5439 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
5440 */
5441 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5442 const char *const pszFailure = "VM-exit";
5443
5444 /*
5445 * Activity state.
5446 */
5447 uint64_t const u64GuestVmxMiscMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Misc;
5448 uint32_t const fActivityStateMask = RT_BF_GET(u64GuestVmxMiscMsr, VMX_BF_MISC_ACTIVITY_STATES);
5449 if (!(pVmcs->u32GuestActivityState & fActivityStateMask))
5450 { /* likely */ }
5451 else
5452 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateRsvd);
5453
5454 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5455 if ( !AttrSs.n.u2Dpl
5456 || pVmcs->u32GuestActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT)
5457 { /* likely */ }
5458 else
5459 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateSsDpl);
5460
5461 if ( pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
5462 || pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5463 {
5464 if (pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE)
5465 { /* likely */ }
5466 else
5467 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateStiMovSs);
5468 }
5469
5470 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
5471 {
5472 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
5473 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(pVmcs->u32EntryIntInfo);
5474 AssertCompile(VMX_V_GUEST_ACTIVITY_STATE_MASK == (VMX_VMCS_GUEST_ACTIVITY_HLT | VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN));
5475 switch (pVmcs->u32GuestActivityState)
5476 {
5477 case VMX_VMCS_GUEST_ACTIVITY_HLT:
5478 {
5479 if ( uType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT
5480 || uType == VMX_ENTRY_INT_INFO_TYPE_NMI
5481 || ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
5482 && ( uVector == X86_XCPT_DB
5483 || uVector == X86_XCPT_MC))
5484 || ( uType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
5485 && uVector == VMX_ENTRY_INT_INFO_VECTOR_MTF))
5486 { /* likely */ }
5487 else
5488 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateHlt);
5489 break;
5490 }
5491
5492 case VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN:
5493 {
5494 if ( uType == VMX_ENTRY_INT_INFO_TYPE_NMI
5495 || ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
5496 && uVector == X86_XCPT_MC))
5497 { /* likely */ }
5498 else
5499 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateShutdown);
5500 break;
5501 }
5502
5503 case VMX_VMCS_GUEST_ACTIVITY_ACTIVE:
5504 default:
5505 break;
5506 }
5507 }
5508
5509 /*
5510 * Interruptibility state.
5511 */
5512 if (!(pVmcs->u32GuestIntrState & ~VMX_VMCS_GUEST_INT_STATE_MASK))
5513 { /* likely */ }
5514 else
5515 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRsvd);
5516
5517 if ((pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5518 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5519 { /* likely */ }
5520 else
5521 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateStiMovSs);
5522
5523 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_IF)
5524 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5525 { /* likely */ }
5526 else
5527 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRFlagsSti);
5528
5529 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
5530 {
5531 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
5532 if (uType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5533 {
5534 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
5535 { /* likely */ }
5536 else
5537 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateExtInt);
5538 }
5539 else if (uType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5540 {
5541 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
5542 { /* likely */ }
5543 else
5544 {
5545 /*
5546 * We don't support injecting NMIs when blocking-by-STI would be in effect.
5547 * We update the Exit qualification only when blocking-by-STI is set
5548 * without blocking-by-MovSS being set. In practice it does not make much
5549 * difference since the order of these checks is implementation defined.
5550 */
5551 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5552 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_NMI_INJECT);
5553 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateNmi);
5554 }
5555
5556 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5557 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI))
5558 { /* likely */ }
5559 else
5560 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateVirtNmi);
5561 }
5562 }
5563
5564 /* We don't support SMM yet. So blocking-by-SMIs must not be set. */
5565 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI))
5566 { /* likely */ }
5567 else
5568 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateSmi);
5569
5570 /* We don't support SGX yet. So enclave-interruption must not be set. */
5571 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_ENCLAVE))
5572 { /* likely */ }
5573 else
5574 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateEnclave);
5575
5576 /*
5577 * Pending debug exceptions.
5578 */
5579 uint64_t const uPendingDbgXcpts = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode
5580 ? pVmcs->u64GuestPendingDbgXcpts.u
5581 : pVmcs->u64GuestPendingDbgXcpts.s.Lo;
5582 if (!(uPendingDbgXcpts & ~VMX_VMCS_GUEST_PENDING_DEBUG_VALID_MASK))
5583 { /* likely */ }
5584 else
5585 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRsvd);
5586
5587 if ( (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5588 || pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5589 {
5590 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_TF)
5591 && !(pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF)
5592 && !(uPendingDbgXcpts & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
5593 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsTf);
5594
5595 if ( ( !(pVmcs->u64GuestRFlags.u & X86_EFL_TF)
5596 || (pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF))
5597 && (uPendingDbgXcpts & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
5598 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsNoTf);
5599 }
5600
5601 /* We don't support RTM (Real-time Transactional Memory) yet. */
5602 if (!(uPendingDbgXcpts & VMX_VMCS_GUEST_PENDING_DEBUG_RTM))
5603 { /* likely */ }
5604 else
5605 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRtm);
5606
5607 /*
5608 * VMCS link pointer.
5609 */
5610 if (pVmcs->u64VmcsLinkPtr.u != UINT64_C(0xffffffffffffffff))
5611 {
5612 RTGCPHYS const GCPhysShadowVmcs = pVmcs->u64VmcsLinkPtr.u;
5613 /* We don't support SMM yet (so VMCS link pointer cannot be the current VMCS). */
5614 if (GCPhysShadowVmcs != IEM_VMX_GET_CURRENT_VMCS(pVCpu))
5615 { /* likely */ }
5616 else
5617 {
5618 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5619 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrCurVmcs);
5620 }
5621
5622 /* Validate the address. */
5623 if ( !(GCPhysShadowVmcs & X86_PAGE_4K_OFFSET_MASK)
5624 && !(GCPhysShadowVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5625 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysShadowVmcs))
5626 { /* likely */ }
5627 else
5628 {
5629 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5630 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmcsLinkPtr);
5631 }
5632 }
5633
5634 NOREF(pszInstr);
5635 NOREF(pszFailure);
5636 return VINF_SUCCESS;
5637}
5638
5639
5640#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5641/**
5642 * Checks guest PDPTEs as part of VM-entry.
5643 *
5644 * @param pVCpu The cross context virtual CPU structure.
5645 * @param pszInstr The VMX instruction name (for logging purposes).
5646 */
5647IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPUCC pVCpu, const char *pszInstr)
5648{
5649 /*
5650 * Guest PDPTEs.
5651 * See Intel spec. 26.3.1.5 "Checks on Guest Page-Directory-Pointer-Table Entries".
5652 */
5653 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5654 const char * const pszFailure = "VM-exit";
5655
5656 /*
5657 * When EPT is used, we only validate the PAE PDPTEs provided in the VMCS.
5658 * Otherwise, we load any PAE PDPTEs referenced by CR3 at a later point.
5659 */
5660 if ( iemVmxVmcsIsGuestPaePagingEnabled(pVmcs)
5661 && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT))
5662 {
5663 /* Get PDPTEs from the VMCS. */
5664 X86PDPE aPaePdptes[X86_PG_PAE_PDPE_ENTRIES];
5665 aPaePdptes[0].u = pVmcs->u64GuestPdpte0.u;
5666 aPaePdptes[1].u = pVmcs->u64GuestPdpte1.u;
5667 aPaePdptes[2].u = pVmcs->u64GuestPdpte2.u;
5668 aPaePdptes[3].u = pVmcs->u64GuestPdpte3.u;
5669
5670 /* Check validity of the PDPTEs. */
5671 bool const fValid = PGMGstArePaePdpesValid(pVCpu, &aPaePdptes[0]);
5672 if (fValid)
5673 { /* likely */ }
5674 else
5675 {
5676 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
5677 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpte);
5678 }
5679 }
5680
5681 NOREF(pszFailure);
5682 NOREF(pszInstr);
5683 return VINF_SUCCESS;
5684}
5685#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
5686
5687
5688/**
5689 * Checks guest-state as part of VM-entry.
5690 *
5691 * @returns VBox status code.
5692 * @param pVCpu The cross context virtual CPU structure.
5693 * @param pszInstr The VMX instruction name (for logging purposes).
5694 */
5695IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPUCC pVCpu, const char *pszInstr)
5696{
5697 int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
5698 if (RT_SUCCESS(rc))
5699 {
5700 rc = iemVmxVmentryCheckGuestSegRegs(pVCpu, pszInstr);
5701 if (RT_SUCCESS(rc))
5702 {
5703 rc = iemVmxVmentryCheckGuestGdtrIdtr(pVCpu, pszInstr);
5704 if (RT_SUCCESS(rc))
5705 {
5706 rc = iemVmxVmentryCheckGuestRipRFlags(pVCpu, pszInstr);
5707 if (RT_SUCCESS(rc))
5708 {
5709 rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
5710#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5711 if (RT_SUCCESS(rc))
5712 rc = iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
5713#endif
5714 }
5715 }
5716 }
5717 }
5718 return rc;
5719}
5720
5721
5722/**
5723 * Checks host-state as part of VM-entry.
5724 *
5725 * @returns VBox status code.
5726 * @param pVCpu The cross context virtual CPU structure.
5727 * @param pszInstr The VMX instruction name (for logging purposes).
5728 */
5729IEM_STATIC int iemVmxVmentryCheckHostState(PVMCPUCC pVCpu, const char *pszInstr)
5730{
5731 /*
5732 * Host Control Registers and MSRs.
5733 * See Intel spec. 26.2.2 "Checks on Host Control Registers and MSRs".
5734 */
5735 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5736 const char * const pszFailure = "VMFail";
5737
5738 /* CR0 reserved bits. */
5739 {
5740 /* CR0 MB1 bits. */
5741 uint64_t const u64Cr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
5742 if ((pVmcs->u64HostCr0.u & u64Cr0Fixed0) == u64Cr0Fixed0)
5743 { /* likely */ }
5744 else
5745 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed0);
5746
5747 /* CR0 MBZ bits. */
5748 uint64_t const u64Cr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
5749 if (!(pVmcs->u64HostCr0.u & ~u64Cr0Fixed1))
5750 { /* likely */ }
5751 else
5752 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed1);
5753 }
5754
5755 /* CR4 reserved bits. */
5756 {
5757 /* CR4 MB1 bits. */
5758 uint64_t const u64Cr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
5759 if ((pVmcs->u64HostCr4.u & u64Cr4Fixed0) == u64Cr4Fixed0)
5760 { /* likely */ }
5761 else
5762 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed0);
5763
5764 /* CR4 MBZ bits. */
5765 uint64_t const u64Cr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
5766 if (!(pVmcs->u64HostCr4.u & ~u64Cr4Fixed1))
5767 { /* likely */ }
5768 else
5769 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed1);
5770 }
5771
5772 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5773 {
5774 /* CR3 reserved bits. */
5775 if (!(pVmcs->u64HostCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
5776 { /* likely */ }
5777 else
5778 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr3);
5779
5780 /* SYSENTER ESP and SYSENTER EIP. */
5781 if ( X86_IS_CANONICAL(pVmcs->u64HostSysenterEsp.u)
5782 && X86_IS_CANONICAL(pVmcs->u64HostSysenterEip.u))
5783 { /* likely */ }
5784 else
5785 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSysenterEspEip);
5786 }
5787
5788 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
5789 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PERF_MSR));
5790
5791 /* PAT MSR. */
5792 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
5793 || CPUMIsPatMsrValid(pVmcs->u64HostPatMsr.u))
5794 { /* likely */ }
5795 else
5796 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostPatMsr);
5797
5798 /* EFER MSR. */
5799 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
5800 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
5801 || !(pVmcs->u64HostEferMsr.u & ~uValidEferMask))
5802 { /* likely */ }
5803 else
5804 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsrRsvd);
5805
5806 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
5807 bool const fHostLma = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_LMA);
5808 bool const fHostLme = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_LME);
5809 if ( fHostInLongMode == fHostLma
5810 && fHostInLongMode == fHostLme)
5811 { /* likely */ }
5812 else
5813 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsr);
5814
5815 /*
5816 * Host Segment and Descriptor-Table Registers.
5817 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
5818 */
5819 /* Selector RPL and TI. */
5820 if ( !(pVmcs->HostCs & (X86_SEL_RPL | X86_SEL_LDT))
5821 && !(pVmcs->HostSs & (X86_SEL_RPL | X86_SEL_LDT))
5822 && !(pVmcs->HostDs & (X86_SEL_RPL | X86_SEL_LDT))
5823 && !(pVmcs->HostEs & (X86_SEL_RPL | X86_SEL_LDT))
5824 && !(pVmcs->HostFs & (X86_SEL_RPL | X86_SEL_LDT))
5825 && !(pVmcs->HostGs & (X86_SEL_RPL | X86_SEL_LDT))
5826 && !(pVmcs->HostTr & (X86_SEL_RPL | X86_SEL_LDT)))
5827 { /* likely */ }
5828 else
5829 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSel);
5830
5831 /* CS and TR selectors cannot be 0. */
5832 if ( pVmcs->HostCs
5833 && pVmcs->HostTr)
5834 { /* likely */ }
5835 else
5836 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCsTr);
5837
5838 /* SS cannot be 0 if 32-bit host. */
5839 if ( fHostInLongMode
5840 || pVmcs->HostSs)
5841 { /* likely */ }
5842 else
5843 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSs);
5844
5845 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5846 {
5847 /* FS, GS, GDTR, IDTR, TR base address. */
5848 if ( X86_IS_CANONICAL(pVmcs->u64HostFsBase.u)
5849 && X86_IS_CANONICAL(pVmcs->u64HostGsBase.u)
5850 && X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u)
5851 && X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u)
5852 && X86_IS_CANONICAL(pVmcs->u64HostTrBase.u))
5853 { /* likely */ }
5854 else
5855 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSegBase);
5856 }
5857
5858 /*
5859 * Host address-space size for 64-bit CPUs.
5860 * See Intel spec. 26.2.4 "Checks Related to Address-Space Size".
5861 */
5862 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5863 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5864 {
5865 bool const fCpuInLongMode = CPUMIsGuestInLongMode(pVCpu);
5866
5867 /* Logical processor in IA-32e mode. */
5868 if (fCpuInLongMode)
5869 {
5870 if (fHostInLongMode)
5871 {
5872 /* PAE must be set. */
5873 if (pVmcs->u64HostCr4.u & X86_CR4_PAE)
5874 { /* likely */ }
5875 else
5876 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pae);
5877
5878 /* RIP must be canonical. */
5879 if (X86_IS_CANONICAL(pVmcs->u64HostRip.u))
5880 { /* likely */ }
5881 else
5882 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRip);
5883 }
5884 else
5885 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostLongMode);
5886 }
5887 else
5888 {
5889 /* Logical processor is outside IA-32e mode. */
5890 if ( !fGstInLongMode
5891 && !fHostInLongMode)
5892 {
5893 /* PCIDE should not be set. */
5894 if (!(pVmcs->u64HostCr4.u & X86_CR4_PCIDE))
5895 { /* likely */ }
5896 else
5897 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pcide);
5898
5899 /* The high 32-bits of RIP MBZ. */
5900 if (!pVmcs->u64HostRip.s.Hi)
5901 { /* likely */ }
5902 else
5903 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRipRsvd);
5904 }
5905 else
5906 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongMode);
5907 }
5908 }
5909 else
5910 {
5911 /* Host address-space size for 32-bit CPUs. */
5912 if ( !fGstInLongMode
5913 && !fHostInLongMode)
5914 { /* likely */ }
5915 else
5916 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongModeNoCpu);
5917 }
5918
5919 NOREF(pszInstr);
5920 NOREF(pszFailure);
5921 return VINF_SUCCESS;
5922}
5923
5924
5925#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5926/**
5927 * Checks the EPT pointer VMCS field as part of VM-entry.
5928 *
5929 * @returns VBox status code.
5930 * @param pVCpu The cross context virtual CPU structure.
5931 * @param penmVmxDiag Where to store the diagnostic reason on failure (not
5932 * updated on success). Optional, can be NULL.
5933 */
5934IEM_STATIC int iemVmxVmentryCheckEptPtr(PVMCPUCC pVCpu, VMXVDIAG *penmVmxDiag)
5935{
5936 VMXVDIAG enmVmxDiag;
5937 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5938
5939 /* Reserved bits. */
5940 uint8_t const cMaxPhysAddrWidth = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth;
5941 uint64_t const fValidMask = VMX_EPTP_VALID_MASK & ~(UINT64_MAX << cMaxPhysAddrWidth);
5942 if (pVmcs->u64EptPtr.u & fValidMask)
5943 {
5944 /* Memory Type. */
5945 uint64_t const fCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps;
5946 uint8_t const fMemType = RT_BF_GET(pVmcs->u64EptPtr.u, VMX_BF_EPTP_MEMTYPE);
5947 if ( ( fMemType == VMX_EPTP_MEMTYPE_WB
5948 && RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_MEMTYPE_WB))
5949 || ( fMemType == VMX_EPTP_MEMTYPE_UC
5950 && RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_MEMTYPE_UC)))
5951 {
5952 /*
5953 * Page walk length (PML4).
5954                 * Intel used to specify bit 7 of IA32_VMX_EPT_VPID_CAP as a page-walk length
5955                 * of 5, but that appears to have been removed from the latest specs, leaving only
5956                 * PML4 as the maximum supported page-walk level. Hence we hardcode it as 3 (1 less than 4).
5957 */
5958 Assert(RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_PAGE_WALK_LENGTH_4));
5959 if (RT_BF_GET(pVmcs->u64EptPtr.u, VMX_BF_EPTP_PAGE_WALK_LENGTH) == 3)
5960 {
5961 /* Access and dirty bits support in EPT structures. */
5962 if ( !RT_BF_GET(pVmcs->u64EptPtr.u, VMX_BF_EPTP_ACCESS_DIRTY)
5963 || RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY))
5964 return VINF_SUCCESS;
5965
5966 enmVmxDiag = kVmxVDiag_Vmentry_EptpAccessDirty;
5967 }
5968 else
5969 enmVmxDiag = kVmxVDiag_Vmentry_EptpPageWalkLength;
5970 }
5971 else
5972 enmVmxDiag = kVmxVDiag_Vmentry_EptpMemType;
5973 }
5974 else
5975 enmVmxDiag = kVmxVDiag_Vmentry_EptpRsvd;
5976
5977 if (penmVmxDiag)
5978 *penmVmxDiag = enmVmxDiag;
5979 return VERR_VMX_VMENTRY_FAILED;
5980}
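/*
 * For reference while reading the checks above, a sketch of the EPTP layout as we
 * understand it from the Intel SDM (positions here are informational only; the
 * authoritative definitions are the VMX_BF_EPTP_XXX macros used above):
 *   Bits 2:0   - EPT paging-structure memory type (0 = UC, 6 = WB).
 *   Bits 5:3   - EPT page-walk length minus 1 (3 for a 4-level walk).
 *   Bit  6     - Enable accessed/dirty flags for EPT.
 *   Bits 11:7  - Reserved here.
 *   Bits 63:12 - Physical address of the PML4 table (limited by MAXPHYADDR).
 */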
5981#endif
5982
5983
5984/**
5985 * Checks VMCS controls fields as part of VM-entry.
5986 *
5987 * @returns VBox status code.
5988 * @param pVCpu The cross context virtual CPU structure.
5989 * @param pszInstr The VMX instruction name (for logging purposes).
5990 *
5991 * @remarks This may update secondary processor-based VM-execution control fields
5992 * in the current VMCS if necessary.
5993 */
5994IEM_STATIC int iemVmxVmentryCheckCtls(PVMCPUCC pVCpu, const char *pszInstr)
5995{
5996 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
5997 const char * const pszFailure = "VMFail";
5998
5999 /*
6000 * VM-execution controls.
6001 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
6002 */
6003 {
6004 /* Pin-based VM-execution controls. */
6005 {
6006 VMXCTLSMSR const PinCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.PinCtls;
6007 if (!(~pVmcs->u32PinCtls & PinCtls.n.allowed0))
6008 { /* likely */ }
6009 else
6010 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);
6011
6012 if (!(pVmcs->u32PinCtls & ~PinCtls.n.allowed1))
6013 { /* likely */ }
6014 else
6015 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsAllowed1);
6016 }
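        /*
         * Minimal illustrative sketch (kept out of the build) of the allowed-0/allowed-1
         * pattern used by the control checks in this function. The values below are made
         * up purely for the example and are not taken from any real MSR:
         *   - bits set in allowed0 are "must be 1" (fixed-1) bits,
         *   - bits clear in allowed1 are "must be 0" (fixed-0) bits.
         */
#if 0
        uint32_t const fCtls     = UINT32_C(0x0000001e); /* hypothetical control value from the VMCS */
        uint32_t const fAllowed0 = UINT32_C(0x00000016); /* every bit set here must be set in fCtls */
        uint32_t const fAllowed1 = UINT32_C(0x0000ffff); /* bits clear here must be clear in fCtls */
        bool const fFixed1Ok = !(~fCtls & fAllowed0);    /* true: all fixed-1 bits are set in fCtls */
        bool const fFixed0Ok = !(fCtls & ~fAllowed1);    /* true: no fixed-0 bit is set in fCtls */
#endif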
6017
6018 /* Processor-based VM-execution controls. */
6019 {
6020 VMXCTLSMSR const ProcCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls;
6021 if (!(~pVmcs->u32ProcCtls & ProcCtls.n.allowed0))
6022 { /* likely */ }
6023 else
6024 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);
6025
6026 if (!(pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1))
6027 { /* likely */ }
6028 else
6029 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsAllowed1);
6030 }
6031
6032 /* Secondary processor-based VM-execution controls. */
6033 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
6034 {
6035 VMXCTLSMSR const ProcCtls2 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls2;
6036 if (!(~pVmcs->u32ProcCtls2 & ProcCtls2.n.allowed0))
6037 { /* likely */ }
6038 else
6039 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);
6040
6041 if (!(pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1))
6042 { /* likely */ }
6043 else
6044 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Allowed1);
6045 }
6046 else
6047 Assert(!pVmcs->u32ProcCtls2);
6048
6049 /* CR3-target count. */
6050 if (pVmcs->u32Cr3TargetCount <= VMX_V_CR3_TARGET_COUNT)
6051 { /* likely */ }
6052 else
6053 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Cr3TargetCount);
6054
6055 /* I/O bitmaps physical addresses. */
6056 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
6057 {
6058 RTGCPHYS const GCPhysIoBitmapA = pVmcs->u64AddrIoBitmapA.u;
6059 if ( !(GCPhysIoBitmapA & X86_PAGE_4K_OFFSET_MASK)
6060 && !(GCPhysIoBitmapA >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6061 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysIoBitmapA))
6062 { /* likely */ }
6063 else
6064 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapA);
6065
6066 RTGCPHYS const GCPhysIoBitmapB = pVmcs->u64AddrIoBitmapB.u;
6067 if ( !(GCPhysIoBitmapB & X86_PAGE_4K_OFFSET_MASK)
6068 && !(GCPhysIoBitmapB >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6069 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysIoBitmapB))
6070 { /* likely */ }
6071 else
6072 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
6073 }
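        /*
         * Note: the pattern used for the physical-address checks here and below verifies three
         * things, derived directly from the conditions above: the address is 4K aligned, it does
         * not exceed the supported physical-address width, and it is backed by normal RAM
         * (PGMPhysIsGCPhysNormal).
         */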
6074
6075 /* MSR bitmap physical address. */
6076 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
6077 {
6078 RTGCPHYS const GCPhysMsrBitmap = pVmcs->u64AddrMsrBitmap.u;
6079 if ( !(GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
6080 && !(GCPhysMsrBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6081 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysMsrBitmap))
6082 { /* likely */ }
6083 else
6084 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);
6085 }
6086
6087 /* TPR shadow related controls. */
6088 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
6089 {
6090 /* Virtual-APIC page physical address. */
6091 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
6092 if ( !(GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
6093 && !(GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6094 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
6095 { /* likely */ }
6096 else
6097 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
6098
6099 /* TPR threshold bits 31:4 MBZ without virtual-interrupt delivery. */
6100 if ( !(pVmcs->u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK)
6101 || (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
6102 { /* likely */ }
6103 else
6104 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdRsvd);
6105
6106 /* The rest done XXX document */
6107 }
6108 else
6109 {
6110 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6111 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
6112 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
6113 { /* likely */ }
6114 else
6115 {
6116 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6117 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicTprShadow);
6118 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
6119 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ApicRegVirt);
6120 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
6121 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtIntDelivery);
6122 }
6123 }
6124
6125 /* NMI exiting and virtual-NMIs. */
6126 if ( (pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
6127 || !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
6128 { /* likely */ }
6129 else
6130 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtNmi);
6131
6132 /* Virtual-NMIs and NMI-window exiting. */
6133 if ( (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6134 || !(pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
6135 { /* likely */ }
6136 else
6137 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_NmiWindowExit);
6138
6139 /* Virtualize APIC accesses. */
6140 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
6141 {
6142 /* APIC-access physical address. */
6143 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
6144 if ( !(GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
6145 && !(GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6146 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
6147 { /* likely */ }
6148 else
6149 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);
6150
6151 /*
6152 * Disallow APIC-access page and virtual-APIC page from being the same address.
6153 * Note! This is not an Intel requirement, but one imposed by our implementation.
6154 */
6155 /** @todo r=ramshankar: This is done primarily to simplify recursion scenarios while
6156 * redirecting accesses between the APIC-access page and the virtual-APIC
6157 * page. If any nested hypervisor requires this, we can implement it later. */
6158 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
6159 {
6160 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
6161 if (GCPhysVirtApic != GCPhysApicAccess)
6162 { /* likely */ }
6163 else
6164 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessEqVirtApic);
6165 }
6166 }
6167
6168 /* Virtualize-x2APIC mode is mutually exclusive with virtualize-APIC accesses. */
6169 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6170 || !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
6171 { /* likely */ }
6172 else
6173 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
6174
6175 /* Virtual-interrupt delivery requires external interrupt exiting. */
6176 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
6177 || (pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
6178 { /* likely */ }
6179 else
6180 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
6181
6182 /* VPID. */
6183 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VPID)
6184 || pVmcs->u16Vpid != 0)
6185 { /* likely */ }
6186 else
6187 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Vpid);
6188
6189#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6190 /* Extended-Page-Table Pointer (EPTP). */
6191 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
6192 {
6193 VMXVDIAG enmVmxDiag;
6194 int const rc = iemVmxVmentryCheckEptPtr(pVCpu, &enmVmxDiag);
6195 if (RT_SUCCESS(rc))
6196 { /* likely */ }
6197 else
6198 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmVmxDiag);
6199 }
6200#else
6201 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)); /* We don't support EPT yet. */
6202#endif
6203
6204 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_POSTED_INT)); /* We don't support posted interrupts yet. */
6205 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PML)); /* We don't support PML yet. */
6206 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)); /* We don't support Unrestricted-guests yet. */
6207 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMFUNC)); /* We don't support VM functions yet. */
6208 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT_XCPT_VE)); /* We don't support EPT-violation #VE yet. */
6209 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)); /* We don't support Pause-loop exiting yet. */
6210 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_TSC_SCALING)); /* We don't support TSC-scaling yet. */
6211
6212 /* VMCS shadowing. */
6213 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
6214 {
6215 /* VMREAD-bitmap physical address. */
6216 RTGCPHYS const GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
6217 if ( !(GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
6218 && !(GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6219 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
6220 { /* likely */ }
6221 else
6222 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmreadBitmap);
6223
6224 /* VMWRITE-bitmap physical address. */
6225             RTGCPHYS const GCPhysVmwriteBitmap = pVmcs->u64AddrVmwriteBitmap.u;
6226 if ( !(GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
6227 && !(GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6228 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
6229 { /* likely */ }
6230 else
6231 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);
6232 }
6233 }
6234
6235 /*
6236 * VM-exit controls.
6237 * See Intel spec. 26.2.1.2 "VM-Exit Control Fields".
6238 */
6239 {
6240 VMXCTLSMSR const ExitCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ExitCtls;
6241 if (!(~pVmcs->u32ExitCtls & ExitCtls.n.allowed0))
6242 { /* likely */ }
6243 else
6244 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);
6245
6246 if (!(pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1))
6247 { /* likely */ }
6248 else
6249 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsAllowed1);
6250
6251 /* Save preemption timer without activating it. */
6252 if ( (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
6253             || !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
6254 { /* likely */ }
6255 else
6256 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_SavePreemptTimer);
6257
6258 /* VM-exit MSR-store count and VM-exit MSR-store area address. */
6259 if (pVmcs->u32ExitMsrStoreCount)
6260 {
6261 if ( !(pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
6262 && !(pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6263 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
6264 { /* likely */ }
6265 else
6266 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrStore);
6267 }
6268
6269 /* VM-exit MSR-load count and VM-exit MSR-load area address. */
6270 if (pVmcs->u32ExitMsrLoadCount)
6271 {
6272 if ( !(pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
6273 && !(pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6274 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
6275 { /* likely */ }
6276 else
6277 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrLoad);
6278 }
6279 }
6280
6281 /*
6282 * VM-entry controls.
6283 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
6284 */
6285 {
6286 VMXCTLSMSR const EntryCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.EntryCtls;
6287 if (!(~pVmcs->u32EntryCtls & EntryCtls.n.allowed0))
6288 { /* likely */ }
6289 else
6290 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);
6291
6292 if (!(pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1))
6293 { /* likely */ }
6294 else
6295 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsAllowed1);
6296
6297 /* Event injection. */
6298 uint32_t const uIntInfo = pVmcs->u32EntryIntInfo;
6299 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VALID))
6300 {
6301 /* Type and vector. */
6302 uint8_t const uType = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_TYPE);
6303 uint8_t const uVector = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VECTOR);
6304 uint8_t const uRsvd = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_RSVD_12_30);
6305 if ( !uRsvd
6306 && VMXIsEntryIntInfoTypeValid(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxMonitorTrapFlag, uType)
6307 && VMXIsEntryIntInfoVectorValid(uVector, uType))
6308 { /* likely */ }
6309 else
6310 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoTypeVecRsvd);
6311
6312 /* Exception error code. */
6313 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID))
6314 {
6315 /* Delivery possible only in Unrestricted-guest mode when CR0.PE is set. */
6316 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
6317 || (pVmcs->u64GuestCr0.s.Lo & X86_CR0_PE))
6318 { /* likely */ }
6319 else
6320 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodePe);
6321
6322 /* Exceptions that provide an error code. */
6323 if ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
6324 && ( uVector == X86_XCPT_DF
6325 || uVector == X86_XCPT_TS
6326 || uVector == X86_XCPT_NP
6327 || uVector == X86_XCPT_SS
6328 || uVector == X86_XCPT_GP
6329 || uVector == X86_XCPT_PF
6330 || uVector == X86_XCPT_AC))
6331 { /* likely */ }
6332 else
6333 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodeVec);
6334
6335 /* Exception error-code reserved bits. */
6336 if (!(pVmcs->u32EntryXcptErrCode & ~VMX_ENTRY_INT_XCPT_ERR_CODE_VALID_MASK))
6337 { /* likely */ }
6338 else
6339 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryXcptErrCodeRsvd);
6340
6341 /* Injecting a software interrupt, software exception or privileged software exception. */
6342 if ( uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
6343 || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
6344 || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
6345 {
6346 /* Instruction length must be in the range 0-15. */
6347 if (pVmcs->u32EntryInstrLen <= VMX_ENTRY_INSTR_LEN_MAX)
6348 { /* likely */ }
6349 else
6350 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLen);
6351
6352                 /* However, an instruction length of 0 is allowed only when the corresponding CPU feature is present. */
6353 if ( pVmcs->u32EntryInstrLen != 0
6354 || IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEntryInjectSoftInt)
6355 { /* likely */ }
6356 else
6357 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLenZero);
6358 }
6359 }
6360 }
6361
6362 /* VM-entry MSR-load count and VM-entry MSR-load area address. */
6363 if (pVmcs->u32EntryMsrLoadCount)
6364 {
6365 if ( !(pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
6366 && !(pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6367 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
6368 { /* likely */ }
6369 else
6370 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrEntryMsrLoad);
6371 }
6372
6373 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)); /* We don't support SMM yet. */
6374 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON)); /* We don't support dual-monitor treatment yet. */
6375 }
6376
6377 NOREF(pszInstr);
6378 NOREF(pszFailure);
6379 return VINF_SUCCESS;
6380}
6381
6382
6383/**
6384 * Loads the guest control registers, debug register and some MSRs as part of
6385 * VM-entry.
6386 *
6387 * @param pVCpu The cross context virtual CPU structure.
6388 */
6389IEM_STATIC void iemVmxVmentryLoadGuestControlRegsMsrs(PVMCPUCC pVCpu)
6390{
6391 /*
6392 * Load guest control registers, debug registers and MSRs.
6393 * See Intel spec. 26.3.2.1 "Loading Guest Control Registers, Debug Registers and MSRs".
6394 */
6395 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
6396
6397 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
6398 uint64_t const uGstCr0 = (pVmcs->u64GuestCr0.u & ~VMX_ENTRY_GUEST_CR0_IGNORE_MASK)
6399 | (pVCpu->cpum.GstCtx.cr0 & VMX_ENTRY_GUEST_CR0_IGNORE_MASK);
6400 pVCpu->cpum.GstCtx.cr0 = uGstCr0;
6401 pVCpu->cpum.GstCtx.cr4 = pVmcs->u64GuestCr4.u;
6402 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64GuestCr3.u;
6403
6404 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
6405 pVCpu->cpum.GstCtx.dr[7] = (pVmcs->u64GuestDr7.u & ~VMX_ENTRY_GUEST_DR7_MBZ_MASK) | VMX_ENTRY_GUEST_DR7_MB1_MASK;
6406
6407 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64GuestSysenterEip.s.Lo;
6408 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64GuestSysenterEsp.s.Lo;
6409 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32GuestSysenterCS;
6410
6411 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6412 {
6413 /* FS base and GS base are loaded while loading the rest of the guest segment registers. */
6414
6415 /* EFER MSR. */
6416 if (!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR))
6417 {
6418 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
6419 uint64_t const uHostEfer = pVCpu->cpum.GstCtx.msrEFER;
6420 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
6421 bool const fGstPaging = RT_BOOL(uGstCr0 & X86_CR0_PG);
6422 if (fGstInLongMode)
6423 {
6424 /* If the nested-guest is in long mode, LMA and LME are both set. */
6425 Assert(fGstPaging);
6426 pVCpu->cpum.GstCtx.msrEFER = uHostEfer | (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
6427 }
6428 else
6429 {
6430 /*
6431 * If the nested-guest is outside long mode:
6432 * - With paging: LMA is cleared, LME is cleared.
6433 * - Without paging: LMA is cleared, LME is left unmodified.
6434 */
6435 uint64_t const fLmaLmeMask = MSR_K6_EFER_LMA | (fGstPaging ? MSR_K6_EFER_LME : 0);
6436 pVCpu->cpum.GstCtx.msrEFER = uHostEfer & ~fLmaLmeMask;
6437 }
6438 }
6439 /* else: see below. */
6440 }
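    /*
     * Summary of the EFER.LMA/LME handling above when the "load IA32_EFER" VM-entry control
     * is clear (derived purely from the code above):
     *
     *   IA-32e mode guest   CR0.PG   =>   LMA   LME
     *         1               1           1     1
     *         0               1           0     0
     *         0               0           0     unchanged
     */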
6441
6442 /* PAT MSR. */
6443 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
6444 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64GuestPatMsr.u;
6445
6446 /* EFER MSR. */
6447 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
6448 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64GuestEferMsr.u;
6449
6450 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
6451 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
6452
6453 /* We don't support IA32_BNDCFGS MSR yet. */
6454 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
6455
6456 /* Nothing to do for SMBASE register - We don't support SMM yet. */
6457}
6458
6459
6460/**
6461 * Loads the guest segment registers, GDTR, IDTR, LDTR and TR as part of VM-entry.
6462 *
6463 * @param pVCpu The cross context virtual CPU structure.
6464 */
6465IEM_STATIC void iemVmxVmentryLoadGuestSegRegs(PVMCPUCC pVCpu)
6466{
6467 /*
6468 * Load guest segment registers, GDTR, IDTR, LDTR and TR.
6469 * See Intel spec. 26.3.2.2 "Loading Guest Segment Registers and Descriptor-Table Registers".
6470 */
6471 /* CS, SS, ES, DS, FS, GS. */
6472 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
6473 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
6474 {
6475 PCPUMSELREG pGstSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6476 CPUMSELREG VmcsSelReg;
6477 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &VmcsSelReg);
6478 AssertRC(rc); NOREF(rc);
6479 if (!(VmcsSelReg.Attr.u & X86DESCATTR_UNUSABLE))
6480 {
6481 pGstSelReg->Sel = VmcsSelReg.Sel;
6482 pGstSelReg->ValidSel = VmcsSelReg.Sel;
6483 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6484 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6485 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
6486 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
6487 }
6488 else
6489 {
6490 pGstSelReg->Sel = VmcsSelReg.Sel;
6491 pGstSelReg->ValidSel = VmcsSelReg.Sel;
6492 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6493 switch (iSegReg)
6494 {
6495 case X86_SREG_CS:
6496 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6497 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
6498 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
6499 break;
6500
6501 case X86_SREG_SS:
6502 pGstSelReg->u64Base = VmcsSelReg.u64Base & UINT32_C(0xfffffff0);
6503 pGstSelReg->u32Limit = 0;
6504 pGstSelReg->Attr.u = (VmcsSelReg.Attr.u & X86DESCATTR_DPL) | X86DESCATTR_D | X86DESCATTR_UNUSABLE;
6505 break;
6506
6507 case X86_SREG_ES:
6508 case X86_SREG_DS:
6509 pGstSelReg->u64Base = 0;
6510 pGstSelReg->u32Limit = 0;
6511 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
6512 break;
6513
6514 case X86_SREG_FS:
6515 case X86_SREG_GS:
6516 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6517 pGstSelReg->u32Limit = 0;
6518 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
6519 break;
6520 }
6521 Assert(pGstSelReg->Attr.n.u1Unusable);
6522 }
6523 }
6524
6525 /* LDTR. */
6526 pVCpu->cpum.GstCtx.ldtr.Sel = pVmcs->GuestLdtr;
6527 pVCpu->cpum.GstCtx.ldtr.ValidSel = pVmcs->GuestLdtr;
6528 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
6529 if (!(pVmcs->u32GuestLdtrAttr & X86DESCATTR_UNUSABLE))
6530 {
6531 pVCpu->cpum.GstCtx.ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
6532 pVCpu->cpum.GstCtx.ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
6533 pVCpu->cpum.GstCtx.ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
6534 }
6535 else
6536 {
6537 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
6538 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
6539 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
6540 }
6541
6542 /* TR. */
6543 Assert(!(pVmcs->u32GuestTrAttr & X86DESCATTR_UNUSABLE));
6544 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->GuestTr;
6545 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->GuestTr;
6546 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
6547 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64GuestTrBase.u;
6548 pVCpu->cpum.GstCtx.tr.u32Limit = pVmcs->u32GuestTrLimit;
6549 pVCpu->cpum.GstCtx.tr.Attr.u = pVmcs->u32GuestTrAttr;
6550
6551 /* GDTR. */
6552 pVCpu->cpum.GstCtx.gdtr.cbGdt = pVmcs->u32GuestGdtrLimit;
6553 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64GuestGdtrBase.u;
6554
6555 /* IDTR. */
6556 pVCpu->cpum.GstCtx.idtr.cbIdt = pVmcs->u32GuestIdtrLimit;
6557 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64GuestIdtrBase.u;
6558}
6559
6560
6561/**
6562 * Loads the guest MSRs from the VM-entry MSR-load area as part of VM-entry.
6563 *
6564 * @returns VBox status code.
6565 * @param pVCpu The cross context virtual CPU structure.
6566 * @param pszInstr The VMX instruction name (for logging purposes).
6567 */
6568IEM_STATIC int iemVmxVmentryLoadGuestAutoMsrs(PVMCPUCC pVCpu, const char *pszInstr)
6569{
6570 /*
6571 * Load guest MSRs.
6572 * See Intel spec. 26.4 "Loading MSRs".
6573 */
6574 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
6575 const char *const pszFailure = "VM-exit";
6576
6577 /*
6578 * The VM-entry MSR-load area address need not be a valid guest-physical address if the
6579 * VM-entry MSR load count is 0. If this is the case, bail early without reading it.
6580 * See Intel spec. 24.8.2 "VM-Entry Controls for MSRs".
6581 */
6582 uint32_t const cMsrs = RT_MIN(pVmcs->u32EntryMsrLoadCount, RT_ELEMENTS(pVCpu->cpum.GstCtx.hwvirt.vmx.aEntryMsrLoadArea));
6583 if (!cMsrs)
6584 return VINF_SUCCESS;
6585
6586 /*
6587 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count is
6588      * exceeded, including possibly raising #MC exceptions during the VMX transition. Our
6589      * implementation shall fail VM-entry with a VMX_EXIT_ERR_MSR_LOAD VM-exit.
6590 */
6591 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
6592 if (fIsMsrCountValid)
6593 { /* likely */ }
6594 else
6595 {
6596 iemVmxVmcsSetExitQual(pVCpu, VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
6597 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadCount);
6598 }
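    /*
     * Each entry in the MSR-load area read below is a VMXAUTOMSR record, as used by the loop
     * that follows: the MSR index (u32Msr), a reserved field that must be zero (u32Reserved)
     * and the 64-bit value to load (u64Value).
     */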
6599
6600 RTGCPHYS const GCPhysVmEntryMsrLoadArea = pVmcs->u64AddrEntryMsrLoad.u;
6601 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.aEntryMsrLoadArea[0],
6602 GCPhysVmEntryMsrLoadArea, cMsrs * sizeof(VMXAUTOMSR));
6603 if (RT_SUCCESS(rc))
6604 {
6605 PCVMXAUTOMSR pMsr = &pVCpu->cpum.GstCtx.hwvirt.vmx.aEntryMsrLoadArea[0];
6606 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
6607 {
6608 if ( !pMsr->u32Reserved
6609 && pMsr->u32Msr != MSR_K8_FS_BASE
6610 && pMsr->u32Msr != MSR_K8_GS_BASE
6611 && pMsr->u32Msr != MSR_K6_EFER
6612 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
6613 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
6614 {
6615 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
6616 if (rcStrict == VINF_SUCCESS)
6617 continue;
6618
6619 /*
6620 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue VM-entry.
6621 * If any nested hypervisor loads MSRs that require ring-3 handling, we cause a VM-entry failure
6622              * recording the MSR index in the Exit qualification (as per the Intel spec.) and indicating it
6623              * further by our own, specific diagnostic code. Later, we can try to implement handling of the
6624 * MSR in ring-0 if possible, or come up with a better, generic solution.
6625 */
6626 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
6627 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
6628 ? kVmxVDiag_Vmentry_MsrLoadRing3
6629 : kVmxVDiag_Vmentry_MsrLoad;
6630 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
6631 }
6632 else
6633 {
6634 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
6635 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadRsvd);
6636 }
6637 }
6638 }
6639 else
6640 {
6641 AssertMsgFailed(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysVmEntryMsrLoadArea, rc));
6642 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadPtrReadPhys);
6643 }
6644
6645 NOREF(pszInstr);
6646 NOREF(pszFailure);
6647 return VINF_SUCCESS;
6648}
6649
6650
6651/**
6652 * Loads the guest-state non-register state as part of VM-entry.
6653 *
6654 * @returns VBox status code.
6655 * @param pVCpu The cross context virtual CPU structure.
6656 * @param pszInstr The VMX instruction name (for logging purposes).
6657 *
6658 * @remarks This must be called only after loading the nested-guest register state
6659 * (especially nested-guest RIP).
6660 */
6661IEM_STATIC int iemVmxVmentryLoadGuestNonRegState(PVMCPUCC pVCpu, const char *pszInstr)
6662{
6663 /*
6664 * Load guest non-register state.
6665 * See Intel spec. 26.6 "Special Features of VM Entry"
6666 */
6667 const char *const pszFailure = "VM-exit";
6668 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
6669
6670 /*
6671 * If VM-entry is not vectoring, block-by-STI and block-by-MovSS state must be loaded.
6672 * If VM-entry is vectoring, there is no block-by-STI or block-by-MovSS.
6673 *
6674 * See Intel spec. 26.6.1 "Interruptibility State".
6675 */
6676 bool const fEntryVectoring = VMXIsVmentryVectoring(pVmcs->u32EntryIntInfo, NULL /* puEntryIntInfoType */);
6677 if ( !fEntryVectoring
6678 && (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)))
6679 EMSetInhibitInterruptsPC(pVCpu, pVmcs->u64GuestRip.u);
6680 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6681 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
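    /*
     * Note: the interrupt inhibition set up above is keyed to the guest RIP loaded from the
     * VMCS; elsewhere in this file it is only considered active while RIP still equals the PC
     * recorded via EMSetInhibitInterruptsPC (see e.g. the blocking check near the top of
     * iemVmxVmlaunchVmresume).
     */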
6682
6683 /* NMI blocking. */
6684 if (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
6685 {
6686 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6687 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = true;
6688 else
6689 {
6690 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false;
6691 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
6692 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6693 }
6694 }
6695 else
6696 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false;
6697
6698 /* SMI blocking is irrelevant. We don't support SMIs yet. */
6699
6700 /*
6701 * Set PGM's copy of the EPT pointer.
6702 * The EPTP has already been validated while checking guest state.
6703 *
6704 * It is important to do this prior to mapping PAE PDPTEs (below).
6705 */
6706 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
6707 PGMSetGuestEptPtr(pVCpu, pVmcs->u64EptPtr.u);
6708
6709 /*
6710 * Load the guest's PAE PDPTEs.
6711 */
6712 if (iemVmxVmcsIsGuestPaePagingEnabled(pVmcs))
6713 {
6714 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)
6715 {
6716 /*
6717 * With EPT, we've already validated these while checking the guest state.
6718 * Just load them directly from the VMCS here.
6719 */
6720 X86PDPE aPaePdptes[X86_PG_PAE_PDPE_ENTRIES];
6721 aPaePdptes[0].u = pVmcs->u64GuestPdpte0.u;
6722 aPaePdptes[1].u = pVmcs->u64GuestPdpte1.u;
6723 aPaePdptes[2].u = pVmcs->u64GuestPdpte2.u;
6724 aPaePdptes[3].u = pVmcs->u64GuestPdpte3.u;
6725 AssertCompile(RT_ELEMENTS(aPaePdptes) == RT_ELEMENTS(pVCpu->cpum.GstCtx.aPaePdpes));
6726 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->cpum.GstCtx.aPaePdpes); i++)
6727 pVCpu->cpum.GstCtx.aPaePdpes[i].u = aPaePdptes[i].u;
6728 }
6729 else
6730 {
6731 /*
6732 * Without EPT, we must load the PAE PDPTEs referenced by CR3.
6733              * This involves loading (and mapping) CR3 and validating the PDPTEs now.
6734 */
6735 int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, pVmcs->u64GuestCr3.u);
6736 if (RT_SUCCESS(rc))
6737 { /* likely */ }
6738 else
6739 {
6740 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
6741 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpte);
6742 }
6743 }
6744 }
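    /*
     * Note: "PAE paging" in the check above presumably means CR0.PG=1, CR4.PAE=1 and EFER.LMA=0
     * (the standard definition); the exact condition is whatever iemVmxVmcsIsGuestPaePagingEnabled
     * implements.
     */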
6745
6746 /* VPID is irrelevant. We don't support VPID yet. */
6747
6748 /* Clear address-range monitoring. */
6749 EMMonitorWaitClear(pVCpu);
6750
6751 return VINF_SUCCESS;
6752}
6753
6754
6755/**
6756 * Loads the guest VMCS referenced state (such as MSR bitmaps, I/O bitmaps etc).
6757 *
6758 * @param pVCpu The cross context virtual CPU structure.
6759 * @param pszInstr The VMX instruction name (for logging purposes).
6760 *
6761 * @remarks This assumes various VMCS related data structure pointers have already
6762 * been verified prior to calling this function.
6763 */
6764IEM_STATIC int iemVmxVmentryLoadGuestVmcsRefState(PVMCPUCC pVCpu, const char *pszInstr)
6765{
6766 const char *const pszFailure = "VM-exit";
6767 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
6768
6769 /*
6770 * Virtualize APIC accesses.
6771 */
6772 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
6773 {
6774 /* APIC-access physical address. */
6775 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
6776
6777 /*
6778 * Register the handler for the APIC-access page.
6779 *
6780 * We don't deregister the APIC-access page handler during the VM-exit as a different
6781 * nested-VCPU might be using the same guest-physical address for its APIC-access page.
6782 *
6783 * We leave the page registered until the first access that happens outside VMX non-root
6784 * mode. Guest software is allowed to access structures such as the APIC-access page
6785 * only when no logical processor with a current VMCS references it in VMX non-root mode,
6786 * otherwise it can lead to unpredictable behavior including guest triple-faults.
6787 *
6788 * See Intel spec. 24.11.4 "Software Access to Related Structures".
6789 */
6790 if (!PGMHandlerPhysicalIsRegistered(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
6791 {
6792 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
6793 PVMCPUCC pVCpu0 = VMCC_GET_CPU_0(pVM);
6794 int rc = PGMHandlerPhysicalRegister(pVM, GCPhysApicAccess, GCPhysApicAccess + X86_PAGE_4K_SIZE - 1,
6795 pVCpu0->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
6796 NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
6797 if (RT_SUCCESS(rc))
6798 { /* likely */ }
6799 else
6800 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessHandlerReg);
6801 }
6802 }
6803
6804 /*
6805 * VMCS shadowing.
6806 */
6807 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
6808 {
6809 /* Read the VMREAD-bitmap. */
6810 RTGCPHYS const GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
6811 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.abVmreadBitmap[0],
6812 GCPhysVmreadBitmap, sizeof(pVCpu->cpum.GstCtx.hwvirt.vmx.abVmreadBitmap));
6813 if (RT_SUCCESS(rc))
6814 { /* likely */ }
6815 else
6816 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys);
6817
6818 /* Read the VMWRITE-bitmap. */
6819 RTGCPHYS const GCPhysVmwriteBitmap = pVmcs->u64AddrVmwriteBitmap.u;
6820 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.abVmwriteBitmap[0],
6821 GCPhysVmwriteBitmap, sizeof(pVCpu->cpum.GstCtx.hwvirt.vmx.abVmwriteBitmap));
6822 if (RT_SUCCESS(rc))
6823 { /* likely */ }
6824 else
6825 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys);
6826 }
6827
6828 /*
6829 * I/O bitmaps.
6830 */
6831 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
6832 {
6833 /* Read the IO bitmap A. */
6834 RTGCPHYS const GCPhysIoBitmapA = pVmcs->u64AddrIoBitmapA.u;
6835 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.abIoBitmap[0],
6836 GCPhysIoBitmapA, VMX_V_IO_BITMAP_A_SIZE);
6837 if (RT_SUCCESS(rc))
6838 { /* likely */ }
6839 else
6840 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_IoBitmapAPtrReadPhys);
6841
6842 /* Read the IO bitmap B. */
6843 RTGCPHYS const GCPhysIoBitmapB = pVmcs->u64AddrIoBitmapB.u;
6844 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.abIoBitmap[VMX_V_IO_BITMAP_A_SIZE],
6845 GCPhysIoBitmapB, VMX_V_IO_BITMAP_B_SIZE);
6846 if (RT_SUCCESS(rc))
6847 { /* likely */ }
6848 else
6849 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_IoBitmapBPtrReadPhys);
6850 }
6851
6852 /*
6853 * TPR shadow and Virtual-APIC page.
6854 */
6855 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
6856 {
6857         /* Verify TPR threshold and VTPR when neither virtualize-APIC accesses nor virtual-interrupt delivery is used. */
6858 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
6859 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
6860 {
6861 /* Read the VTPR from the virtual-APIC page. */
6862 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
6863 uint8_t u8VTpr;
6864 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &u8VTpr, GCPhysVirtApic + XAPIC_OFF_TPR, sizeof(u8VTpr));
6865 if (RT_SUCCESS(rc))
6866 { /* likely */ }
6867 else
6868 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
6869
6870 /* Bits 3:0 of the TPR-threshold must not be greater than bits 7:4 of VTPR. */
6871 if ((uint8_t)RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) <= (u8VTpr & 0xf0))
6872 { /* likely */ }
6873 else
6874 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
6875 }
6876 }
6877
6878 /*
6879 * VMCS link pointer.
6880 */
6881 if (pVmcs->u64VmcsLinkPtr.u != UINT64_C(0xffffffffffffffff))
6882 {
6883 /* Read the VMCS-link pointer from guest memory. */
6884 RTGCPHYS const GCPhysShadowVmcs = pVmcs->u64VmcsLinkPtr.u;
6885 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.ShadowVmcs,
6886 GCPhysShadowVmcs, sizeof(pVCpu->cpum.GstCtx.hwvirt.vmx.ShadowVmcs));
6887 if (RT_SUCCESS(rc))
6888 { /* likely */ }
6889 else
6890 {
6891 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
6892 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys);
6893 }
6894
6895 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
6896 if (pVCpu->cpum.GstCtx.hwvirt.vmx.ShadowVmcs.u32VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID)
6897 { /* likely */ }
6898 else
6899 {
6900 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
6901 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrRevId);
6902 }
6903
6904         /* Verify the shadow bit is set if VMCS shadowing is enabled. */
6905 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
6906 || pVCpu->cpum.GstCtx.hwvirt.vmx.ShadowVmcs.u32VmcsRevId.n.fIsShadowVmcs)
6907 { /* likely */ }
6908 else
6909 {
6910 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
6911 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrShadow);
6912 }
6913
6914 /* Update our cache of the guest physical address of the shadow VMCS. */
6915 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs = GCPhysShadowVmcs;
6916 }
6917
6918 /*
6919 * MSR bitmap.
6920 */
6921 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
6922 {
6923 /* Read the MSR bitmap. */
6924 RTGCPHYS const GCPhysMsrBitmap = pVmcs->u64AddrMsrBitmap.u;
6925 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap[0],
6926 GCPhysMsrBitmap, sizeof(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap));
6927 if (RT_SUCCESS(rc))
6928 { /* likely */ }
6929 else
6930 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrBitmapPtrReadPhys);
6931 }
6932
6933 NOREF(pszFailure);
6934 NOREF(pszInstr);
6935 return VINF_SUCCESS;
6936}
6937
6938
6939/**
6940 * Loads the guest-state as part of VM-entry.
6941 *
6942 * @returns VBox status code.
6943 * @param pVCpu The cross context virtual CPU structure.
6944 * @param pszInstr The VMX instruction name (for logging purposes).
6945 *
6946 * @remarks This must be done after all the necessary steps prior to loading of
6947 * guest-state (e.g. checking various VMCS state).
6948 */
6949IEM_STATIC int iemVmxVmentryLoadGuestState(PVMCPUCC pVCpu, const char *pszInstr)
6950{
6951 /* Load guest control registers, MSRs (that are directly part of the VMCS). */
6952 iemVmxVmentryLoadGuestControlRegsMsrs(pVCpu);
6953
6954 /* Load guest segment registers. */
6955 iemVmxVmentryLoadGuestSegRegs(pVCpu);
6956
6957 /*
6958 * Load guest RIP, RSP and RFLAGS.
6959 * See Intel spec. 26.3.2.3 "Loading Guest RIP, RSP and RFLAGS".
6960 */
6961 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
6962 pVCpu->cpum.GstCtx.rsp = pVmcs->u64GuestRsp.u;
6963 pVCpu->cpum.GstCtx.rip = pVmcs->u64GuestRip.u;
6964 pVCpu->cpum.GstCtx.rflags.u = pVmcs->u64GuestRFlags.u;
6965
6966 /* Initialize the PAUSE-loop controls as part of VM-entry. */
6967 pVCpu->cpum.GstCtx.hwvirt.vmx.uFirstPauseLoopTick = 0;
6968 pVCpu->cpum.GstCtx.hwvirt.vmx.uPrevPauseTick = 0;
6969
6970 /* Load guest non-register state (such as interrupt shadows, NMI blocking etc). */
6971 int rc = iemVmxVmentryLoadGuestNonRegState(pVCpu, pszInstr);
6972 if (rc == VINF_SUCCESS)
6973 { /* likely */ }
6974 else
6975 return rc;
6976
6977 /* Load VMX related structures and state referenced by the VMCS. */
6978 rc = iemVmxVmentryLoadGuestVmcsRefState(pVCpu, pszInstr);
6979 if (rc == VINF_SUCCESS)
6980 { /* likely */ }
6981 else
6982 return rc;
6983
6984 NOREF(pszInstr);
6985 return VINF_SUCCESS;
6986}
6987
6988
6989/**
6990  * Returns whether there is a pending debug exception on VM-entry.
6991 *
6992 * @param pVCpu The cross context virtual CPU structure.
6993 * @param pszInstr The VMX instruction name (for logging purposes).
6994 */
6995IEM_STATIC bool iemVmxVmentryIsPendingDebugXcpt(PVMCPUCC pVCpu, const char *pszInstr)
6996{
6997 /*
6998 * Pending debug exceptions.
6999 * See Intel spec. 26.6.3 "Delivery of Pending Debug Exceptions after VM Entry".
7000 */
7001 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7002 Assert(pVmcs);
7003
7004 bool fPendingDbgXcpt = RT_BOOL(pVmcs->u64GuestPendingDbgXcpts.u & ( VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS
7005 | VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_EN_BP));
7006 if (fPendingDbgXcpt)
7007 {
7008 uint8_t uEntryIntInfoType;
7009 bool const fEntryVectoring = VMXIsVmentryVectoring(pVmcs->u32EntryIntInfo, &uEntryIntInfoType);
7010 if (fEntryVectoring)
7011 {
7012 switch (uEntryIntInfoType)
7013 {
7014 case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
7015 case VMX_ENTRY_INT_INFO_TYPE_NMI:
7016 case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
7017 case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:
7018 fPendingDbgXcpt = false;
7019 break;
7020
7021 case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT:
7022 {
7023 /*
7024 * Whether the pending debug exception for software exceptions other than
7025                      * #BP and #OF is delivered after injecting the exception or is discarded
7026                      * is CPU implementation specific. We will discard it (easier).
7027 */
7028 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(pVmcs->u32EntryIntInfo);
7029 if ( uVector != X86_XCPT_BP
7030 && uVector != X86_XCPT_OF)
7031 fPendingDbgXcpt = false;
7032 RT_FALL_THRU();
7033 }
7034 case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
7035 {
7036 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
7037 fPendingDbgXcpt = false;
7038 break;
7039 }
7040 }
7041 }
7042 else
7043 {
7044 /*
7045 * When the VM-entry is not vectoring but there is blocking-by-MovSS, whether the
7046 * pending debug exception is held pending or is discarded is CPU implementation
7047          * specific. We will discard it (easier).
7048 */
7049 if (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
7050 fPendingDbgXcpt = false;
7051
7052 /* There's no pending debug exception in the shutdown or wait-for-SIPI state. */
7053 if (pVmcs->u32GuestActivityState & (VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN | VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT))
7054 fPendingDbgXcpt = false;
7055 }
7056 }
7057
7058 NOREF(pszInstr);
7059 return fPendingDbgXcpt;
7060}
7061
7062
7063/**
7064 * Set up the monitor-trap flag (MTF).
7065 *
7066 * @param pVCpu The cross context virtual CPU structure.
7067 * @param pszInstr The VMX instruction name (for logging purposes).
7068 */
7069IEM_STATIC void iemVmxVmentrySetupMtf(PVMCPUCC pVCpu, const char *pszInstr)
7070{
7071 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7072 Assert(pVmcs);
7073 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG)
7074 {
7075 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_MTF);
7076 Log(("%s: Monitor-trap flag set on VM-entry\n", pszInstr));
7077 }
7078 else
7079 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
7080 NOREF(pszInstr);
7081}
7082
7083
7084/**
7085 * Sets up NMI-window exiting.
7086 *
7087 * @param pVCpu The cross context virtual CPU structure.
7088 * @param pszInstr The VMX instruction name (for logging purposes).
7089 */
7090IEM_STATIC void iemVmxVmentrySetupNmiWindow(PVMCPUCC pVCpu, const char *pszInstr)
7091{
7092 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7093 Assert(pVmcs);
7094 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
7095 {
7096 Assert(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI);
7097 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW);
7098 Log(("%s: NMI-window set on VM-entry\n", pszInstr));
7099 }
7100 else
7101 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
7102 NOREF(pszInstr);
7103}
7104
7105
7106/**
7107 * Sets up interrupt-window exiting.
7108 *
7109 * @param pVCpu The cross context virtual CPU structure.
7110 * @param pszInstr The VMX instruction name (for logging purposes).
7111 */
7112IEM_STATIC void iemVmxVmentrySetupIntWindow(PVMCPUCC pVCpu, const char *pszInstr)
7113{
7114 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7115 Assert(pVmcs);
7116 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
7117 {
7118 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW);
7119 Log(("%s: Interrupt-window set on VM-entry\n", pszInstr));
7120 }
7121 else
7122 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
7123 NOREF(pszInstr);
7124}
7125
7126
7127/**
7128 * Set up the VMX-preemption timer.
7129 *
7130 * @param pVCpu The cross context virtual CPU structure.
7131 * @param pszInstr The VMX instruction name (for logging purposes).
7132 */
7133IEM_STATIC void iemVmxVmentrySetupPreemptTimer(PVMCPUCC pVCpu, const char *pszInstr)
7134{
7135 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7136 Assert(pVmcs);
7137 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
7138 {
7139 /*
7140 * If the timer is 0, we must cause a VM-exit before executing the first
7141          * nested-guest instruction. So we flag it as though the timer has already
7142          * expired, and we will check and cause a VM-exit at the right priority elsewhere
7143 * in the code.
7144 */
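        /*
         * Note: per our reading of the Intel SDM, the VMX-preemption timer counts down at a rate
         * proportional to the TSC, i.e. the TSC shifted right by the value advertised in
         * IA32_VMX_MISC[4:0]; VMX_V_PREEMPT_TIMER_SHIFT is presumably the shift our virtual CPU
         * advertises there.
         */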
7145 uint64_t uEntryTick;
7146 uint32_t const uPreemptTimer = pVmcs->u32PreemptTimer;
7147 if (uPreemptTimer)
7148 {
7149 int rc = CPUMStartGuestVmxPremptTimer(pVCpu, uPreemptTimer, VMX_V_PREEMPT_TIMER_SHIFT, &uEntryTick);
7150 AssertRC(rc);
7151 Log(("%s: VM-entry set up VMX-preemption timer at %#RX64\n", pszInstr, uEntryTick));
7152 }
7153 else
7154 {
7155 uEntryTick = TMCpuTickGetNoCheck(pVCpu);
7156 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER);
7157 Log(("%s: VM-entry set up VMX-preemption timer at %#RX64 to expire immediately!\n", pszInstr, uEntryTick));
7158 }
7159
7160 pVCpu->cpum.GstCtx.hwvirt.vmx.uEntryTick = uEntryTick;
7161 }
7162 else
7163 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
7164
7165 NOREF(pszInstr);
7166}
7167
7168
7169/**
7170 * Injects an event using TRPM given a VM-entry interruption info. and related
7171 * fields.
7172 *
7173 * @param pVCpu The cross context virtual CPU structure.
7174 * @param pszInstr The VMX instruction name (for logging purposes).
7175 * @param uEntryIntInfo The VM-entry interruption info.
7176 * @param uErrCode The error code associated with the event if any.
7177 * @param cbInstr The VM-entry instruction length (for software
7178 * interrupts and software exceptions). Pass 0
7179 * otherwise.
7180 * @param GCPtrFaultAddress The guest CR2 if this is a \#PF event.
7181 */
7182IEM_STATIC void iemVmxVmentryInjectTrpmEvent(PVMCPUCC pVCpu, const char *pszInstr, uint32_t uEntryIntInfo, uint32_t uErrCode,
7183 uint32_t cbInstr, RTGCUINTPTR GCPtrFaultAddress)
7184{
7185 Assert(VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo));
7186
7187 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
7188 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo);
7189 TRPMEVENT const enmTrpmEvent = HMVmxEventTypeToTrpmEventType(uEntryIntInfo);
7190
7191 Assert(uType != VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT);
7192
7193 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrpmEvent);
7194 AssertRC(rc);
7195 Log(("%s: Injecting: vector=%#x type=%#x (%s)\n", pszInstr, uVector, uType, VMXGetEntryIntInfoTypeDesc(uType)));
7196
7197 if (VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(uEntryIntInfo))
7198 {
7199 TRPMSetErrorCode(pVCpu, uErrCode);
7200 Log(("%s: Injecting: err_code=%#x\n", pszInstr, uErrCode));
7201 }
7202
7203 if (VMX_ENTRY_INT_INFO_IS_XCPT_PF(uEntryIntInfo))
7204 {
7205 TRPMSetFaultAddress(pVCpu, GCPtrFaultAddress);
7206 Log(("%s: Injecting: fault_addr=%RGp\n", pszInstr, GCPtrFaultAddress));
7207 }
7208 else
7209 {
7210 if ( uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
7211 || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
7212 || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
7213 {
7214 TRPMSetInstrLength(pVCpu, cbInstr);
7215 Log(("%s: Injecting: instr_len=%u\n", pszInstr, cbInstr));
7216 }
7217 }
7218
7219 if (VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo) == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
7220 {
7221 TRPMSetTrapDueToIcebp(pVCpu);
7222 Log(("%s: Injecting: icebp\n", pszInstr));
7223 }
7224
7225 NOREF(pszInstr);
7226}
7227
7228
7229/**
7230 * Performs event injection (if any) as part of VM-entry.
7231 *
7232 * @param pVCpu The cross context virtual CPU structure.
7233 * @param pszInstr The VMX instruction name (for logging purposes).
7234 */
7235IEM_STATIC void iemVmxVmentryInjectEvent(PVMCPUCC pVCpu, const char *pszInstr)
7236{
7237 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7238
7239 /*
7240 * Inject events.
7241 * The event that is going to be made pending for injection is not subject to VMX intercepts,
7242      * thus we flag that intercepts are to be ignored. However, recursive exceptions, if any, during
7243      * delivery of the current event -are- subject to intercepts, hence this flag will be flipped
7244      * during the actual delivery of this event.
7245 *
7246 * See Intel spec. 26.5 "Event Injection".
7247 */
7248 uint32_t const uEntryIntInfo = pVmcs->u32EntryIntInfo;
7249 bool const fEntryIntInfoValid = VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo);
7250
7251 CPUMSetGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx, !fEntryIntInfoValid);
7252 if (fEntryIntInfoValid)
7253 {
7254 if (VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo) == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT)
7255 {
7256 Assert(VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo) == VMX_ENTRY_INT_INFO_VECTOR_MTF);
7257 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_MTF);
7258 }
7259 else
7260 iemVmxVmentryInjectTrpmEvent(pVCpu, pszInstr, uEntryIntInfo, pVmcs->u32EntryXcptErrCode, pVmcs->u32EntryInstrLen,
7261 pVCpu->cpum.GstCtx.cr2);
7262
7263 /*
7264 * We need to clear the VM-entry interruption information field's valid bit on VM-exit.
7265 *
7266 * However, we do it here on VM-entry as well because while it isn't visible to guest
7267 * software until VM-exit, when and if HM looks at the VMCS to continue nested-guest
7268      * execution using hardware-assisted VMX, it will not try to inject the event again.
7269 *
7270 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7271 */
7272 pVmcs->u32EntryIntInfo &= ~VMX_ENTRY_INT_INFO_VALID;
7273 }
7274 else
7275 {
7276 /*
7277 * Inject any pending guest debug exception.
7278 * Unlike injecting events, this #DB injection on VM-entry is subject to #DB VMX intercept.
7279 * See Intel spec. 26.6.3 "Delivery of Pending Debug Exceptions after VM Entry".
7280 */
7281 bool const fPendingDbgXcpt = iemVmxVmentryIsPendingDebugXcpt(pVCpu, pszInstr);
7282 if (fPendingDbgXcpt)
7283 {
7284 uint32_t const uDbgXcptInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
7285 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
7286 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
7287 iemVmxVmentryInjectTrpmEvent(pVCpu, pszInstr, uDbgXcptInfo, 0 /* uErrCode */, pVmcs->u32EntryInstrLen,
7288 0 /* GCPtrFaultAddress */);
7289 }
7290 }
7291
7292 NOREF(pszInstr);
7293}
7294
7295
7296/**
7297 * Initializes all read-only VMCS fields as part of VM-entry.
7298 *
7299 * @param pVCpu The cross context virtual CPU structure.
7300 */
7301IEM_STATIC void iemVmxVmentryInitReadOnlyFields(PVMCPUCC pVCpu)
7302{
7303 /*
7304 * Any VMCS field which we do not establish on every VM-exit but may potentially
7305 * be used on the VM-exit path of a nested hypervisor -and- is not explicitly
7306 * specified to be undefined, needs to be initialized here.
7307 *
7308 * Thus, it is especially important to clear the Exit qualification field
7309 * since it must be zero for VM-exits where it is not used. Similarly, the
7310 * VM-exit interruption information field's valid bit needs to be cleared for
7311 * the same reasons.
7312 */
7313 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7314 Assert(pVmcs);
7315
7316 /* 16-bit (none currently). */
7317 /* 32-bit. */
7318 pVmcs->u32RoVmInstrError = 0;
7319 pVmcs->u32RoExitReason = 0;
7320 pVmcs->u32RoExitIntInfo = 0;
7321 pVmcs->u32RoExitIntErrCode = 0;
7322 pVmcs->u32RoIdtVectoringInfo = 0;
7323 pVmcs->u32RoIdtVectoringErrCode = 0;
7324 pVmcs->u32RoExitInstrLen = 0;
7325 pVmcs->u32RoExitInstrInfo = 0;
7326
7327 /* 64-bit. */
7328 pVmcs->u64RoGuestPhysAddr.u = 0;
7329
7330 /* Natural-width. */
7331 pVmcs->u64RoExitQual.u = 0;
7332 pVmcs->u64RoIoRcx.u = 0;
7333 pVmcs->u64RoIoRsi.u = 0;
7334 pVmcs->u64RoIoRdi.u = 0;
7335 pVmcs->u64RoIoRip.u = 0;
7336 pVmcs->u64RoGuestLinearAddr.u = 0;
7337}
7338
7339
7340/**
7341 * VMLAUNCH/VMRESUME instruction execution worker.
7342 *
7343 * @returns Strict VBox status code.
7344 * @param pVCpu The cross context virtual CPU structure.
7345 * @param cbInstr The instruction length in bytes.
7346 * @param uInstrId The instruction identity (VMXINSTRID_VMLAUNCH or
7347 * VMXINSTRID_VMRESUME).
7348 *
7349  * @remarks Common VMX instruction checks are already expected to have been done by the caller,
7350 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7351 */
7352IEM_STATIC VBOXSTRICTRC iemVmxVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
7353{
7354# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
7355 RT_NOREF3(pVCpu, cbInstr, uInstrId);
7356 return VINF_EM_RAW_EMULATE_INSTR;
7357# else
7358 Assert( uInstrId == VMXINSTRID_VMLAUNCH
7359 || uInstrId == VMXINSTRID_VMRESUME);
7360 const char *pszInstr = uInstrId == VMXINSTRID_VMRESUME ? "vmresume" : "vmlaunch";
7361
7362 /* Nested-guest intercept. */
7363 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7364 return iemVmxVmexitInstr(pVCpu, uInstrId == VMXINSTRID_VMRESUME ? VMX_EXIT_VMRESUME : VMX_EXIT_VMLAUNCH, cbInstr);
7365
7366 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7367
7368 /*
7369 * Basic VM-entry checks.
7370 * The order of the CPL, current/shadow-VMCS and block-by-MovSS checks is important.
7371 * The checks following that do not have to follow a specific order.
7372 *
7373 * See Intel spec. 26.1 "Basic VM-entry Checks".
7374 */
7375
7376 /* CPL. */
7377 if (pVCpu->iem.s.uCpl == 0)
7378 { /* likely */ }
7379 else
7380 {
7381 Log(("%s: CPL %u -> #GP(0)\n", pszInstr, pVCpu->iem.s.uCpl));
7382 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_Cpl;
7383 return iemRaiseGeneralProtectionFault0(pVCpu);
7384 }
7385
7386 /* Current VMCS valid. */
7387 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
7388 { /* likely */ }
7389 else
7390 {
7391 Log(("%s: VMCS pointer %#RGp invalid -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7392 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrInvalid;
7393 iemVmxVmFailInvalid(pVCpu);
7394 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7395 return VINF_SUCCESS;
7396 }
7397
7398 /* Current VMCS is not a shadow VMCS. */
7399 if (!pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.u32VmcsRevId.n.fIsShadowVmcs)
7400 { /* likely */ }
7401 else
7402 {
7403 Log(("%s: VMCS pointer %#RGp is a shadow VMCS -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7404 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrShadowVmcs;
7405 iemVmxVmFailInvalid(pVCpu);
7406 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7407 return VINF_SUCCESS;
7408 }
7409
7410 /** @todo Distinguish block-by-MovSS from block-by-STI. Currently we
7411 * use block-by-STI here which is not quite correct. */
7412 if ( !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
7413 || pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
7414 { /* likely */ }
7415 else
7416 {
7417 Log(("%s: VM entry with events blocked by MOV SS -> VMFail\n", pszInstr));
7418 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_BlocKMovSS;
7419 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_BLOCK_MOVSS);
7420 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7421 return VINF_SUCCESS;
7422 }
7423
7424 if (uInstrId == VMXINSTRID_VMLAUNCH)
7425 {
7426 /* VMLAUNCH with non-clear VMCS. */
7427 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.fVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR)
7428 { /* likely */ }
7429 else
7430 {
7431 Log(("vmlaunch: VMLAUNCH with non-clear VMCS -> VMFail\n"));
7432 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsClear;
7433 iemVmxVmFail(pVCpu, VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS);
7434 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7435 return VINF_SUCCESS;
7436 }
7437 }
7438 else
7439 {
7440 /* VMRESUME with non-launched VMCS. */
7441 if (pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.fVmcsState == VMX_V_VMCS_LAUNCH_STATE_LAUNCHED)
7442 { /* likely */ }
7443 else
7444 {
7445 Log(("vmresume: VMRESUME with non-launched VMCS -> VMFail\n"));
7446 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsLaunch;
7447 iemVmxVmFail(pVCpu, VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS);
7448 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7449 return VINF_SUCCESS;
7450 }
7451 }
7452
7453 /*
7454 * We are allowed to cache VMCS related data structures (such as I/O bitmaps, MSR bitmaps)
7455 * while entering VMX non-root mode. We do some of this while checking VM-execution
7456 * controls. The nested hypervisor should not make assumptions and cannot expect
7457 * predictable behavior if changes to these structures are made in guest memory while
7458 * executing in VMX non-root mode. As far as VirtualBox is concerned, the guest cannot
7459 * modify them anyway as we cache them in host memory.
7460 *
7461 * See Intel spec. 24.11.4 "Software Access to Related Structures".
7462 */
7463 PVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7464 Assert(pVmcs);
7465 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
7466
7467 int rc = iemVmxVmentryCheckCtls(pVCpu, pszInstr);
7468 if (RT_SUCCESS(rc))
7469 {
7470 rc = iemVmxVmentryCheckHostState(pVCpu, pszInstr);
7471 if (RT_SUCCESS(rc))
7472 {
7473 /*
7474 * Initialize read-only VMCS fields before VM-entry since we don't update all of them
7475 * for every VM-exit. This needs to be done before invoking a VM-exit (even the
7476 * ones that may occur during VM-entry below).
7477 */
7478 iemVmxVmentryInitReadOnlyFields(pVCpu);
7479
7480 /*
7481 * Blocking of NMIs needs to be restored if VM-entry fails due to invalid guest state.
7482 * So we save the VMCPU_FF_BLOCK_NMI force-flag here so we can restore it on
7483 * VM-exit when required.
7484 * See Intel spec. 26.7 "VM-entry Failures During or After Loading Guest State".
7485 */
7486 iemVmxVmentrySaveNmiBlockingFF(pVCpu);
7487
7488 rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
7489 if (RT_SUCCESS(rc))
7490 {
7491 /*
7492 * We've now entered nested-guest execution.
7493 *
7494 * It is important to do this prior to loading the guest state because
7495 * as part of loading the guest state, PGM (and perhaps other components
7496 * in the future) relies on detecting whether VMX non-root mode has been
7497 * entered.
7498 */
7499 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
7500
7501 rc = iemVmxVmentryLoadGuestState(pVCpu, pszInstr);
7502 if (RT_SUCCESS(rc))
7503 {
7504 rc = iemVmxVmentryLoadGuestAutoMsrs(pVCpu, pszInstr);
7505 if (RT_SUCCESS(rc))
7506 {
7507 Assert(rc != VINF_CPUM_R3_MSR_WRITE);
7508
7509 /* VMLAUNCH instruction must update the VMCS launch state. */
7510 if (uInstrId == VMXINSTRID_VMLAUNCH)
7511 pVmcs->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_LAUNCHED;
7512
7513 /* Perform the VMX transition (PGM updates). */
7514 VBOXSTRICTRC rcStrict = iemVmxTransition(pVCpu);
7515 if (rcStrict == VINF_SUCCESS)
7516 { /* likely */ }
7517 else if (RT_SUCCESS(rcStrict))
7518 {
7519 Log3(("%s: iemVmxTransition returns %Rrc -> Setting passup status\n", pszInstr,
7520 VBOXSTRICTRC_VAL(rcStrict)));
7521 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7522 }
7523 else
7524 {
7525 Log3(("%s: iemVmxTransition failed! rc=%Rrc\n", pszInstr, VBOXSTRICTRC_VAL(rcStrict)));
7526 return rcStrict;
7527 }
7528
7529 /* Paranoia. */
7530 Assert(rcStrict == VINF_SUCCESS);
7531
7532 /*
7533 * The priority of potential VM-exits during VM-entry is important.
7534 * The priorities of VM-exits and events are listed from highest
7535 * to lowest as follows:
7536 *
7537 * 1. Event injection.
7538 * 2. Trap on task-switch (T flag set in TSS).
7539 * 3. TPR below threshold / APIC-write.
7540 * 4. SMI, INIT.
7541 * 5. MTF exit.
7542 * 6. Debug-trap exceptions (EFLAGS.TF), pending debug exceptions.
7543 * 7. VMX-preemption timer.
7544 * 8. NMI-window exit.
7545 * 9. NMI injection.
7546 * 10. Interrupt-window exit.
7547 * 11. Virtual-interrupt injection.
7548 * 12. Interrupt injection.
7549 * 13. Process next instruction (fetch, decode, execute).
7550 */
7551
7552 /* Setup VMX-preemption timer. */
7553 iemVmxVmentrySetupPreemptTimer(pVCpu, pszInstr);
7554
7555 /* Setup monitor-trap flag. */
7556 iemVmxVmentrySetupMtf(pVCpu, pszInstr);
7557
7558 /* Setup NMI-window exiting. */
7559 iemVmxVmentrySetupNmiWindow(pVCpu, pszInstr);
7560
7561 /* Setup interrupt-window exiting. */
7562 iemVmxVmentrySetupIntWindow(pVCpu, pszInstr);
7563
7564 /*
7565 * Inject any event that the nested hypervisor wants to inject.
7566 * Note! We cannot immediately perform the event injection here as we may have
7567 * pending PGM operations to perform due to switching page tables and/or
7568 * mode.
7569 */
7570 iemVmxVmentryInjectEvent(pVCpu, pszInstr);
7571
7572# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
7573 /* Reschedule to IEM-only execution of the nested-guest. */
7574 Log(("%s: Enabling IEM-only EM execution policy!\n", pszInstr));
7575 int rcSched = EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
7576 if (rcSched != VINF_SUCCESS)
7577 iemSetPassUpStatus(pVCpu, rcSched);
7578# endif
7579
7580 /* Finally, done. */
7581 Log3(("%s: cs:rip=%#04x:%#RX64 cr0=%#RX64 (%#RX64) cr4=%#RX64 (%#RX64) efer=%#RX64\n",
7582 pszInstr, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.cr0,
7583 pVmcs->u64Cr0ReadShadow.u, pVCpu->cpum.GstCtx.cr4, pVmcs->u64Cr4ReadShadow.u,
7584 pVCpu->cpum.GstCtx.msrEFER));
7585 return VINF_SUCCESS;
7586 }
7587 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED,
7588 pVmcs->u64RoExitQual.u);
7589 }
7590 }
7591 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED,
7592 pVmcs->u64RoExitQual.u);
7593 }
7594
7595 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_HOST_STATE);
7596 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7597 return VINF_SUCCESS;
7598 }
7599
7600 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
7601 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7602 return VINF_SUCCESS;
7603# endif
7604}
7605
7606
7607/**
7608 * Checks whether an RDMSR or WRMSR instruction for the given MSR is intercepted
7609 * (causes a VM-exit) or not.
7610 *
7611 * @returns @c true if the instruction is intercepted, @c false otherwise.
7612 * @param pVCpu The cross context virtual CPU structure.
7613 * @param uExitReason The VM-exit reason (VMX_EXIT_RDMSR or
7614 * VMX_EXIT_WRMSR).
7615 * @param idMsr The MSR.
7616 */
7617IEM_STATIC bool iemVmxIsRdmsrWrmsrInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint32_t idMsr)
7618{
7619 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
7620 Assert( uExitReason == VMX_EXIT_RDMSR
7621 || uExitReason == VMX_EXIT_WRMSR);
7622
7623 /* Consult the MSR bitmap if the feature is supported. */
7624 PCVMXVVMCS const pVmcs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs;
7625 Assert(pVmcs);
7626 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7627 {
7628 uint32_t const fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.abMsrBitmap, idMsr);
7629 if (uExitReason == VMX_EXIT_RDMSR)
7630 return RT_BOOL(fMsrpm & VMXMSRPM_EXIT_RD);
7631 return RT_BOOL(fMsrpm & VMXMSRPM_EXIT_WR);
7632 }
7633
7634 /* Without MSR bitmaps, all MSR accesses are intercepted. */
7635 return true;
7636}
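/*
 * For illustration, a caller on the RDMSR emulation path might consult the helper above
 * roughly as sketched below (hypothetical snippet, not compiled; the real MSR code may
 * use a different VM-exit helper):
 *
 *     if (   IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
 *         && iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, VMX_EXIT_RDMSR, idMsr))
 *         return iemVmxVmexitInstr(pVCpu, VMX_EXIT_RDMSR, cbInstr);
 */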
7637
7638
7639/**
7640 * VMREAD instruction execution worker that does not perform any validation checks.
7641 *
7642 * Callers are expected to have performed the necessary checks and to ensure the
7643 * VMREAD will succeed.
7644 *
7645 * @param pVmcs Pointer to the virtual VMCS.
7646 * @param pu64Dst Where to write the VMCS value.
7647 * @param u64VmcsField The VMCS field.
7648 *
7649 * @remarks May be called with interrupts disabled.
7650 */
7651IEM_STATIC void iemVmxVmreadNoCheck(PCVMXVVMCS pVmcs, uint64_t *pu64Dst, uint64_t u64VmcsField)
7652{
7653 VMXVMCSFIELD VmcsField;
7654 VmcsField.u = u64VmcsField;
7655 uint8_t const uWidth = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_WIDTH);
7656 uint8_t const uType = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_TYPE);
7657 uint8_t const uWidthType = (uWidth << 2) | uType;
7658 uint8_t const uIndex = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_INDEX);
7659 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
7660 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
7661 AssertMsg(offField < VMX_V_VMCS_SIZE, ("off=%u field=%#RX64 width=%#x type=%#x index=%#x (%u)\n", offField, u64VmcsField,
7662 uWidth, uType, uIndex, uIndex));
7663 AssertCompile(VMX_V_SHADOW_VMCS_SIZE == VMX_V_VMCS_SIZE);
7664
7665 /*
7666 * Read the VMCS component based on the field's effective width.
7667 *
7668 * The effective width is the 64-bit field width adjusted to 32 bits if the access
7669 * type indicates the high bits (little endian).
7670 *
7671 * Note! The caller is responsible for trimming the result and updating registers
7672 * or memory locations as required. Here we just zero-extend to the largest
7673 * type (i.e. 64 bits).
7674 */
7675 uint8_t const *pbVmcs = (uint8_t const *)pVmcs;
7676 uint8_t const *pbField = pbVmcs + offField;
7677 uint8_t const uEffWidth = VMXGetVmcsFieldWidthEff(VmcsField.u);
7678 switch (uEffWidth)
7679 {
7680 case VMX_VMCSFIELD_WIDTH_64BIT:
7681 case VMX_VMCSFIELD_WIDTH_NATURAL: *pu64Dst = *(uint64_t const *)pbField; break;
7682 case VMX_VMCSFIELD_WIDTH_32BIT: *pu64Dst = *(uint32_t const *)pbField; break;
7683 case VMX_VMCSFIELD_WIDTH_16BIT: *pu64Dst = *(uint16_t const *)pbField; break;
7684 }
7685}
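/*
 * For illustration, reading the exit reason from the current virtual VMCS with the worker
 * above could look like this (hypothetical snippet, not compiled; assumes
 * VMX_VMCS32_RO_EXIT_REASON is the 32-bit read-only exit-reason field encoding):
 *
 *     uint64_t uExitReason;
 *     iemVmxVmreadNoCheck(&pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs, &uExitReason, VMX_VMCS32_RO_EXIT_REASON);
 *
 * The result is zero-extended to 64 bits; the caller truncates as needed.
 */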
7686
7687
7688/**
7689 * VMREAD common (memory/register) instruction execution worker.
7690 *
7691 * @returns Strict VBox status code.
7692 * @param pVCpu The cross context virtual CPU structure.
7693 * @param cbInstr The instruction length in bytes.
7694 * @param pu64Dst Where to write the VMCS value (only updated when
7695 * VINF_SUCCESS is returned).
7696 * @param u64VmcsField The VMCS field.
7697 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
7698 * NULL.
7699 */
7700IEM_STATIC VBOXSTRICTRC iemVmxVmreadCommon(PVMCPUCC pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64VmcsField,
7701 PCVMXVEXITINFO pExitInfo)
7702{
7703 /* Nested-guest intercept. */
7704 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7705 && CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMREAD, u64VmcsField))
7706 {
7707 if (pExitInfo)
7708 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7709 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMREAD, VMXINSTRID_VMREAD, cbInstr);
7710 }
7711
7712 /* CPL. */
7713 if (pVCpu->iem.s.uCpl == 0)
7714 { /* likely */ }
7715 else
7716 {
7717 Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7718 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_Cpl;
7719 return iemRaiseGeneralProtectionFault0(pVCpu);
7720 }
7721
7722 /* VMCS pointer in root mode. */
7723 if ( !IEM_VMX_IS_ROOT_MODE(pVCpu)
7724 || IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
7725 { /* likely */ }
7726 else
7727 {
7728 Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7729 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrInvalid;
7730 iemVmxVmFailInvalid(pVCpu);
7731 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7732 return VINF_SUCCESS;
7733 }
7734
7735 /* VMCS-link pointer in non-root mode. */
7736 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7737 || IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
7738 { /* likely */ }
7739 else
7740 {
7741 Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
7742 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_LinkPtrInvalid;
7743 iemVmxVmFailInvalid(pVCpu);
7744 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7745 return VINF_SUCCESS;
7746 }
7747
7748 /* Supported VMCS field. */
7749 if (CPUMIsGuestVmxVmcsFieldValid(pVCpu->CTX_SUFF(pVM), u64VmcsField))
7750 { /* likely */ }
7751 else
7752 {
7753 Log(("vmread: VMCS field %#RX64 invalid -> VMFail\n", u64VmcsField));
7754 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_FieldInvalid;
7755 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = u64VmcsField;
7756 iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
7757 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7758 return VINF_SUCCESS;
7759 }
7760
7761 /*
7762 * Reading from the current or shadow VMCS.
7763 */
7764 PCVMXVVMCS pVmcs = !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7765 ? &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs
7766 : &pVCpu->cpum.GstCtx.hwvirt.vmx.ShadowVmcs;
7767 iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
7768 return VINF_SUCCESS;
7769}
7770
7771
7772/**
7773 * VMREAD (64-bit register) instruction execution worker.
7774 *
7775 * @returns Strict VBox status code.
7776 * @param pVCpu The cross context virtual CPU structure.
7777 * @param cbInstr The instruction length in bytes.
7778 * @param pu64Dst Where to store the VMCS field's value.
7779 * @param u64VmcsField The VMCS field.
7780 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
7781 * NULL.
7782 */
7783IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg64(PVMCPUCC pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64VmcsField,
7784 PCVMXVEXITINFO pExitInfo)
7785{
7786 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, u64VmcsField, pExitInfo);
7787 if (rcStrict == VINF_SUCCESS)
7788 {
7789 iemVmxVmreadSuccess(pVCpu, cbInstr);
7790 return VINF_SUCCESS;
7791 }
7792
7793 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7794 return rcStrict;
7795}
7796
7797
7798/**
7799 * VMREAD (32-bit register) instruction execution worker.
7800 *
7801 * @returns Strict VBox status code.
7802 * @param pVCpu The cross context virtual CPU structure.
7803 * @param cbInstr The instruction length in bytes.
7804 * @param pu32Dst Where to store the VMCS field's value.
7805 * @param u32VmcsField The VMCS field.
7806 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
7807 * NULL.
7808 */
7809IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg32(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t *pu32Dst, uint64_t u32VmcsField,
7810 PCVMXVEXITINFO pExitInfo)
7811{
7812 uint64_t u64Dst;
7813 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u32VmcsField, pExitInfo);
7814 if (rcStrict == VINF_SUCCESS)
7815 {
7816 *pu32Dst = u64Dst;
7817 iemVmxVmreadSuccess(pVCpu, cbInstr);
7818 return VINF_SUCCESS;
7819 }
7820
7821 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7822 return rcStrict;
7823}
7824
7825
7826/**
7827 * VMREAD (memory) instruction execution worker.
7828 *
7829 * @returns Strict VBox status code.
7830 * @param pVCpu The cross context virtual CPU structure.
7831 * @param cbInstr The instruction length in bytes.
7832 * @param iEffSeg The effective segment register to use with @a GCPtrDst.
7834 * @param GCPtrDst The guest linear address to store the VMCS field's
7835 * value.
7836 * @param u64VmcsField The VMCS field.
7837 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
7838 * NULL.
7839 */
7840IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDst, uint64_t u64VmcsField,
7841 PCVMXVEXITINFO pExitInfo)
7842{
7843 uint64_t u64Dst;
7844 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u64VmcsField, pExitInfo);
7845 if (rcStrict == VINF_SUCCESS)
7846 {
7847 /*
7848 * Write the VMCS field's value to the location specified in guest-memory.
7849 */
7850 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7851 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
7852 else
7853 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
7854 if (rcStrict == VINF_SUCCESS)
7855 {
7856 iemVmxVmreadSuccess(pVCpu, cbInstr);
7857 return VINF_SUCCESS;
7858 }
7859
7860 Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
7861 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrMap;
7862 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPtrDst;
7863 return rcStrict;
7864 }
7865
7866 Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7867 return rcStrict;
7868}
7869
7870
7871/**
7872 * VMWRITE instruction execution worker that does not perform any validation
7873 * checks.
7874 *
7875 * Callers are expected to have performed the necessary checks and to ensure the
7876 * VMWRITE will succeed.
7877 *
7878 * @param pVmcs Pointer to the virtual VMCS.
7879 * @param u64Val The value to write.
7880 * @param u64VmcsField The VMCS field.
7881 *
7882 * @remarks May be called with interrupts disabled.
7883 */
7884IEM_STATIC void iemVmxVmwriteNoCheck(PVMXVVMCS pVmcs, uint64_t u64Val, uint64_t u64VmcsField)
7885{
7886 VMXVMCSFIELD VmcsField;
7887 VmcsField.u = u64VmcsField;
7888 uint8_t const uWidth = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_WIDTH);
7889 uint8_t const uType = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_TYPE);
7890 uint8_t const uWidthType = (uWidth << 2) | uType;
7891 uint8_t const uIndex = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_INDEX);
7892 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
7893 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
7894 Assert(offField < VMX_V_VMCS_SIZE);
7895 AssertCompile(VMX_V_SHADOW_VMCS_SIZE == VMX_V_VMCS_SIZE);
7896
7897 /*
7898 * Write the VMCS component based on the field's effective width.
7899 *
7900 * The effective width is the 64-bit field width adjusted to 32 bits if the access
7901 * type indicates the high bits (little endian).
7902 */
7903 uint8_t *pbVmcs = (uint8_t *)pVmcs;
7904 uint8_t *pbField = pbVmcs + offField;
7905 uint8_t const uEffWidth = VMXGetVmcsFieldWidthEff(VmcsField.u);
7906 switch (uEffWidth)
7907 {
7908 case VMX_VMCSFIELD_WIDTH_64BIT:
7909 case VMX_VMCSFIELD_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
7910 case VMX_VMCSFIELD_WIDTH_32BIT: *(uint32_t *)pbField = u64Val; break;
7911 case VMX_VMCSFIELD_WIDTH_16BIT: *(uint16_t *)pbField = u64Val; break;
7912 }
7913}
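/*
 * Note for illustration: the switch above silently truncates to the field's effective width.
 * E.g. writing 0x123456789abcdef0 to a 32-bit field stores 0x9abcdef0 and to a 16-bit field
 * stores 0xdef0 (assuming the little-endian virtual-VMCS layout mapped by g_aoffVmcsMap).
 */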
7914
7915
7916/**
7917 * VMWRITE instruction execution worker.
7918 *
7919 * @returns Strict VBox status code.
7920 * @param pVCpu The cross context virtual CPU structure.
7921 * @param cbInstr The instruction length in bytes.
7922 * @param iEffSeg The effective segment register to use with @a u64Val.
7923 * Pass UINT8_MAX if it is a register access.
7924 * @param u64Val The value to write (or the guest linear address of the
7925 * value); @a iEffSeg indicates whether it is a memory
7926 * operand.
7927 * @param u64VmcsField The VMCS field.
7928 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
7929 * NULL.
7930 */
7931IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, uint64_t u64Val, uint64_t u64VmcsField,
7932 PCVMXVEXITINFO pExitInfo)
7933{
7934 /* Nested-guest intercept. */
7935 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7936 && CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMWRITE, u64VmcsField))
7937 {
7938 if (pExitInfo)
7939 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7940 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMWRITE, VMXINSTRID_VMWRITE, cbInstr);
7941 }
7942
7943 /* CPL. */
7944 if (pVCpu->iem.s.uCpl == 0)
7945 { /* likely */ }
7946 else
7947 {
7948 Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7949 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_Cpl;
7950 return iemRaiseGeneralProtectionFault0(pVCpu);
7951 }
7952
7953 /* VMCS pointer in root mode. */
7954 if ( !IEM_VMX_IS_ROOT_MODE(pVCpu)
7955 || IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
7956 { /* likely */ }
7957 else
7958 {
7959 Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7960 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrInvalid;
7961 iemVmxVmFailInvalid(pVCpu);
7962 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7963 return VINF_SUCCESS;
7964 }
7965
7966 /* VMCS-link pointer in non-root mode. */
7967 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7968 || IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
7969 { /* likely */ }
7970 else
7971 {
7972 Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
7973 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_LinkPtrInvalid;
7974 iemVmxVmFailInvalid(pVCpu);
7975 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7976 return VINF_SUCCESS;
7977 }
7978
7979 /* If the VMWRITE instruction references memory, access the specified memory operand. */
7980 bool const fIsRegOperand = iEffSeg == UINT8_MAX;
7981 if (!fIsRegOperand)
7982 {
7983 /* Read the value from the specified guest memory location. */
7984 VBOXSTRICTRC rcStrict;
7985 RTGCPTR const GCPtrVal = u64Val;
7986 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7987 rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
7988 else
7989 rcStrict = iemMemFetchDataU32_ZX_U64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
7990 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7991 {
7992 Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
7993 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrMap;
7994 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPtrVal;
7995 return rcStrict;
7996 }
7997 }
7998 else
7999 Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
8000
8001 /* Supported VMCS field. */
8002 if (CPUMIsGuestVmxVmcsFieldValid(pVCpu->CTX_SUFF(pVM), u64VmcsField))
8003 { /* likely */ }
8004 else
8005 {
8006 Log(("vmwrite: VMCS field %#RX64 invalid -> VMFail\n", u64VmcsField));
8007 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldInvalid;
8008 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = u64VmcsField;
8009 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
8010 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8011 return VINF_SUCCESS;
8012 }
8013
8014 /* Read-only VMCS field. */
8015 bool const fIsFieldReadOnly = VMXIsVmcsFieldReadOnly(u64VmcsField);
8016 if ( !fIsFieldReadOnly
8017 || IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
8018 { /* likely */ }
8019 else
8020 {
8021 Log(("vmwrite: Write to read-only VMCS component %#RX64 -> VMFail\n", u64VmcsField));
8022 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldRo;
8023 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = u64VmcsField;
8024 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
8025 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8026 return VINF_SUCCESS;
8027 }
8028
8029 /*
8030 * Write to the current or shadow VMCS.
8031 */
8032 bool const fInVmxNonRootMode = IEM_VMX_IS_NON_ROOT_MODE(pVCpu);
8033 PVMXVVMCS pVmcs = !fInVmxNonRootMode
8034 ? &pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs
8035 : &pVCpu->cpum.GstCtx.hwvirt.vmx.ShadowVmcs;
8036 iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
8037
8038 /* Notify HM that the VMCS content might have changed. */
8039 if (!fInVmxNonRootMode)
8040 HMNotifyVmxNstGstCurrentVmcsChanged(pVCpu);
8041
8042 iemVmxVmSucceed(pVCpu);
8043 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8044 return VINF_SUCCESS;
8045}
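/*
 * For illustration, the register and memory operand forms of the worker above differ only
 * in @a iEffSeg (hypothetical snippet, not compiled; VMX_VMCS64_CTRL_TSC_OFFSET_FULL is
 * assumed to be a valid, writable field encoding):
 *
 *     // Register operand: the value is passed directly.
 *     iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX, uTscOffset, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, NULL);
 *
 *     // Memory operand: u64Val holds the guest linear address of the value.
 *     iemVmxVmwrite(pVCpu, cbInstr, X86_SREG_DS, GCPtrTscOffset, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, NULL);
 */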
8046
8047
8048/**
8049 * VMCLEAR instruction execution worker.
8050 *
8051 * @returns Strict VBox status code.
8052 * @param pVCpu The cross context virtual CPU structure.
8053 * @param cbInstr The instruction length in bytes.
8054 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
8055 * @param GCPtrVmcs The linear address of the VMCS pointer.
8056 * @param pExitInfo Pointer to the VM-exit information. Optional, can be NULL.
8057 *
8058 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
8059 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
8060 */
8061IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
8062 PCVMXVEXITINFO pExitInfo)
8063{
8064 /* Nested-guest intercept. */
8065 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8066 {
8067 if (pExitInfo)
8068 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
8069 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMCLEAR, VMXINSTRID_NONE, cbInstr);
8070 }
8071
8072 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
8073
8074 /* CPL. */
8075 if (pVCpu->iem.s.uCpl == 0)
8076 { /* likely */ }
8077 else
8078 {
8079 Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8080 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_Cpl;
8081 return iemRaiseGeneralProtectionFault0(pVCpu);
8082 }
8083
8084 /* Get the VMCS pointer from the location specified by the source memory operand. */
8085 RTGCPHYS GCPhysVmcs;
8086 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
8087 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8088 { /* likely */ }
8089 else
8090 {
8091 Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
8092 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrMap;
8093 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPtrVmcs;
8094 return rcStrict;
8095 }
8096
8097 /* VMCS pointer alignment. */
8098 if (!(GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK))
8099 { /* likely */ }
8100 else
8101 {
8102 Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
8103 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAlign;
8104 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8105 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
8106 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8107 return VINF_SUCCESS;
8108 }
8109
8110 /* VMCS physical-address width limits. */
8111 if (!(GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth))
8112 { /* likely */ }
8113 else
8114 {
8115 Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
8116 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrWidth;
8117 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8118 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
8119 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8120 return VINF_SUCCESS;
8121 }
8122
8123 /* VMCS is not the VMXON region. */
8124 if (GCPhysVmcs != pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
8125 { /* likely */ }
8126 else
8127 {
8128 Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
8129 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrVmxon;
8130 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8131 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
8132 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8133 return VINF_SUCCESS;
8134 }
8135
8136 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
8137 restriction imposed by our implementation. */
8138 if (PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
8139 { /* likely */ }
8140 else
8141 {
8142 Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
8143 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAbnormal;
8144 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8145 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
8146 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8147 return VINF_SUCCESS;
8148 }
8149
8150 /*
8151 * VMCLEAR allows committing and clearing any valid VMCS pointer.
8152 *
8153 * If the current VMCS is the one being cleared, set its state to 'clear' and commit
8154 * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
8155 * to 'clear'.
8156 */
8157 uint8_t const fVmcsLaunchStateClear = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
8158 if ( IEM_VMX_HAS_CURRENT_VMCS(pVCpu)
8159 && IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
8160 {
8161 pVCpu->cpum.GstCtx.hwvirt.vmx.Vmcs.fVmcsState = fVmcsLaunchStateClear;
8162 iemVmxWriteCurrentVmcsToGstMem(pVCpu);
8163 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
8164 }
8165 else
8166 {
8167 AssertCompileMemberSize(VMXVVMCS, fVmcsState, sizeof(fVmcsLaunchStateClear));
8168 rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_UOFFSETOF(VMXVVMCS, fVmcsState),
8169 (const void *)&fVmcsLaunchStateClear, sizeof(fVmcsLaunchStateClear));
8170 if (RT_FAILURE(rcStrict))
8171 return rcStrict;
8172 }
8173
8174 iemVmxVmSucceed(pVCpu);
8175 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8176 return VINF_SUCCESS;
8177}
8178
8179
8180/**
8181 * VMPTRST instruction execution worker.
8182 *
8183 * @returns Strict VBox status code.
8184 * @param pVCpu The cross context virtual CPU structure.
8185 * @param cbInstr The instruction length in bytes.
8186 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
8187 * @param GCPtrVmcs The linear address of where to store the current VMCS
8188 * pointer.
8189 * @param pExitInfo Pointer to the VM-exit information. Optional, can be NULL.
8190 *
8191 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
8192 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
8193 */
8194IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
8195 PCVMXVEXITINFO pExitInfo)
8196{
8197 /* Nested-guest intercept. */
8198 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8199 {
8200 if (pExitInfo)
8201 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
8202 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRST, VMXINSTRID_NONE, cbInstr);
8203 }
8204
8205 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
8206
8207 /* CPL. */
8208 if (pVCpu->iem.s.uCpl == 0)
8209 { /* likely */ }
8210 else
8211 {
8212 Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8213 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_Cpl;
8214 return iemRaiseGeneralProtectionFault0(pVCpu);
8215 }
8216
8217 /* Set the VMCS pointer to the location specified by the destination memory operand. */
8218 AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
8219 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
8220 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8221 {
8222 iemVmxVmSucceed(pVCpu);
8223 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8224 return rcStrict;
8225 }
8226
8227 Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand %#Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8228 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_PtrMap;
8229 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPtrVmcs;
8230 return rcStrict;
8231}
8232
8233
8234/**
8235 * VMPTRLD instruction execution worker.
8236 *
8237 * @returns Strict VBox status code.
8238 * @param pVCpu The cross context virtual CPU structure.
8239 * @param cbInstr The instruction length in bytes.
 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
8240 * @param GCPtrVmcs The linear address of the current VMCS pointer.
8241 * @param pExitInfo Pointer to the VM-exit information. Optional, can be NULL.
8242 *
8243 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
8244 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
8245 */
8246IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
8247 PCVMXVEXITINFO pExitInfo)
8248{
8249 /* Nested-guest intercept. */
8250 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8251 {
8252 if (pExitInfo)
8253 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
8254 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRLD, VMXINSTRID_NONE, cbInstr);
8255 }
8256
8257 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
8258
8259 /* CPL. */
8260 if (pVCpu->iem.s.uCpl == 0)
8261 { /* likely */ }
8262 else
8263 {
8264 Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8265 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_Cpl;
8266 return iemRaiseGeneralProtectionFault0(pVCpu);
8267 }
8268
8269 /* Get the VMCS pointer from the location specified by the source memory operand. */
8270 RTGCPHYS GCPhysVmcs;
8271 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
8272 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8273 { /* likely */ }
8274 else
8275 {
8276 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
8277 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrMap;
8278 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPtrVmcs;
8279 return rcStrict;
8280 }
8281
8282 /* VMCS pointer alignment. */
8283 if (!(GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK))
8284 { /* likely */ }
8285 else
8286 {
8287 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
8288 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAlign;
8289 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8290 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
8291 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8292 return VINF_SUCCESS;
8293 }
8294
8295 /* VMCS physical-address width limits. */
8296 if (!(GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth))
8297 { /* likely */ }
8298 else
8299 {
8300 Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
8301 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrWidth;
8302 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8303 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
8304 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8305 return VINF_SUCCESS;
8306 }
8307
8308 /* VMCS is not the VMXON region. */
8309 if (GCPhysVmcs != pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
8310 { /* likely */ }
8311 else
8312 {
8313 Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
8314 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrVmxon;
8315 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8316 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
8317 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8318 return VINF_SUCCESS;
8319 }
8320
8321 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
8322 restriction imposed by our implementation. */
8323 if (PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
8324 { /* likely */ }
8325 else
8326 {
8327 Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
8328 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAbnormal;
8329 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8330 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
8331 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8332 return VINF_SUCCESS;
8333 }
8334
8335 /* Read just the VMCS revision from the VMCS. */
8336 VMXVMCSREVID VmcsRevId;
8337 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
8338 if (RT_SUCCESS(rc))
8339 { /* likely */ }
8340 else
8341 {
8342 Log(("vmptrld: Failed to read revision identifier from VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
8343 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_RevPtrReadPhys;
8344 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8345 return rc;
8346 }
8347
8348 /*
8349 * Verify the VMCS revision specified by the guest matches what we reported to the guest.
8350 * Also verify that the VMCS is not a shadow VMCS unless the VMCS-shadowing feature is supported.
8351 */
8352 if ( VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID
8353 && ( !VmcsRevId.n.fIsShadowVmcs
8354 || IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
8355 { /* likely */ }
8356 else
8357 {
8358 if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
8359 {
8360 Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32, GCPtrVmcs=%#RGv GCPhysVmcs=%#RGp -> VMFail()\n",
8361 VMX_V_VMCS_REVISION_ID, VmcsRevId.n.u31RevisionId, GCPtrVmcs, GCPhysVmcs));
8362 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_VmcsRevId;
8363 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
8364 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8365 return VINF_SUCCESS;
8366 }
8367
8368 Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
8369 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_ShadowVmcs;
8370 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
8371 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8372 return VINF_SUCCESS;
8373 }
8374
8375 /*
8376 * We cache only the current VMCS in CPUMCTX. Therefore, VMPTRLD should always flush
8377 * the cache of an existing, current VMCS back to guest memory before loading a new,
8378 * different current VMCS.
8379 */
8380 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
8381 {
8382 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
8383 {
8384 iemVmxWriteCurrentVmcsToGstMem(pVCpu);
8385 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
8386 }
8387
8388 /* Set the new VMCS as the current VMCS and read it from guest memory. */
8389 IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
8390 rc = iemVmxReadCurrentVmcsFromGstMem(pVCpu);
8391 if (RT_SUCCESS(rc))
8392 {
8393 /* Notify HM that a new, current VMCS is loaded. */
8394 HMNotifyVmxNstGstCurrentVmcsChanged(pVCpu);
8395 }
8396 else
8397 {
8398 Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
8399 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrReadPhys;
8400 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmcs;
8401 return rc;
8402 }
8403 }
8404
8405 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
8406 iemVmxVmSucceed(pVCpu);
8407 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8408 return VINF_SUCCESS;
8409}
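/*
 * For illustration, the revision identifier a nested hypervisor must place at offset 0 of a
 * VMCS region before VMCLEAR/VMPTRLD can be composed like this (hypothetical snippet, not
 * compiled):
 *
 *     VMXVMCSREVID VmcsRevId;
 *     VmcsRevId.u               = 0;
 *     VmcsRevId.n.u31RevisionId = VMX_V_VMCS_REVISION_ID;   // Must match what we report.
 *     VmcsRevId.n.fIsShadowVmcs = 0;                        // 1 only if VMCS shadowing is exposed.
 */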
8410
8411
8412/**
8413 * INVVPID instruction execution worker.
8414 *
8415 * @returns Strict VBox status code.
8416 * @param pVCpu The cross context virtual CPU structure.
8417 * @param cbInstr The instruction length in bytes.
8418 * @param iEffSeg The segment of the invvpid descriptor.
8419 * @param GCPtrInvvpidDesc The address of the invvpid descriptor.
8420 * @param u64InvvpidType The invalidation type.
8421 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
8422 * NULL.
8423 *
8424 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
8425 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
8426 */
8427IEM_STATIC VBOXSTRICTRC iemVmxInvvpid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrInvvpidDesc,
8428 uint64_t u64InvvpidType, PCVMXVEXITINFO pExitInfo)
8429{
8430 /* Check if INVVPID instruction is supported, otherwise raise #UD. */
8431 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVpid)
8432 return iemRaiseUndefinedOpcode(pVCpu);
8433
8434 /* Nested-guest intercept. */
8435 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8436 {
8437 if (pExitInfo)
8438 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
8439 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_INVVPID, VMXINSTRID_NONE, cbInstr);
8440 }
8441
8442 /* CPL. */
8443 if (pVCpu->iem.s.uCpl != 0)
8444 {
8445 Log(("invvpid: CPL != 0 -> #GP(0)\n"));
8446 return iemRaiseGeneralProtectionFault0(pVCpu);
8447 }
8448
8449 /*
8450 * Validate INVVPID invalidation type.
8451 *
8452 * The instruction specifies exactly ONE of the supported invalidation types.
8453 *
8454 * Each type has a bit in the IA32_VMX_EPT_VPID_CAP MSR specifying whether it is
8455 * supported. In theory, a CPU could support all the other types but not flushing of
8456 * individual addresses, or any other combination. So we do not take any shortcuts
8457 * here by assuming only the types we currently expose to the guest.
8458 */
8459 uint64_t const fCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps;
8460 uint8_t const fTypeIndivAddr = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
8461 uint8_t const fTypeSingleCtx = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX);
8462 uint8_t const fTypeAllCtx = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_ALL_CTX);
8463 uint8_t const fTypeSingleCtxRetainGlobals = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX_RETAIN_GLOBALS);
8464 if ( (fTypeIndivAddr && u64InvvpidType == VMXTLBFLUSHVPID_INDIV_ADDR)
8465 || (fTypeSingleCtx && u64InvvpidType == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
8466 || (fTypeAllCtx && u64InvvpidType == VMXTLBFLUSHVPID_ALL_CONTEXTS)
8467 || (fTypeSingleCtxRetainGlobals && u64InvvpidType == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS))
8468 { /* likely */ }
8469 else
8470 {
8471 Log(("invvpid: invalid/unsupported invvpid type %#x -> VMFail\n", u64InvvpidType));
8472 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_TypeInvalid;
8473 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = u64InvvpidType;
8474 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
8475 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8476 return VINF_SUCCESS;
8477 }
8478
8479 /*
8480 * Fetch the invvpid descriptor from guest memory.
8481 */
8482 RTUINT128U uDesc;
8483 VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInvvpidDesc);
8484 if (rcStrict == VINF_SUCCESS)
8485 {
8486 /*
8487 * Validate the descriptor.
8488 */
8489 if (uDesc.s.Lo > 0xffff)
8490 {
8491 Log(("invvpid: reserved bits set in invvpid descriptor %#RX64 -> #GP(0)\n", uDesc.s.Lo));
8492 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_DescRsvd;
8493 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = uDesc.s.Lo;
8494 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
8495 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8496 return VINF_SUCCESS;
8497 }
8498
8499 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
8500 RTGCUINTPTR64 const GCPtrInvAddr = uDesc.s.Hi;
8501 uint16_t const uVpid = uDesc.s.Lo & UINT64_C(0xffff);
8502 uint64_t const uCr3 = pVCpu->cpum.GstCtx.cr3;
8503 switch (u64InvvpidType)
8504 {
8505 case VMXTLBFLUSHVPID_INDIV_ADDR:
8506 {
8507 if (uVpid != 0)
8508 {
8509 if (IEM_IS_CANONICAL(GCPtrInvAddr))
8510 {
8511 /* Invalidate mappings for the linear address tagged with VPID. */
8512 /** @todo PGM support for VPID? Currently just flush everything. */
8513 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
8514 iemVmxVmSucceed(pVCpu);
8515 }
8516 else
8517 {
8518 Log(("invvpid: invalidation address %#RGv is not canonical -> VMFail\n", GCPtrInvAddr));
8519 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type0InvalidAddr;
8520 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPtrInvAddr;
8521 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
8522 }
8523 }
8524 else
8525 {
8526 Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, u64InvvpidType));
8527 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type0InvalidVpid;
8528 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = u64InvvpidType;
8529 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
8530 }
8531 break;
8532 }
8533
8534 case VMXTLBFLUSHVPID_SINGLE_CONTEXT:
8535 {
8536 if (uVpid != 0)
8537 {
8538 /* Invalidate all mappings with VPID. */
8539 /** @todo PGM support for VPID? Currently just flush everything. */
8540 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
8541 iemVmxVmSucceed(pVCpu);
8542 }
8543 else
8544 {
8545 Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, u64InvvpidType));
8546 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type1InvalidVpid;
8547 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = u64InvvpidType;
8548 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
8549 }
8550 break;
8551 }
8552
8553 case VMXTLBFLUSHVPID_ALL_CONTEXTS:
8554 {
8555 /* Invalidate all mappings with non-zero VPIDs. */
8556 /** @todo PGM support for VPID? Currently just flush everything. */
8557 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
8558 iemVmxVmSucceed(pVCpu);
8559 break;
8560 }
8561
8562 case VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS:
8563 {
8564 if (uVpid != 0)
8565 {
8566 /* Invalidate all mappings with VPID except global translations. */
8567 /** @todo PGM support for VPID? Currently just flush everything. */
8568 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
8569 iemVmxVmSucceed(pVCpu);
8570 }
8571 else
8572 {
8573 Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, u64InvvpidType));
8574 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type3InvalidVpid;
8575 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = uVpid;
8576 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
8577 }
8578 break;
8579 }
8580 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8581 }
8582 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8583 }
8584 return rcStrict;
8585}
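/*
 * For illustration, the invvpid descriptor fetched above carries the VPID in bits 15:0,
 * reserved (MBZ) bits in 63:16 and the linear address (individual-address type only) in
 * bits 127:64; building one could look like this (hypothetical snippet, not compiled):
 *
 *     RTUINT128U uDesc;
 *     uDesc.s.Lo = uVpid;             // Bits 15:0 = VPID, bits 63:16 must be zero.
 *     uDesc.s.Hi = GCPtrLinearAddr;   // Bits 127:64 = linear address for the type-0 flush.
 */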
8586
8587
8588/**
8589 * VMXON instruction execution worker.
8590 *
8591 * @returns Strict VBox status code.
8592 * @param pVCpu The cross context virtual CPU structure.
8593 * @param cbInstr The instruction length in bytes.
8594 * @param iEffSeg The effective segment register to use with @a
8595 * GCPtrVmxon.
8596 * @param GCPtrVmxon The linear address of the VMXON pointer.
8597 * @param pExitInfo Pointer to the VM-exit information. Optional, can be NULL.
8598 *
8599 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
8600 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
8601 */
8602IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmxon,
8603 PCVMXVEXITINFO pExitInfo)
8604{
8605 if (!IEM_VMX_IS_ROOT_MODE(pVCpu))
8606 {
8607 /* CPL. */
8608 if (pVCpu->iem.s.uCpl == 0)
8609 { /* likely */ }
8610 else
8611 {
8612 Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8613 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cpl;
8614 return iemRaiseGeneralProtectionFault0(pVCpu);
8615 }
8616
8617 /* A20M (A20 Masked) mode. */
8618 if (PGMPhysIsA20Enabled(pVCpu))
8619 { /* likely */ }
8620 else
8621 {
8622 Log(("vmxon: A20M mode -> #GP(0)\n"));
8623 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_A20M;
8624 return iemRaiseGeneralProtectionFault0(pVCpu);
8625 }
8626
8627 /* CR0. */
8628 {
8629 /* CR0 MB1 bits. */
8630 uint64_t const uCr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
8631 if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) == uCr0Fixed0)
8632 { /* likely */ }
8633 else
8634 {
8635 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
8636 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed0;
8637 return iemRaiseGeneralProtectionFault0(pVCpu);
8638 }
8639
8640 /* CR0 MBZ bits. */
8641 uint64_t const uCr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
8642 if (!(pVCpu->cpum.GstCtx.cr0 & ~uCr0Fixed1))
8643 { /* likely */ }
8644 else
8645 {
8646 Log(("vmxon: CR0 fixed1 bits set -> #GP(0)\n"));
8647 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed1;
8648 return iemRaiseGeneralProtectionFault0(pVCpu);
8649 }
8650 }
8651
8652 /* CR4. */
8653 {
8654 /* CR4 MB1 bits. */
8655 uint64_t const uCr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
8656 if ((pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0) == uCr4Fixed0)
8657 { /* likely */ }
8658 else
8659 {
8660 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
8661 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed0;
8662 return iemRaiseGeneralProtectionFault0(pVCpu);
8663 }
8664
8665 /* CR4 MBZ bits. */
8666 uint64_t const uCr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
8667 if (!(pVCpu->cpum.GstCtx.cr4 & ~uCr4Fixed1))
8668 { /* likely */ }
8669 else
8670 {
8671 Log(("vmxon: CR4 fixed1 bits set -> #GP(0)\n"));
8672 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed1;
8673 return iemRaiseGeneralProtectionFault0(pVCpu);
8674 }
8675 }
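/*
 * For illustration: a control-register value is acceptable for VMX operation when every bit
 * that is 1 in the FIXED0 MSR is set and every bit that is 0 in the FIXED1 MSR is clear,
 * i.e. (illustrative expression only):
 *
 *     bool const fCr0Ok =    (uCr0 & uCr0Fixed0) == uCr0Fixed0    // MB1 bits.
 *                         && !(uCr0 & ~uCr0Fixed1);               // MBZ bits.
 */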
8676
8677 /* Feature control MSR's LOCK and VMXON bits. */
8678 uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatCtrl(pVCpu);
8679 if ((uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
8680 == (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
8681 { /* likely */ }
8682 else
8683 {
8684 Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
8685 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_MsrFeatCtl;
8686 return iemRaiseGeneralProtectionFault0(pVCpu);
8687 }
8688
8689 /* Get the VMXON pointer from the location specified by the source memory operand. */
8690 RTGCPHYS GCPhysVmxon;
8691 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
8692 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8693 { /* likely */ }
8694 else
8695 {
8696 Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
8697 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrMap;
8698 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPtrVmxon;
8699 return rcStrict;
8700 }
8701
8702 /* VMXON region pointer alignment. */
8703 if (!(GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK))
8704 { /* likely */ }
8705 else
8706 {
8707 Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
8708 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAlign;
8709 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmxon;
8710 iemVmxVmFailInvalid(pVCpu);
8711 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8712 return VINF_SUCCESS;
8713 }
8714
8715 /* VMXON physical-address width limits. */
8716 if (!(GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth))
8717 { /* likely */ }
8718 else
8719 {
8720 Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
8721 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrWidth;
8722 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmxon;
8723 iemVmxVmFailInvalid(pVCpu);
8724 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8725 return VINF_SUCCESS;
8726 }
8727
8728 /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
8729 restriction imposed by our implementation. */
8730 if (PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
8731 { /* likely */ }
8732 else
8733 {
8734 Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
8735 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAbnormal;
8736 pVCpu->cpum.GstCtx.hwvirt.vmx.uDiagAux = GCPhysVmxon;
8737 iemVmxVmFailInvalid(pVCpu);
8738 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8739 return VINF_SUCCESS;
8740 }
8741
8742 /* Read the VMCS revision ID from the VMXON region. */
8743 VMXVMCSREVID VmcsRevId;
8744 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
8745 if (RT_SUCCESS(rc))
8746 { /* likely */ }
8747 else
8748 {
8749 Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
8750 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrReadPhys;
8751 return rc;
8752 }
8753
8754 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
8755 if (RT_LIKELY(VmcsRevId.u == VMX_V_VMCS_REVISION_ID))
8756 { /* likely */ }
8757 else
8758 {
8759 /* Revision ID mismatch. */
8760 if (!VmcsRevId.n.fIsShadowVmcs)
8761 {
8762 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
8763 VmcsRevId.n.u31RevisionId));
8764 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmcsRevId;
8765 iemVmxVmFailInvalid(pVCpu);
8766 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8767 return VINF_SUCCESS;
8768 }
8769
8770 /* Shadow VMCS disallowed. */
8771 Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
8772 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_ShadowVmcs;
8773 iemVmxVmFailInvalid(pVCpu);
8774 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8775 return VINF_SUCCESS;
8776 }
8777
8778 /*
8779 * Record that we're in VMX operation, block INIT, block and disable A20M.
8780 */
8781 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
8782 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
8783 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
8784
8785 /* Clear address-range monitoring. */
8786 EMMonitorWaitClear(pVCpu);
8787 /** @todo NSTVMX: Intel PT. */
8788
8789 iemVmxVmSucceed(pVCpu);
8790 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8791 return VINF_SUCCESS;
8792 }
8793 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8794 {
8795 /* Nested-guest intercept. */
8796 if (pExitInfo)
8797 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
8798 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMXON, VMXINSTRID_NONE, cbInstr);
8799 }
8800
8801 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
8802
8803 /* CPL. */
8804 if (pVCpu->iem.s.uCpl > 0)
8805 {
8806 Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8807 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxRootCpl;
8808 return iemRaiseGeneralProtectionFault0(pVCpu);
8809 }
8810
8811 /* VMXON when already in VMX root mode. */
8812 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
8813 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxAlreadyRoot;
8814 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8815 return VINF_SUCCESS;
8816}
8817
8818
8819/**
8820 * Implements 'VMXOFF'.
8821 *
8822 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
8823 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
8824 */
8825IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
8826{
8827 /* Nested-guest intercept. */
8828 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8829 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMXOFF, cbInstr);
8830
8831 /* CPL. */
8832 if (pVCpu->iem.s.uCpl == 0)
8833 { /* likely */ }
8834 else
8835 {
8836 Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8837 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxoff_Cpl;
8838 return iemRaiseGeneralProtectionFault0(pVCpu);
8839 }
8840
8841 /* Dual monitor treatment of SMIs and SMM. */
8842 uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
8843 if (!(fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID))
8844 { /* likely */ }
8845 else
8846 {
8847 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
8848 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8849 return VINF_SUCCESS;
8850 }
8851
8852 /* Record that we're no longer in VMX root operation, unblock INIT and unblock/enable A20M. */
8853 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
8854 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
8855
8856 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
8857 { /** @todo NSTVMX: Unblock SMI. */ }
8858
8859 EMMonitorWaitClear(pVCpu);
8860 /** @todo NSTVMX: Unblock and enable A20M. */
8861
8862 iemVmxVmSucceed(pVCpu);
8863 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8864 return VINF_SUCCESS;
8865}
8866
8867
8868/**
8869 * Implements 'VMXON'.
8870 */
8871IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
8872{
8873 return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
8874}
8875
8876
8877/**
8878 * Implements 'VMLAUNCH'.
8879 */
8880IEM_CIMPL_DEF_0(iemCImpl_vmlaunch)
8881{
8882 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMLAUNCH);
8883}
8884
8885
8886/**
8887 * Implements 'VMRESUME'.
8888 */
8889IEM_CIMPL_DEF_0(iemCImpl_vmresume)
8890{
8891 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMRESUME);
8892}
8893
8894
8895/**
8896 * Implements 'VMPTRLD'.
8897 */
8898IEM_CIMPL_DEF_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
8899{
8900 return iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
8901}
8902
8903
8904/**
8905 * Implements 'VMPTRST'.
8906 */
8907IEM_CIMPL_DEF_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
8908{
8909 return iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
8910}
8911
8912
8913/**
8914 * Implements 'VMCLEAR'.
8915 */
8916IEM_CIMPL_DEF_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
8917{
8918 return iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
8919}
8920
8921
8922/**
8923 * Implements 'VMWRITE' register.
8924 */
8925IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64VmcsField)
8926{
8927 return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, u64Val, u64VmcsField, NULL /* pExitInfo */);
8928}
8929
8930
8931/**
8932 * Implements 'VMWRITE' memory.
8933 */
8934IEM_CIMPL_DEF_3(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrVal, uint32_t, u64VmcsField)
8935{
8936 return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, GCPtrVal, u64VmcsField, NULL /* pExitInfo */);
8937}
8938
8939
8940/**
8941 * Implements 'VMREAD' register (64-bit).
8942 */
8943IEM_CIMPL_DEF_2(iemCImpl_vmread_reg64, uint64_t *, pu64Dst, uint64_t, u64VmcsField)
8944{
8945 return iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64VmcsField, NULL /* pExitInfo */);
8946}
8947
8948
8949/**
8950 * Implements 'VMREAD' register (32-bit).
8951 */
8952IEM_CIMPL_DEF_2(iemCImpl_vmread_reg32, uint32_t *, pu32Dst, uint32_t, u32VmcsField)
8953{
8954 return iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u32VmcsField, NULL /* pExitInfo */);
8955}
8956
8957
8958/**
8959 * Implements 'VMREAD' memory, 64-bit register.
8960 */
8961IEM_CIMPL_DEF_3(iemCImpl_vmread_mem_reg64, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u64VmcsField)
8962{
8963 return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64VmcsField, NULL /* pExitInfo */);
8964}
8965
8966
8967/**
8968 * Implements 'VMREAD' memory, 32-bit register.
8969 */
8970IEM_CIMPL_DEF_3(iemCImpl_vmread_mem_reg32, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u32VmcsField)
8971{
8972 return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u32VmcsField, NULL /* pExitInfo */);
8973}
8974
8975
8976/**
8977 * Implements 'INVVPID'.
8978 */
8979IEM_CIMPL_DEF_3(iemCImpl_invvpid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvvpidDesc, uint64_t, uInvvpidType)
8980{
8981 return iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, uInvvpidType, NULL /* pExitInfo */);
8982}
8983
8984
8985/**
 8986 * Implements the VMX-specific behavior of 'PAUSE'.
8987 */
8988IEM_CIMPL_DEF_0(iemCImpl_vmx_pause)
8989{
8990 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8991 {
8992 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrPause(pVCpu, cbInstr);
8993 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
8994 return rcStrict;
8995 }
8996
8997 /*
8998 * Outside VMX non-root operation or if the PAUSE instruction does not cause
8999 * a VM-exit, the instruction operates normally.
9000 */
9001 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
9002 return VINF_SUCCESS;
9003}
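
#if 0 /* Editor's illustrative sketch, not part of the original file. */
/*
 * A rough sketch of the decision iemVmxVmexitInstrPause has to make, per the Intel SDM:
 * the "PAUSE exiting" primary control makes every PAUSE cause a VM-exit, while the
 * "PAUSE-loop exiting" secondary control applies only at CPL 0 and uses the PLE_Gap and
 * PLE_Window VMCS fields to detect spin loops.  The function name and the tick-counting
 * parameters are hypothetical; the control-bit names are assumed from the VirtualBox
 * VMX headers.
 */
static bool iemVmxSketchPauseCausesVmexit(uint32_t fProcCtls, uint32_t fProcCtls2, uint8_t uCpl,
                                          uint64_t cTicksSincePrevPause, uint64_t cTicksInPauseLoop,
                                          uint32_t uPleGap, uint32_t uPleWindow)
{
    /* "PAUSE exiting": unconditional VM-exit on PAUSE. */
    if (fProcCtls & VMX_PROC_CTLS_PAUSE_EXIT)
        return true;
    /* "PAUSE-loop exiting": only considered at CPL 0. */
    if (   (fProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
        && uCpl == 0)
    {
        /* A gap larger than PLE_Gap since the previous PAUSE starts a new loop; otherwise a
           VM-exit occurs once the time spent in the current loop exceeds PLE_Window. */
        if (   cTicksSincePrevPause <= uPleGap
            && cTicksInPauseLoop    >  uPleWindow)
            return true;
    }
    return false;
}
#endif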
9004
9005#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9006
9007
9008/**
9009 * Implements 'VMCALL'.
9010 */
9011IEM_CIMPL_DEF_0(iemCImpl_vmcall)
9012{
9013#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9014 /* Nested-guest intercept. */
9015 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
9016 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMCALL, cbInstr);
9017#endif
9018
9019 /* Join forces with vmmcall. */
9020 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
9021}
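
#if 0 /* Editor's illustrative sketch, not part of the original file. */
/*
 * From the guest's point of view a VMCALL hypercall is just the instruction with an
 * ABI-defined register convention.  A minimal, hypothetical GCC-style example; the
 * actual register usage depends on the hypercall interface the VMM exposes to the
 * guest (see iemCImpl_Hypercall).
 */
static inline uint64_t guestSketchVmcall(uint64_t uFunction, uint64_t uArg0)
{
    uint64_t uRet;
    __asm__ __volatile__("vmcall" : "=a" (uRet) : "a" (uFunction), "b" (uArg0) : "memory");
    return uRet;
}
#endif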
9022