VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h@ 79257

Last change on this file since 79257 was 79235, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 Some symmetry with regards to clearing and loading the current VMCS. Renamed iemVmxCommitCurrentVmcsToMemory to iemVmxWriteCurrentVmcsToGstMem and added iemVmxReadCurrentVmcsFromGstMem. Will make it easier to add HM hooks in the right places with clearer names. Also the VMCS read/write functions do not set/clear the current VMCS ptr anymore, let the callers do this.
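In practice this means the current-VMCS pointer is now managed explicitly at the call sites rather than inside the read/write helpers. A minimal caller-side sketch of that pairing (the call sites shown here are hypothetical; the helpers and the IEM_VMX_SET/CLEAR_CURRENT_VMCS macros are defined in the file below):

    /* VMCLEAR-style path: flush the shadow VMCS to guest memory, then drop the current-VMCS pointer. */
    int rc = iemVmxWriteCurrentVmcsToGstMem(pVCpu);
    if (RT_SUCCESS(rc))
        IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);

    /* VMPTRLD-style path: make the VMCS current first, then populate the shadow copy from guest memory. */
    IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
    rc = iemVmxReadCurrentVmcsFromGstMem(pVCpu);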

1/* $Id: IEMAllCImplVmxInstr.cpp.h 79235 2019-06-19 08:42:11Z vboxsync $ */
2/** @file
3 * IEM - VT-x instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Defined Constants And Macros *
21*********************************************************************************************************************************/
22#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
23/**
24 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
25 * relative offsets.
26 */
27# ifdef IEM_WITH_CODE_TLB
28# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) do { } while (0)
29# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) do { } while (0)
30# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
31# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
32# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
33# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
34# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
35# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
36# error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
37# else /* !IEM_WITH_CODE_TLB */
38# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
39 do \
40 { \
41 Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
42 (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
43 } while (0)
44
45# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
46
47# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
48 do \
49 { \
50 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
51 uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
52 uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
53 (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
54 } while (0)
55
56# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
57 do \
58 { \
59 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
60 (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
61 } while (0)
62
63# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
64 do \
65 { \
66 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
67 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
68 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
69 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
70 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
71 (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
72 } while (0)
73
74# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
75 do \
76 { \
77 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
78 (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
79 } while (0)
80
81# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
82 do \
83 { \
84 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
85 (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
86 } while (0)
87
88# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
89 do \
90 { \
91 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
92 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
93 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
94 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
95 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
96 (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
97 } while (0)
98# endif /* !IEM_WITH_CODE_TLB */
99
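/*
 * Usage sketch (illustrative, not part of the original file): the macros above return their
 * result through the named output parameter. For example, to fetch a 32-bit displacement whose
 * relative offset into the opcode bytes is held in a (hypothetical) local offDisp:
 *
 *     uint32_t u32Disp;
 *     IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
 */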
100/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
101# define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)
102
103/** Whether a shadow VMCS is present for the given VCPU. */
104# define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
105
106/** Gets the VMXON region pointer. */
107# define IEM_VMX_GET_VMXON_PTR(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
108
109/** Gets the guest-physical address of the current VMCS for the given VCPU. */
110# define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
111
112/** Whether a current VMCS is present for the given VCPU. */
113# define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
114
115/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
116# define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
117 do \
118 { \
119 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
120 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
121 } while (0)
122
123/** Clears any current VMCS for the given VCPU. */
124# define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
125 do \
126 { \
127 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
128 } while (0)
129
130/** Checks that we are in VMX operation (root mode), as required by the VMX instruction; raises \#UD otherwise.
131 * @note If anything changes here, check whether IEMOP_HLP_IN_VMX_OPERATION needs updating. */
132# define IEM_VMX_IN_VMX_OPERATION(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
133 do \
134 { \
135 if (IEM_VMX_IS_ROOT_MODE(a_pVCpu)) \
136 { /* likely */ } \
137 else \
138 { \
139 Log((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
140 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
141 return iemRaiseUndefinedOpcode(a_pVCpu); \
142 } \
143 } while (0)
144
145/** Marks a VM-entry failure with a diagnostic reason, logs and returns. */
146# define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_VmxDiag) \
147 do \
148 { \
149 Log(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_VmxDiag), \
150 HMGetVmxDiagDesc(a_VmxDiag), (a_pszFailure))); \
151 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
152 return VERR_VMX_VMENTRY_FAILED; \
153 } while (0)
154
155/** Marks a VM-exit failure with a diagnostic reason, logs and returns. */
156# define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
157 do \
158 { \
159 Log(("VM-exit failed! uExitReason=%u enmDiag=%u (%s) -> %s\n", (a_uExitReason), (a_VmxDiag), \
160 HMGetVmxDiagDesc(a_VmxDiag), (a_pszFailure))); \
161 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
162 return VERR_VMX_VMEXIT_FAILED; \
163 } while (0)
164
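/*
 * Usage sketch (illustrative): a typical VM-entry check invokes the macro above with the caller's
 * instruction/failure strings and one of the kVmxVDiag_Vmentry_* diagnostics used in this file;
 * fCheckOk is a hypothetical result of the check:
 *
 *     if (!fCheckOk)
 *         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseCs);
 */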
165
166/*********************************************************************************************************************************
167* Global Variables *
168*********************************************************************************************************************************/
169/** @todo NSTVMX: The following VM-exit intercepts are pending:
170 * VMX_EXIT_IO_SMI
171 * VMX_EXIT_SMI
172 * VMX_EXIT_GETSEC
173 * VMX_EXIT_RSM
174 * VMX_EXIT_MONITOR (APIC access VM-exit caused by MONITOR pending)
175 * VMX_EXIT_ERR_MACHINE_CHECK (we never need to raise this?)
176 * VMX_EXIT_APIC_ACCESS
177 * VMX_EXIT_EPT_VIOLATION
178 * VMX_EXIT_EPT_MISCONFIG
179 * VMX_EXIT_INVEPT
180 * VMX_EXIT_RDRAND
181 * VMX_EXIT_VMFUNC
182 * VMX_EXIT_ENCLS
183 * VMX_EXIT_RDSEED
184 * VMX_EXIT_PML_FULL
185 * VMX_EXIT_XSAVES
186 * VMX_EXIT_XRSTORS
187 */
188/**
189 * Map of VMCS field encodings to their virtual-VMCS structure offsets.
190 *
191 * The first array dimension is VMCS field encoding of Width OR'ed with Type and the
192 * second dimension is the Index, see VMXVMCSFIELD.
193 */
194uint16_t const g_aoffVmcsMap[16][VMX_V_VMCS_MAX_INDEX + 1] =
195{
196 /* VMX_VMCSFIELD_WIDTH_16BIT | VMX_VMCSFIELD_TYPE_CONTROL: */
197 {
198 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u16Vpid),
199 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u16PostIntNotifyVector),
200 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u16EptpIndex),
201 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
202 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
203 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
204 },
205 /* VMX_VMCSFIELD_WIDTH_16BIT | VMX_VMCSFIELD_TYPE_VMEXIT_INFO: */
206 {
207 /* 0-7 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
208 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
209 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
210 /* 24-25 */ UINT16_MAX, UINT16_MAX
211 },
212 /* VMX_VMCSFIELD_WIDTH_16BIT | VMX_VMCSFIELD_TYPE_GUEST_STATE: */
213 {
214 /* 0 */ RT_UOFFSETOF(VMXVVMCS, GuestEs),
215 /* 1 */ RT_UOFFSETOF(VMXVVMCS, GuestCs),
216 /* 2 */ RT_UOFFSETOF(VMXVVMCS, GuestSs),
217 /* 3 */ RT_UOFFSETOF(VMXVVMCS, GuestDs),
218 /* 4 */ RT_UOFFSETOF(VMXVVMCS, GuestFs),
219 /* 5 */ RT_UOFFSETOF(VMXVVMCS, GuestGs),
220 /* 6 */ RT_UOFFSETOF(VMXVVMCS, GuestLdtr),
221 /* 7 */ RT_UOFFSETOF(VMXVVMCS, GuestTr),
222 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u16GuestIntStatus),
223 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u16PmlIndex),
224 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
225 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
226 },
227 /* VMX_VMCSFIELD_WIDTH_16BIT | VMX_VMCSFIELD_TYPE_HOST_STATE: */
228 {
229 /* 0 */ RT_UOFFSETOF(VMXVVMCS, HostEs),
230 /* 1 */ RT_UOFFSETOF(VMXVVMCS, HostCs),
231 /* 2 */ RT_UOFFSETOF(VMXVVMCS, HostSs),
232 /* 3 */ RT_UOFFSETOF(VMXVVMCS, HostDs),
233 /* 4 */ RT_UOFFSETOF(VMXVVMCS, HostFs),
234 /* 5 */ RT_UOFFSETOF(VMXVVMCS, HostGs),
235 /* 6 */ RT_UOFFSETOF(VMXVVMCS, HostTr),
236 /* 7-14 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
237 /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
238 /* 23-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
239 },
240 /* VMX_VMCSFIELD_WIDTH_64BIT | VMX_VMCSFIELD_TYPE_CONTROL: */
241 {
242 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64AddrIoBitmapA),
243 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64AddrIoBitmapB),
244 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64AddrMsrBitmap),
245 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64AddrExitMsrStore),
246 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64AddrExitMsrLoad),
247 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEntryMsrLoad),
248 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64ExecVmcsPtr),
249 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64AddrPml),
250 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64TscOffset),
251 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVirtApic),
252 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64AddrApicAccess),
253 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64AddrPostedIntDesc),
254 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u64VmFuncCtls),
255 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u64EptpPtr),
256 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap0),
257 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap1),
258 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap2),
259 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap3),
260 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEptpList),
261 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVmreadBitmap),
262 /* 20 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVmwriteBitmap),
263 /* 21 */ RT_UOFFSETOF(VMXVVMCS, u64AddrXcptVeInfo),
264 /* 22 */ RT_UOFFSETOF(VMXVVMCS, u64XssBitmap),
265 /* 23 */ RT_UOFFSETOF(VMXVVMCS, u64EnclsBitmap),
266 /* 24 */ RT_UOFFSETOF(VMXVVMCS, u64SpptPtr),
267 /* 25 */ RT_UOFFSETOF(VMXVVMCS, u64TscMultiplier)
268 },
269 /* VMX_VMCSFIELD_WIDTH_64BIT | VMX_VMCSFIELD_TYPE_VMEXIT_INFO: */
270 {
271 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64RoGuestPhysAddr),
272 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
273 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
274 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
275 /* 25 */ UINT16_MAX
276 },
277 /* VMX_VMCSFIELD_WIDTH_64BIT | VMX_VMCSFIELD_TYPE_GUEST_STATE: */
278 {
279 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64VmcsLinkPtr),
280 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDebugCtlMsr),
281 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPatMsr),
282 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64GuestEferMsr),
283 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
284 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte0),
285 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte1),
286 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte2),
287 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte3),
288 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64GuestBndcfgsMsr),
289 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRtitCtlMsr),
290 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
291 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
292 },
293 /* VMX_VMCSFIELD_WIDTH_64BIT | VMX_VMCSFIELD_TYPE_HOST_STATE: */
294 {
295 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64HostPatMsr),
296 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64HostEferMsr),
297 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64HostPerfGlobalCtlMsr),
298 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
299 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
300 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
301 },
302 /* VMX_VMCSFIELD_WIDTH_32BIT | VMX_VMCSFIELD_TYPE_CONTROL: */
303 {
304 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32PinCtls),
305 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32ProcCtls),
306 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32XcptBitmap),
307 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32XcptPFMask),
308 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32XcptPFMatch),
309 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32Cr3TargetCount),
310 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32ExitCtls),
311 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32ExitMsrStoreCount),
312 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u32ExitMsrLoadCount),
313 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u32EntryCtls),
314 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u32EntryMsrLoadCount),
315 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u32EntryIntInfo),
316 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u32EntryXcptErrCode),
317 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u32EntryInstrLen),
318 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u32TprThreshold),
319 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u32ProcCtls2),
320 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u32PleGap),
321 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u32PleWindow),
322 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
323 },
324 /* VMX_VMCSFIELD_WIDTH_32BIT | VMX_VMCSFIELD_TYPE_VMEXIT_INFO: */
325 {
326 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32RoVmInstrError),
327 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitReason),
328 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitIntInfo),
329 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitIntErrCode),
330 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
331 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
332 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitInstrLen),
333 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitInstrInfo),
334 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
335 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
336 /* 24-25 */ UINT16_MAX, UINT16_MAX
337 },
338 /* VMX_VMCSFIELD_WIDTH_32BIT | VMX_VMCSFIELD_TYPE_GUEST_STATE: */
339 {
340 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32GuestEsLimit),
341 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32GuestCsLimit),
342 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSsLimit),
343 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32GuestDsLimit),
344 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32GuestFsLimit),
345 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGsLimit),
346 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32GuestLdtrLimit),
347 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32GuestTrLimit),
348 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGdtrLimit),
349 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u32GuestIdtrLimit),
350 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u32GuestEsAttr),
351 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u32GuestCsAttr),
352 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSsAttr),
353 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u32GuestDsAttr),
354 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u32GuestFsAttr),
355 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGsAttr),
356 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u32GuestLdtrAttr),
357 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u32GuestTrAttr),
358 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u32GuestIntrState),
359 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u32GuestActivityState),
360 /* 20 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSmBase),
361 /* 21 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSysenterCS),
362 /* 22 */ RT_UOFFSETOF(VMXVVMCS, u32PreemptTimer),
363 /* 23-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
364 },
365 /* VMX_VMCSFIELD_WIDTH_32BIT | VMX_VMCSFIELD_TYPE_HOST_STATE: */
366 {
367 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32HostSysenterCs),
368 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
369 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
370 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
371 /* 25 */ UINT16_MAX
372 },
373 /* VMX_VMCSFIELD_WIDTH_NATURAL | VMX_VMCSFIELD_TYPE_CONTROL: */
374 {
375 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64Cr0Mask),
376 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64Cr4Mask),
377 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64Cr0ReadShadow),
378 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64Cr4ReadShadow),
379 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target0),
380 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target1),
381 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target2),
382 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target3),
383 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
384 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
385 /* 24-25 */ UINT16_MAX, UINT16_MAX
386 },
387 /* VMX_VMCSFIELD_WIDTH_NATURAL | VMX_VMCSFIELD_TYPE_VMEXIT_INFO: */
388 {
389 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64RoExitQual),
390 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRcx),
391 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRsi),
392 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRdi),
393 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRip),
394 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64RoGuestLinearAddr),
395 /* 6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
396 /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
397 /* 22-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
398 },
399 /* VMX_VMCSFIELD_WIDTH_NATURAL | VMX_VMCSFIELD_TYPE_GUEST_STATE: */
400 {
401 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr0),
402 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr3),
403 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr4),
404 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64GuestEsBase),
405 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCsBase),
406 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSsBase),
407 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDsBase),
408 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64GuestFsBase),
409 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64GuestGsBase),
410 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64GuestLdtrBase),
411 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64GuestTrBase),
412 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64GuestGdtrBase),
413 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u64GuestIdtrBase),
414 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDr7),
415 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRsp),
416 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRip),
417 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRFlags),
418 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPendingDbgXcpt),
419 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSysenterEsp),
420 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSysenterEip),
421 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
422 },
423 /* VMX_VMCSFIELD_WIDTH_NATURAL | VMX_VMCSFIELD_TYPE_HOST_STATE: */
424 {
425 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr0),
426 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr3),
427 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr4),
428 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64HostFsBase),
429 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64HostGsBase),
430 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64HostTrBase),
431 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64HostGdtrBase),
432 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64HostIdtrBase),
433 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64HostSysenterEsp),
434 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64HostSysenterEip),
435 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64HostRsp),
436 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64HostRip),
437 /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
438 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
439 }
440};
441
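/*
 * Worked example of the mapping above (it simply mirrors what iemVmxVmcsGetHostSelReg() and
 * friends below do; no new API): the first index is (Width << 2) | Type and the second is the
 * field encoding's Index, so the offset of the host ES selector is found as
 *
 *     uint8_t  const uWidthType = (VMX_VMCSFIELD_WIDTH_16BIT << 2) | VMX_VMCSFIELD_TYPE_HOST_STATE;
 *     uint8_t  const uIndex     = RT_BF_GET(VMX_VMCS16_HOST_ES_SEL, VMX_BF_VMCSFIELD_INDEX);
 *     uint16_t const offHostEs  = g_aoffVmcsMap[uWidthType][uIndex];
 *
 * which resolves to RT_UOFFSETOF(VMXVVMCS, HostEs), the first entry of the 16-bit host-state row.
 */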
442
443/**
444 * Gets a host selector from the VMCS.
445 *
446 * @param pVmcs Pointer to the virtual VMCS.
447 * @param iSegReg The index of the segment register (X86_SREG_XXX).
448 */
449DECLINLINE(RTSEL) iemVmxVmcsGetHostSelReg(PCVMXVVMCS pVmcs, uint8_t iSegReg)
450{
451 Assert(iSegReg < X86_SREG_COUNT);
452 RTSEL HostSel;
453 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_16BIT;
454 uint8_t const uType = VMX_VMCSFIELD_TYPE_HOST_STATE;
455 uint8_t const uWidthType = (uWidth << 2) | uType;
456 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_HOST_ES_SEL, VMX_BF_VMCSFIELD_INDEX);
457 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
458 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
459 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
460 uint8_t const *pbField = pbVmcs + offField;
461 HostSel = *(uint16_t *)pbField;
462 return HostSel;
463}
464
465
466/**
467 * Sets a guest segment register in the VMCS.
468 *
469 * @param pVmcs Pointer to the virtual VMCS.
470 * @param iSegReg The index of the segment register (X86_SREG_XXX).
471 * @param pSelReg Pointer to the segment register.
472 */
473IEM_STATIC void iemVmxVmcsSetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCCPUMSELREG pSelReg)
474{
475 Assert(pSelReg);
476 Assert(iSegReg < X86_SREG_COUNT);
477
478 /* Selector. */
479 {
480 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_16BIT;
481 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
482 uint8_t const uWidthType = (uWidth << 2) | uType;
483 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCSFIELD_INDEX);
484 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
485 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
486 uint8_t *pbVmcs = (uint8_t *)pVmcs;
487 uint8_t *pbField = pbVmcs + offField;
488 *(uint16_t *)pbField = pSelReg->Sel;
489 }
490
491 /* Limit. */
492 {
493 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_32BIT;
494 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
495 uint8_t const uWidthType = (uWidth << 2) | uType;
496 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCSFIELD_INDEX);
497 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
498 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
499 uint8_t *pbVmcs = (uint8_t *)pVmcs;
500 uint8_t *pbField = pbVmcs + offField;
501 *(uint32_t *)pbField = pSelReg->u32Limit;
502 }
503
504 /* Base. */
505 {
506 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_NATURAL;
507 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
508 uint8_t const uWidthType = (uWidth << 2) | uType;
509 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCSFIELD_INDEX);
510 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
511 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
512 uint8_t *pbVmcs = (uint8_t *)pVmcs;
513 uint8_t *pbField = pbVmcs + offField;
514 *(uint64_t *)pbField = pSelReg->u64Base;
515 }
516
517 /* Attributes. */
518 {
519 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
520 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
521 | X86DESCATTR_UNUSABLE;
522 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_32BIT;
523 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
524 uint8_t const uWidthType = (uWidth << 2) | uType;
525 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCSFIELD_INDEX);
526 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
527 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
528 uint8_t *pbVmcs = (uint8_t *)pVmcs;
529 uint8_t *pbField = pbVmcs + offField;
530 *(uint32_t *)pbField = pSelReg->Attr.u & fValidAttrMask;
531 }
532}
533
534
535/**
536 * Gets a guest segment register from the VMCS.
537 *
538 * @returns VBox status code.
539 * @param pVmcs Pointer to the virtual VMCS.
540 * @param iSegReg The index of the segment register (X86_SREG_XXX).
541 * @param pSelReg Where to store the segment register (only updated when
542 * VINF_SUCCESS is returned).
543 *
544 * @remarks Warning! This does not validate the contents of the retrieved segment
545 * register.
546 */
547IEM_STATIC int iemVmxVmcsGetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCPUMSELREG pSelReg)
548{
549 Assert(pSelReg);
550 Assert(iSegReg < X86_SREG_COUNT);
551
552 /* Selector. */
553 uint16_t u16Sel;
554 {
555 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_16BIT;
556 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
557 uint8_t const uWidthType = (uWidth << 2) | uType;
558 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCSFIELD_INDEX);
559 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
560 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
561 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
562 uint8_t const *pbField = pbVmcs + offField;
563 u16Sel = *(uint16_t *)pbField;
564 }
565
566 /* Limit. */
567 uint32_t u32Limit;
568 {
569 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_32BIT;
570 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
571 uint8_t const uWidthType = (uWidth << 2) | uType;
572 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCSFIELD_INDEX);
573 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
574 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
575 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
576 uint8_t const *pbField = pbVmcs + offField;
577 u32Limit = *(uint32_t *)pbField;
578 }
579
580 /* Base. */
581 uint64_t u64Base;
582 {
583 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_NATURAL;
584 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
585 uint8_t const uWidthType = (uWidth << 2) | uType;
586 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCSFIELD_INDEX);
587 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
588 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
589 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
590 uint8_t const *pbField = pbVmcs + offField;
591 u64Base = *(uint64_t *)pbField;
592 /** @todo NSTVMX: Should we zero out high bits here for 32-bit virtual CPUs? */
593 }
594
595 /* Attributes. */
596 uint32_t u32Attr;
597 {
598 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_32BIT;
599 uint8_t const uType = VMX_VMCSFIELD_TYPE_GUEST_STATE;
600 uint8_t const uWidthType = (uWidth << 2) | uType;
601 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCSFIELD_INDEX);
602 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
603 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
604 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
605 uint8_t const *pbField = pbVmcs + offField;
606 u32Attr = *(uint32_t *)pbField;
607 }
608
609 pSelReg->Sel = u16Sel;
610 pSelReg->ValidSel = u16Sel;
611 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
612 pSelReg->u32Limit = u32Limit;
613 pSelReg->u64Base = u64Base;
614 pSelReg->Attr.u = u32Attr;
615 return VINF_SUCCESS;
616}
617
618
619/**
620 * Gets a CR3 target value from the VMCS.
621 *
622 * @returns The CR3-target value.
623 * @param pVmcs Pointer to the virtual VMCS.
624 * @param idxCr3Target The index of the CR3-target value to retrieve.
626 */
627IEM_STATIC uint64_t iemVmxVmcsGetCr3TargetValue(PCVMXVVMCS pVmcs, uint8_t idxCr3Target)
628{
629 Assert(idxCr3Target < VMX_V_CR3_TARGET_COUNT);
630 uint8_t const uWidth = VMX_VMCSFIELD_WIDTH_NATURAL;
631 uint8_t const uType = VMX_VMCSFIELD_TYPE_CONTROL;
632 uint8_t const uWidthType = (uWidth << 2) | uType;
633 uint8_t const uIndex = idxCr3Target + RT_BF_GET(VMX_VMCS_CTRL_CR3_TARGET_VAL0, VMX_BF_VMCSFIELD_INDEX);
634 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
635 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
636 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
637 uint8_t const *pbField = pbVmcs + offField;
638 uint64_t const uCr3TargetValue = *(uint64_t *)pbField;
639 return uCr3TargetValue;
640}
641
642
643/**
644 * Converts an IEM exception event type to a VMX event type.
645 *
646 * @returns The VMX event type.
647 * @param uVector The interrupt / exception vector.
648 * @param fFlags The IEM event flag (see IEM_XCPT_FLAGS_XXX).
649 */
650DECLINLINE(uint8_t) iemVmxGetEventType(uint32_t uVector, uint32_t fFlags)
651{
652 /* Paranoia (callers may use these interchangeably). */
653 AssertCompile(VMX_EXIT_INT_INFO_TYPE_NMI == VMX_IDT_VECTORING_INFO_TYPE_NMI);
654 AssertCompile(VMX_EXIT_INT_INFO_TYPE_HW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT);
655 AssertCompile(VMX_EXIT_INT_INFO_TYPE_EXT_INT == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
656 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT);
657 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_INT == VMX_IDT_VECTORING_INFO_TYPE_SW_INT);
658 AssertCompile(VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
659 AssertCompile(VMX_EXIT_INT_INFO_TYPE_NMI == VMX_ENTRY_INT_INFO_TYPE_NMI);
660 AssertCompile(VMX_EXIT_INT_INFO_TYPE_HW_XCPT == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT);
661 AssertCompile(VMX_EXIT_INT_INFO_TYPE_EXT_INT == VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
662 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_XCPT == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT);
663 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_INT == VMX_ENTRY_INT_INFO_TYPE_SW_INT);
664 AssertCompile(VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT);
665
666 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
667 {
668 if (uVector == X86_XCPT_NMI)
669 return VMX_EXIT_INT_INFO_TYPE_NMI;
670 return VMX_EXIT_INT_INFO_TYPE_HW_XCPT;
671 }
672
673 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
674 {
675 if (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR))
676 return VMX_EXIT_INT_INFO_TYPE_SW_XCPT;
677 if (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
678 return VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT;
679 return VMX_EXIT_INT_INFO_TYPE_SW_INT;
680 }
681
682 Assert(fFlags & IEM_XCPT_FLAGS_T_EXT_INT);
683 return VMX_EXIT_INT_INFO_TYPE_EXT_INT;
684}
685
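/*
 * Example (follows directly from the checks above): a #BP raised by INT3 is flagged
 * IEM_XCPT_FLAGS_T_SOFT_INT together with IEM_XCPT_FLAGS_BP_INSTR and is therefore reported
 * as a software exception:
 *
 *     uint8_t const uType = iemVmxGetEventType(X86_XCPT_BP,
 *                                              IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR);
 *     Assert(uType == VMX_EXIT_INT_INFO_TYPE_SW_XCPT);
 */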
686
687/**
688 * Sets the Exit qualification VMCS field.
689 *
690 * @param pVCpu The cross context virtual CPU structure.
691 * @param u64ExitQual The Exit qualification.
692 */
693DECL_FORCE_INLINE(void) iemVmxVmcsSetExitQual(PVMCPU pVCpu, uint64_t u64ExitQual)
694{
695 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
696 pVmcs->u64RoExitQual.u = u64ExitQual;
697}
698
699
700/**
701 * Sets the VM-exit interruption information field.
702 *
703 * @param pVCpu The cross context virtual CPU structure.
704 * @param uExitIntInfo The VM-exit interruption information.
705 */
706DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntInfo(PVMCPU pVCpu, uint32_t uExitIntInfo)
707{
708 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
709 pVmcs->u32RoExitIntInfo = uExitIntInfo;
710}
711
712
713/**
714 * Sets the VM-exit interruption error code.
715 *
716 * @param pVCpu The cross context virtual CPU structure.
717 * @param uErrCode The error code.
718 */
719DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntErrCode(PVMCPU pVCpu, uint32_t uErrCode)
720{
721 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
722 pVmcs->u32RoExitIntErrCode = uErrCode;
723}
724
725
726/**
727 * Sets the IDT-vectoring information field.
728 *
729 * @param pVCpu The cross context virtual CPU structure.
730 * @param uIdtVectorInfo The IDT-vectoring information.
731 */
732DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringInfo(PVMCPU pVCpu, uint32_t uIdtVectorInfo)
733{
734 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
735 pVmcs->u32RoIdtVectoringInfo = uIdtVectorInfo;
736}
737
738
739/**
740 * Sets the IDT-vectoring error code field.
741 *
742 * @param pVCpu The cross context virtual CPU structure.
743 * @param uErrCode The error code.
744 */
745DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringErrCode(PVMCPU pVCpu, uint32_t uErrCode)
746{
747 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
748 pVmcs->u32RoIdtVectoringErrCode = uErrCode;
749}
750
751
752/**
753 * Sets the VM-exit guest-linear address VMCS field.
754 *
755 * @param pVCpu The cross context virtual CPU structure.
756 * @param uGuestLinearAddr The VM-exit guest-linear address.
757 */
758DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestLinearAddr(PVMCPU pVCpu, uint64_t uGuestLinearAddr)
759{
760 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
761 pVmcs->u64RoGuestLinearAddr.u = uGuestLinearAddr;
762}
763
764
765/**
766 * Sets the VM-exit guest-physical address VMCS field.
767 *
768 * @param pVCpu The cross context virtual CPU structure.
769 * @param uGuestPhysAddr The VM-exit guest-physical address.
770 */
771DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestPhysAddr(PVMCPU pVCpu, uint64_t uGuestPhysAddr)
772{
773 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
774 pVmcs->u64RoGuestPhysAddr.u = uGuestPhysAddr;
775}
776
777
778/**
779 * Sets the VM-exit instruction length VMCS field.
780 *
781 * @param pVCpu The cross context virtual CPU structure.
782 * @param cbInstr The VM-exit instruction length in bytes.
783 *
784 * @remarks Callers may clear this field to 0. Hence, this function does not check
785 * the validity of the instruction length.
786 */
787DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrLen(PVMCPU pVCpu, uint32_t cbInstr)
788{
789 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
790 pVmcs->u32RoExitInstrLen = cbInstr;
791}
792
793
794/**
795 * Sets the VM-exit instruction info. VMCS field.
796 *
797 * @param pVCpu The cross context virtual CPU structure.
798 * @param uExitInstrInfo The VM-exit instruction information.
799 */
800DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitInstrInfo)
801{
802 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
803 pVmcs->u32RoExitInstrInfo = uExitInstrInfo;
804}
805
806
807/**
808 * Implements VMSucceed for VMX instruction success.
809 *
810 * @param pVCpu The cross context virtual CPU structure.
811 */
812DECL_FORCE_INLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
813{
814 return CPUMSetGuestVmxVmSucceed(&pVCpu->cpum.GstCtx);
815}
816
817
818/**
819 * Implements VMFailInvalid for VMX instruction failure.
820 *
821 * @param pVCpu The cross context virtual CPU structure.
822 */
823DECL_FORCE_INLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
824{
825 return CPUMSetGuestVmxVmFailInvalid(&pVCpu->cpum.GstCtx);
826}
827
828
829/**
830 * Implements VMFail for VMX instruction failure.
831 *
832 * @param pVCpu The cross context virtual CPU structure.
833 * @param enmInsErr The VM instruction error.
834 */
835DECL_FORCE_INLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
836{
837 return CPUMSetGuestVmxVmFail(&pVCpu->cpum.GstCtx, enmInsErr);
838}
839
840
841/**
842 * Checks if the given auto-load/store MSR area count is valid for the
843 * implementation.
844 *
845 * @returns @c true if it's within the valid limit, @c false otherwise.
846 * @param pVCpu The cross context virtual CPU structure.
847 * @param uMsrCount The MSR area count to check.
848 */
849DECL_FORCE_INLINE(bool) iemVmxIsAutoMsrCountValid(PCVMCPU pVCpu, uint32_t uMsrCount)
850{
851 uint64_t const u64VmxMiscMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Misc;
852 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(u64VmxMiscMsr);
853 Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
854 if (uMsrCount <= cMaxSupportedMsrs)
855 return true;
856 return false;
857}
858
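/*
 * Note: VMX_MISC_MAX_MSRS() derives the limit from the IA32_VMX_MISC value exposed to the guest;
 * the Intel SDM defines the recommended maximum number of MSRs in each auto-load/store list as
 * 512 * (N + 1), where N is the 3-bit field at bits 27:25 of IA32_VMX_MISC. The Assert() above
 * checks that this limit fits within the virtual auto-MSR area (VMX_V_AUTOMSR_AREA_SIZE).
 */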
859
860/**
861 * Flushes the current VMCS contents back to guest memory.
862 *
863 * @returns VBox status code.
864 * @param pVCpu The cross context virtual CPU structure.
865 */
866DECL_FORCE_INLINE(int) iemVmxWriteCurrentVmcsToGstMem(PVMCPU pVCpu)
867{
868 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
869 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
870 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
871 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), sizeof(VMXVVMCS));
872 return rc;
873}
874
875
876/**
877 * Populates the current VMCS contents from guest memory.
878 *
879 * @returns VBox status code.
880 * @param pVCpu The cross context virtual CPU structure.
881 */
882DECL_FORCE_INLINE(int) iemVmxReadCurrentVmcsFromGstMem(PVMCPU pVCpu)
883{
884 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
885 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
886 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs),
887 IEM_VMX_GET_CURRENT_VMCS(pVCpu), sizeof(VMXVVMCS));
888 return rc;
889}
890
891
892/**
893 * Implements VMSucceed for the VMREAD instruction and increments the guest RIP.
894 *
895 * @param pVCpu The cross context virtual CPU structure.
896 */
897DECL_FORCE_INLINE(void) iemVmxVmreadSuccess(PVMCPU pVCpu, uint8_t cbInstr)
898{
899 iemVmxVmSucceed(pVCpu);
900 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
901}
902
903
904/**
905 * Gets the instruction diagnostic for segment base checks during VM-entry of a
906 * nested-guest.
907 *
908 * @param iSegReg The segment index (X86_SREG_XXX).
909 */
910IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBase(unsigned iSegReg)
911{
912 switch (iSegReg)
913 {
914 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseCs;
915 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseDs;
916 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseEs;
917 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseFs;
918 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseGs;
919 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseSs;
920 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_1);
921 }
922}
923
924
925/**
926 * Gets the instruction diagnostic for segment base checks during VM-entry of a
927 * nested-guest that is in Virtual-8086 mode.
928 *
929 * @param iSegReg The segment index (X86_SREG_XXX).
930 */
931IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBaseV86(unsigned iSegReg)
932{
933 switch (iSegReg)
934 {
935 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseV86Cs;
936 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ds;
937 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseV86Es;
938 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseV86Fs;
939 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseV86Gs;
940 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ss;
941 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
942 }
943}
944
945
946/**
947 * Gets the instruction diagnostic for segment limit checks during VM-entry of a
948 * nested-guest that is in Virtual-8086 mode.
949 *
950 * @param iSegReg The segment index (X86_SREG_XXX).
951 */
952IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegLimitV86(unsigned iSegReg)
953{
954 switch (iSegReg)
955 {
956 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegLimitV86Cs;
957 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ds;
958 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegLimitV86Es;
959 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegLimitV86Fs;
960 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegLimitV86Gs;
961 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ss;
962 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_3);
963 }
964}
965
966
967/**
968 * Gets the instruction diagnostic for segment attribute checks during VM-entry of a
969 * nested-guest that is in Virtual-8086 mode.
970 *
971 * @param iSegReg The segment index (X86_SREG_XXX).
972 */
973IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrV86(unsigned iSegReg)
974{
975 switch (iSegReg)
976 {
977 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrV86Cs;
978 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ds;
979 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrV86Es;
980 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrV86Fs;
981 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrV86Gs;
982 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ss;
983 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_4);
984 }
985}
986
987
988/**
989 * Gets the instruction diagnostic for segment attributes reserved bits failure
990 * during VM-entry of a nested-guest.
991 *
992 * @param iSegReg The segment index (X86_SREG_XXX).
993 */
994IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrRsvd(unsigned iSegReg)
995{
996 switch (iSegReg)
997 {
998 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdCs;
999 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdDs;
1000 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrRsvdEs;
1001 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdFs;
1002 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdGs;
1003 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdSs;
1004 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_5);
1005 }
1006}
1007
1008
1009/**
1010 * Gets the instruction diagnostic for segment attributes descriptor-type
1011 * (code/segment or system) failure during VM-entry of a nested-guest.
1012 *
1013 * @param iSegReg The segment index (X86_SREG_XXX).
1014 */
1015IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDescType(unsigned iSegReg)
1016{
1017 switch (iSegReg)
1018 {
1019 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeCs;
1020 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeDs;
1021 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeEs;
1022 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeFs;
1023 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeGs;
1024 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeSs;
1025 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_6);
1026 }
1027}
1028
1029
1030/**
1031 * Gets the instruction diagnostic for segment attribute 'present' bit failure during
1032 * VM-entry of a nested-guest.
1033 *
1034 * @param iSegReg The segment index (X86_SREG_XXX).
1035 */
1036IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrPresent(unsigned iSegReg)
1037{
1038 switch (iSegReg)
1039 {
1040 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrPresentCs;
1041 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrPresentDs;
1042 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrPresentEs;
1043 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrPresentFs;
1044 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrPresentGs;
1045 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrPresentSs;
1046 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_7);
1047 }
1048}
1049
1050
1051/**
1052 * Gets the instruction diagnostic for segment attribute granularity failure during
1053 * VM-entry of a nested-guest.
1054 *
1055 * @param iSegReg The segment index (X86_SREG_XXX).
1056 */
1057IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrGran(unsigned iSegReg)
1058{
1059 switch (iSegReg)
1060 {
1061 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrGranCs;
1062 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrGranDs;
1063 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrGranEs;
1064 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrGranFs;
1065 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrGranGs;
1066 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrGranSs;
1067 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_8);
1068 }
1069}
1070
1071/**
1072 * Gets the instruction diagnostic for segment attribute DPL/RPL failure during
1073 * VM-entry of a nested-guest.
1074 *
1075 * @param iSegReg The segment index (X86_SREG_XXX).
1076 */
1077IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDplRpl(unsigned iSegReg)
1078{
1079 switch (iSegReg)
1080 {
1081 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplCs;
1082 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplDs;
1083 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDplRplEs;
1084 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplFs;
1085 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplGs;
1086 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplSs;
1087 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_9);
1088 }
1089}
1090
1091
1092/**
1093 * Gets the instruction diagnostic for segment attribute type accessed failure
1094 * during VM-entry of a nested-guest.
1095 *
1096 * @param iSegReg The segment index (X86_SREG_XXX).
1097 */
1098IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrTypeAcc(unsigned iSegReg)
1099{
1100 switch (iSegReg)
1101 {
1102 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccCs;
1103 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccDs;
1104 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccEs;
1105 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccFs;
1106 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccGs;
1107 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccSs;
1108 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_10);
1109 }
1110}
1111
1112
1113/**
1114 * Gets the instruction diagnostic for guest CR3 referenced PDPTE reserved bits
1115 * failure during VM-entry of a nested-guest.
1116 *
1117 * @param iPdpte The PDPTE entry index.
1118 */
1119IEM_STATIC VMXVDIAG iemVmxGetDiagVmentryPdpteRsvd(unsigned iPdpte)
1120{
1121 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
1122 switch (iPdpte)
1123 {
1124 case 0: return kVmxVDiag_Vmentry_GuestPdpte0Rsvd;
1125 case 1: return kVmxVDiag_Vmentry_GuestPdpte1Rsvd;
1126 case 2: return kVmxVDiag_Vmentry_GuestPdpte2Rsvd;
1127 case 3: return kVmxVDiag_Vmentry_GuestPdpte3Rsvd;
1128 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_11);
1129 }
1130}
1131
1132
1133/**
1134 * Gets the instruction diagnostic for host CR3 referenced PDPTE reserved bits
1135 * failure during VM-exit of a nested-guest.
1136 *
1137 * @param iPdpte The PDPTE entry index.
1138 */
1139IEM_STATIC VMXVDIAG iemVmxGetDiagVmexitPdpteRsvd(unsigned iPdpte)
1140{
1141 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
1142 switch (iPdpte)
1143 {
1144 case 0: return kVmxVDiag_Vmexit_HostPdpte0Rsvd;
1145 case 1: return kVmxVDiag_Vmexit_HostPdpte1Rsvd;
1146 case 2: return kVmxVDiag_Vmexit_HostPdpte2Rsvd;
1147 case 3: return kVmxVDiag_Vmexit_HostPdpte3Rsvd;
1148 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_12);
1149 }
1150}
1151
1152
1153/**
1154 * Saves the guest control registers, debug registers and some MSRs as part of
1155 * VM-exit.
1156 *
1157 * @param pVCpu The cross context virtual CPU structure.
1158 */
1159IEM_STATIC void iemVmxVmexitSaveGuestControlRegsMsrs(PVMCPU pVCpu)
1160{
1161 /*
1162 * Saves the guest control registers, debug registers and some MSRs.
1163 * See Intel spec. 27.3.1 "Saving Control Registers, Debug Registers and MSRs".
1164 */
1165 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1166
1167 /* Save control registers. */
1168 pVmcs->u64GuestCr0.u = pVCpu->cpum.GstCtx.cr0;
1169 pVmcs->u64GuestCr3.u = pVCpu->cpum.GstCtx.cr3;
1170 pVmcs->u64GuestCr4.u = pVCpu->cpum.GstCtx.cr4;
1171
1172 /* Save SYSENTER CS, ESP, EIP. */
1173 pVmcs->u32GuestSysenterCS = pVCpu->cpum.GstCtx.SysEnter.cs;
1174 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1175 {
1176 pVmcs->u64GuestSysenterEsp.u = pVCpu->cpum.GstCtx.SysEnter.esp;
1177 pVmcs->u64GuestSysenterEip.u = pVCpu->cpum.GstCtx.SysEnter.eip;
1178 }
1179 else
1180 {
1181 pVmcs->u64GuestSysenterEsp.s.Lo = pVCpu->cpum.GstCtx.SysEnter.esp;
1182 pVmcs->u64GuestSysenterEip.s.Lo = pVCpu->cpum.GstCtx.SysEnter.eip;
1183 }
1184
1185 /* Save debug registers (DR7 and IA32_DEBUGCTL MSR). */
1186 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_DEBUG)
1187 {
1188 pVmcs->u64GuestDr7.u = pVCpu->cpum.GstCtx.dr[7];
1189 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
1190 }
1191
1192 /* Save PAT MSR. */
1193 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PAT_MSR)
1194 pVmcs->u64GuestPatMsr.u = pVCpu->cpum.GstCtx.msrPAT;
1195
1196 /* Save EFER MSR. */
1197 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_EFER_MSR)
1198 pVmcs->u64GuestEferMsr.u = pVCpu->cpum.GstCtx.msrEFER;
1199
1200 /* We don't support clearing IA32_BNDCFGS MSR yet. */
1201 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR));
1202
1203 /* Nothing to do for SMBASE register - We don't support SMM yet. */
1204}
1205
1206
1207/**
1208 * Saves the guest force-flags in preparation for entering the nested-guest.
1209 *
1210 * @param pVCpu The cross context virtual CPU structure.
1211 */
1212IEM_STATIC void iemVmxVmentrySaveNmiBlockingFF(PVMCPU pVCpu)
1213{
1214 /* We shouldn't be called multiple times during VM-entry. */
1215 Assert(pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions == 0);
1216
1217 /* MTF should not be set outside VMX non-root mode. */
1218 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
1219
1220 /*
1221 * Preserve the required force-flags.
1222 *
1223 * We cache and clear force-flags that would affect the execution of the
1224 * nested-guest. Cached flags are then restored while returning to the guest
1225 * if necessary.
1226 *
1227 * - VMCPU_FF_INHIBIT_INTERRUPTS need not be cached as it only affects
1228 * interrupts until the completion of the current VMLAUNCH/VMRESUME
1229 * instruction. Interrupt inhibition for any nested-guest instruction
1230 * is supplied by the guest-interruptibility state VMCS field and will
1231 * be set up as part of loading the guest state.
1232 *
1233 * - VMCPU_FF_BLOCK_NMIS needs to be cached as VM-exits caused before
1234 * successful VM-entry (due to invalid guest-state) need to continue
1235 * blocking NMIs if it was in effect before VM-entry.
1236 *
1237 * - MTF need not be preserved as it's used only in VMX non-root mode and
1238 * is supplied through the VM-execution controls.
1239 *
1240 * The remaining FFs (e.g. timers, APIC updates) can stay in place so that
1241 * we will be able to generate interrupts that may cause VM-exits for
1242 * the nested-guest.
1243 */
1244 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
1245}
1246
1247
1248/**
1249 * Restores the guest force-flags in preparation for exiting the nested-guest.
1250 *
1251 * @param pVCpu The cross context virtual CPU structure.
1252 */
1253IEM_STATIC void iemVmxVmexitRestoreNmiBlockingFF(PVMCPU pVCpu)
1254{
1255 if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
1256 {
1257 VMCPU_FF_SET_MASK(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions);
1258 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0;
1259 }
1260}
1261
1262
1263/**
1264 * Performs a VMX transition, updating PGM, IEM and CPUM.
1265 *
1266 * @param pVCpu The cross context virtual CPU structure.
1267 */
1268IEM_STATIC int iemVmxWorldSwitch(PVMCPU pVCpu)
1269{
1270 /*
1271 * Inform PGM about paging mode changes.
1272 * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet,
1273 * see comment in iemMemPageTranslateAndCheckAccess().
1274 */
1275 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
1276# ifdef IN_RING3
1277 Assert(rc != VINF_PGM_CHANGE_MODE);
1278# endif
1279 AssertRCReturn(rc, rc);
1280
1281 /* Inform CPUM (recompiler), can later be removed. */
1282 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1283
1284 /*
1285 * Flush the TLB with new CR3. This is required in case the PGM mode change
1286 * above doesn't actually change anything.
1287 */
1288 if (rc == VINF_SUCCESS)
1289 {
1290 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true);
1291 AssertRCReturn(rc, rc);
1292 }
1293
1294 /* Re-initialize IEM cache/state after the drastic mode switch. */
1295 iemReInitExec(pVCpu);
1296 return rc;
1297}
1298
1299
1300/**
1301 * Calculates the current VMX-preemption timer value.
1302 *
1303 * @returns The current VMX-preemption timer value.
1304 * @param pVCpu The cross context virtual CPU structure.
1305 */
1306IEM_STATIC uint32_t iemVmxCalcPreemptTimer(PVMCPU pVCpu)
1307{
1308 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1309 Assert(pVmcs);
1310
1311 /*
1312 * Assume the following:
1313 * PreemptTimerShift = 5
1314 * VmcsPreemptTimer = 2 (i.e. need to decrement by 1 every 2 * RT_BIT(5) = 20000 TSC ticks)
1315 * EntryTick = 50000 (TSC at time of VM-entry)
1316 *
1317 * CurTick Delta PreemptTimerVal
1318 * ----------------------------------
1319 * 60000 10000 2
1320 * 80000 30000 1
1321 * 90000 40000 0 -> VM-exit.
1322 *
1323 * If Delta >= VmcsPreemptTimer * RT_BIT(PreemptTimerShift) cause a VMX-preemption timer VM-exit.
1324 * The saved VMX-preemption timer value is calculated as follows:
1325 * PreemptTimerVal = VmcsPreemptTimer - (Delta / (VmcsPreemptTimer * RT_BIT(PreemptTimerShift)))
1326 * E.g.:
1327 * Delta = 10000
1328 * Tmp = 10000 / (2 * 10000) = 0.5
1329 * NewPt = 2 - 0.5 = 2
1330 * Delta = 30000
1331 * Tmp = 30000 / (2 * 10000) = 1.5
1332 * NewPt = 2 - 1.5 = 1
1333 * Delta = 40000
1334 * Tmp = 40000 / 20000 = 2
1335 * NewPt = 2 - 2 = 0
1336 */
1337 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
1338 uint64_t const uCurTick = TMCpuTickGetNoCheck(pVCpu);
1339 uint64_t const uEntryTick = pVCpu->cpum.GstCtx.hwvirt.vmx.uEntryTick;
1340 uint64_t const uDelta = uCurTick - uEntryTick;
1341 uint32_t const uVmcsPreemptVal = pVmcs->u32PreemptTimer;
1342 uint32_t const uPreemptTimer = uVmcsPreemptVal
1343 - ASMDivU64ByU32RetU32(uDelta, uVmcsPreemptVal * RT_BIT(VMX_V_PREEMPT_TIMER_SHIFT));
1344 return uPreemptTimer;
1345}
1346
1347
1348/**
1349 * Saves guest segment registers, GDTR, IDTR, LDTR, TR as part of VM-exit.
1350 *
1351 * @param pVCpu The cross context virtual CPU structure.
1352 */
1353IEM_STATIC void iemVmxVmexitSaveGuestSegRegs(PVMCPU pVCpu)
1354{
1355 /*
1356 * Save guest segment registers, GDTR, IDTR, LDTR, TR.
1357 * See Intel spec 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
1358 */
1359 /* CS, SS, ES, DS, FS, GS. */
1360 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1361 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
1362 {
1363 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1364 if (!pSelReg->Attr.n.u1Unusable)
1365 iemVmxVmcsSetGuestSegReg(pVmcs, iSegReg, pSelReg);
1366 else
1367 {
1368 /*
1369 * For unusable segments the attributes are undefined except for CS and SS.
1370 * For the rest we don't bother preserving anything but the unusable bit.
1371 */
1372 switch (iSegReg)
1373 {
1374 case X86_SREG_CS:
1375 pVmcs->GuestCs = pSelReg->Sel;
1376 pVmcs->u64GuestCsBase.u = pSelReg->u64Base;
1377 pVmcs->u32GuestCsLimit = pSelReg->u32Limit;
1378 pVmcs->u32GuestCsAttr = pSelReg->Attr.u & ( X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
1379 | X86DESCATTR_UNUSABLE);
1380 break;
1381
1382 case X86_SREG_SS:
1383 pVmcs->GuestSs = pSelReg->Sel;
1384 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1385 pVmcs->u64GuestSsBase.u &= UINT32_C(0xffffffff);
1386 pVmcs->u32GuestSsAttr = pSelReg->Attr.u & (X86DESCATTR_DPL | X86DESCATTR_UNUSABLE);
1387 break;
1388
1389 case X86_SREG_DS:
1390 pVmcs->GuestDs = pSelReg->Sel;
1391 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1392 pVmcs->u64GuestDsBase.u &= UINT32_C(0xffffffff);
1393 pVmcs->u32GuestDsAttr = X86DESCATTR_UNUSABLE;
1394 break;
1395
1396 case X86_SREG_ES:
1397 pVmcs->GuestEs = pSelReg->Sel;
1398 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1399 pVmcs->u64GuestEsBase.u &= UINT32_C(0xffffffff);
1400 pVmcs->u32GuestEsAttr = X86DESCATTR_UNUSABLE;
1401 break;
1402
1403 case X86_SREG_FS:
1404 pVmcs->GuestFs = pSelReg->Sel;
1405 pVmcs->u64GuestFsBase.u = pSelReg->u64Base;
1406 pVmcs->u32GuestFsAttr = X86DESCATTR_UNUSABLE;
1407 break;
1408
1409 case X86_SREG_GS:
1410 pVmcs->GuestGs = pSelReg->Sel;
1411 pVmcs->u64GuestGsBase.u = pSelReg->u64Base;
1412 pVmcs->u32GuestGsAttr = X86DESCATTR_UNUSABLE;
1413 break;
1414 }
1415 }
1416 }
1417
1418 /* Segment attribute bits 31:17 and 11:8 MBZ. */
1419 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
1420 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
1421 | X86DESCATTR_UNUSABLE;
1422 /* LDTR. */
1423 {
1424 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.ldtr;
1425 pVmcs->GuestLdtr = pSelReg->Sel;
1426 pVmcs->u64GuestLdtrBase.u = pSelReg->u64Base;
1427 Assert(X86_IS_CANONICAL(pSelReg->u64Base));
1428 pVmcs->u32GuestLdtrLimit = pSelReg->u32Limit;
1429 pVmcs->u32GuestLdtrAttr = pSelReg->Attr.u & fValidAttrMask;
1430 }
1431
1432 /* TR. */
1433 {
1434 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.tr;
1435 pVmcs->GuestTr = pSelReg->Sel;
1436 pVmcs->u64GuestTrBase.u = pSelReg->u64Base;
1437 pVmcs->u32GuestTrLimit = pSelReg->u32Limit;
1438 pVmcs->u32GuestTrAttr = pSelReg->Attr.u & fValidAttrMask;
1439 }
1440
1441 /* GDTR. */
1442 pVmcs->u64GuestGdtrBase.u = pVCpu->cpum.GstCtx.gdtr.pGdt;
1443 pVmcs->u32GuestGdtrLimit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
1444
1445 /* IDTR. */
1446 pVmcs->u64GuestIdtrBase.u = pVCpu->cpum.GstCtx.idtr.pIdt;
1447 pVmcs->u32GuestIdtrLimit = pVCpu->cpum.GstCtx.idtr.cbIdt;
1448}
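/*
 * For reference (illustrative arithmetic, assuming the usual X86DESCATTR_*
 * values TYPE=0x0f, DT=0x10, DPL=0x60, P=0x80, AVL=0x1000, L=0x2000, D=0x4000,
 * G=0x8000 and UNUSABLE=0x10000): the fValidAttrMask built above works out to
 * 0x1f0ff, i.e. exactly the attribute bits outside the MBZ ranges 31:17 and
 * 11:8 mentioned in the comment.
 */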
1449
1450
1451/**
1452 * Saves guest non-register state as part of VM-exit.
1453 *
1454 * @param pVCpu The cross context virtual CPU structure.
1455 * @param uExitReason The VM-exit reason.
1456 */
1457IEM_STATIC void iemVmxVmexitSaveGuestNonRegState(PVMCPU pVCpu, uint32_t uExitReason)
1458{
1459 /*
1460 * Save guest non-register state.
1461 * See Intel spec. 27.3.4 "Saving Non-Register State".
1462 */
1463 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1464
1465 /*
1466 * Activity state.
1467 * Most VM-exits will occur in the active state. However, if the first instruction
1468 * following the VM-entry is a HLT instruction, and the MTF VM-execution control is set,
1469 * the VM-exit will be from the HLT activity state.
1470 *
1471 * See Intel spec. 25.5.2 "Monitor Trap Flag".
1472 */
1473 /** @todo NSTVMX: Does triple-fault VM-exit reflect a shutdown activity state or
1474 * not? */
1475 EMSTATE const enmActivityState = EMGetState(pVCpu);
1476 switch (enmActivityState)
1477 {
1478 case EMSTATE_HALTED: pVmcs->u32GuestActivityState = VMX_VMCS_GUEST_ACTIVITY_HLT; break;
1479 default: pVmcs->u32GuestActivityState = VMX_VMCS_GUEST_ACTIVITY_ACTIVE; break;
1480 }
1481
1482 /*
1483 * Interruptibility-state.
1484 */
1485 /* NMI. */
1486 pVmcs->u32GuestIntrState = 0;
1487 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
1488 {
1489 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
1490 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1491 }
1492 else
1493 {
1494 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1495 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1496 }
1497
1498 /* Blocking-by-STI. */
1499 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1500 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
1501 {
1502 /** @todo NSTVMX: We can't distinguish between blocking-by-MovSS and blocking-by-STI
1503 * currently. */
1504 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1505 }
1506 /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */
1507
1508 /*
1509 * Pending debug exceptions.
1510 */
1511 if ( uExitReason != VMX_EXIT_INIT_SIGNAL
1512 && uExitReason != VMX_EXIT_SMI
1513 && uExitReason != VMX_EXIT_ERR_MACHINE_CHECK
1514 && !HMVmxIsVmexitTrapLike(uExitReason))
1515 {
1516 /** @todo NSTVMX: also must exclude VM-exits caused by debug exceptions when
1517 * block-by-MovSS is in effect. */
1518 pVmcs->u64GuestPendingDbgXcpt.u = 0;
1519 }
1520 else
1521 {
1522 /*
1523 * Pending debug exception field is identical to DR6 except the RTM bit (16) which needs to be flipped.
1524 * The "enabled breakpoint" bit (12) is not present in DR6, so we need to update it here.
1525 *
1526 * See Intel spec. 24.4.2 "Guest Non-Register State".
1527 */
1528 /** @todo r=ramshankar: NSTVMX: I'm not quite sure if we can simply derive this from
1529 * DR6. */
1530 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR6);
1531 uint64_t fPendingDbgMask = pVCpu->cpum.GstCtx.dr[6];
1532 uint64_t const fBpHitMask = VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BP0 | VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BP1
1533 | VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BP2 | VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BP3;
1534 if (fPendingDbgMask & fBpHitMask)
1535 fPendingDbgMask |= VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_EN_BP;
1536 fPendingDbgMask ^= VMX_VMCS_GUEST_PENDING_DEBUG_RTM;
1537 pVmcs->u64GuestPendingDbgXcpt.u = fPendingDbgMask;
1538 }
1539
1540 /*
1541 * Save the VMX-preemption timer value back into the VMCS if the feature is enabled.
1542 *
1543 * For VMX-preemption timer VM-exits, we have already written 0 back into the VMCS
1544 * (when saving the preemption timer is supported), so there is nothing further to do here.
1545 */
1546 if ( uExitReason != VMX_EXIT_PREEMPT_TIMER
1547 && (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
1548 pVmcs->u32PreemptTimer = iemVmxCalcPreemptTimer(pVCpu);
1549
1550 /* PDPTEs. */
1551 /* We don't support EPT yet. */
1552 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
1553 pVmcs->u64GuestPdpte0.u = 0;
1554 pVmcs->u64GuestPdpte1.u = 0;
1555 pVmcs->u64GuestPdpte2.u = 0;
1556 pVmcs->u64GuestPdpte3.u = 0;
1557}
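/*
 * Illustrative, standalone sketch (not used by the code above) of the DR6 to
 * pending-debug-exceptions conversion performed in
 * iemVmxVmexitSaveGuestNonRegState. Bit positions assumed here follow the
 * Intel SDM layout: B0..B3 in bits 3:0, "enabled breakpoint" in bit 12 and
 * RTM in bit 16 (inverted relative to DR6).
 */
static uint64_t vmxPendingDbgXcptFromDr6Sketch(uint64_t uDr6)
{
    uint64_t fPending = uDr6;
    if (fPending & UINT64_C(0xf))       /* Any of the B0..B3 breakpoint-hit bits set? */
        fPending |= UINT64_C(1) << 12;  /* ...then also set the "enabled breakpoint" bit. */
    fPending ^= UINT64_C(1) << 16;      /* The RTM bit has the opposite sense in DR6. */
    return fPending;
}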
1558
1559
1560/**
1561 * Saves the guest-state as part of VM-exit.
1562 *
1563 * @returns VBox status code.
1564 * @param pVCpu The cross context virtual CPU structure.
1565 * @param uExitReason The VM-exit reason.
1566 */
1567IEM_STATIC void iemVmxVmexitSaveGuestState(PVMCPU pVCpu, uint32_t uExitReason)
1568{
1569 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1570 Assert(pVmcs);
1571
1572 iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu);
1573 iemVmxVmexitSaveGuestSegRegs(pVCpu);
1574
1575 pVmcs->u64GuestRip.u = pVCpu->cpum.GstCtx.rip;
1576 pVmcs->u64GuestRsp.u = pVCpu->cpum.GstCtx.rsp;
1577 pVmcs->u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u; /** @todo NSTVMX: Check RFLAGS.RF handling. */
1578
1579 iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason);
1580}
1581
1582
1583/**
1584 * Saves the guest MSRs into the VM-exit MSR-store area as part of VM-exit.
1585 *
1586 * @returns VBox status code.
1587 * @param pVCpu The cross context virtual CPU structure.
1588 * @param uExitReason The VM-exit reason (for diagnostic purposes).
1589 */
1590IEM_STATIC int iemVmxVmexitSaveGuestAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
1591{
1592 /*
1593 * Save guest MSRs.
1594 * See Intel spec. 27.4 "Saving MSRs".
1595 */
1596 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1597 const char *const pszFailure = "VMX-abort";
1598
1599 /*
1600 * The VM-exit MSR-store area address need not be a valid guest-physical address if the
1601 * VM-exit MSR-store count is 0. If this is the case, bail early without reading it.
1602 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
1603 */
1604 uint32_t const cMsrs = pVmcs->u32ExitMsrStoreCount;
1605 if (!cMsrs)
1606 return VINF_SUCCESS;
1607
1608 /*
1609 * Verify the MSR auto-store count. Physical CPUs can behave unpredictably if the supported
1610 * limit is exceeded, possibly even raising #MC exceptions during the VMX transition. Our
1611 * implementation causes a VMX-abort followed by a triple-fault.
1612 */
1613 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
1614 if (fIsMsrCountValid)
1615 { /* likely */ }
1616 else
1617 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount);
1618
1619 /*
1620 * Optimization if the guest hypervisor is using the same guest-physical page for both
1621 * the VM-entry MSR-load area and the VM-exit MSR-store area.
1622 */
1623 PVMXAUTOMSR pMsrArea;
1624 RTGCPHYS const GCPhysVmEntryMsrLoadArea = pVmcs->u64AddrEntryMsrLoad.u;
1625 RTGCPHYS const GCPhysVmExitMsrStoreArea = pVmcs->u64AddrExitMsrStore.u;
1626 if (GCPhysVmEntryMsrLoadArea == GCPhysVmExitMsrStoreArea)
1627 pMsrArea = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pEntryMsrLoadArea);
1628 else
1629 {
1630 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pExitMsrStoreArea),
1631 GCPhysVmExitMsrStoreArea, cMsrs * sizeof(VMXAUTOMSR));
1632 if (RT_SUCCESS(rc))
1633 pMsrArea = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pExitMsrStoreArea);
1634 else
1635 {
1636 AssertMsgFailed(("VM-exit: Failed to read MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysVmExitMsrStoreArea, rc));
1637 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrReadPhys);
1638 }
1639 }
1640
1641 /*
1642 * Update VM-exit MSR store area.
1643 */
1644 PVMXAUTOMSR pMsr = pMsrArea;
1645 Assert(pMsr);
1646 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
1647 {
1648 if ( !pMsr->u32Reserved
1649 && pMsr->u32Msr != MSR_IA32_SMBASE
1650 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
1651 {
1652 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pMsr->u32Msr, &pMsr->u64Value);
1653 if (rcStrict == VINF_SUCCESS)
1654 continue;
1655
1656 /*
1657 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue the VM-exit.
1658 * If any guest hypervisor stores MSRs that require ring-3 handling, we cause a VMX-abort,
1659 * recording the MSR index in the auxiliary info. field and indicating it further with our
1660 * own, specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0
1661 * if possible, or come up with a better, generic solution.
1662 */
1663 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
1664 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_READ
1665 ? kVmxVDiag_Vmexit_MsrStoreRing3
1666 : kVmxVDiag_Vmexit_MsrStore;
1667 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
1668 }
1669 else
1670 {
1671 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
1672 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreRsvd);
1673 }
1674 }
1675
1676 /*
1677 * Commit the VM-exit MSR store area to guest memory.
1678 */
1679 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmExitMsrStoreArea, pMsrArea, cMsrs * sizeof(VMXAUTOMSR));
1680 if (RT_SUCCESS(rc))
1681 return VINF_SUCCESS;
1682
1683 NOREF(uExitReason);
1684 NOREF(pszFailure);
1685
1686 AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysVmExitMsrStoreArea, rc));
1687 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrWritePhys);
1688}
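/*
 * Layout sketch of a single MSR auto-store/load entry as walked above and in
 * iemVmxVmexitLoadHostAutoMsrs; see Intel spec. 24.7.2 "VM-Exit Controls for
 * MSRs". This mirrors what VMXAUTOMSR is expected to look like and is for
 * illustration only: 128 bits per entry, with the reserved dword required to
 * be zero.
 */
typedef struct VMXAUTOMSRSKETCH
{
    uint32_t u32Msr;       /* The MSR index. */
    uint32_t u32Reserved;  /* Must be zero; a set bit makes the code above fail the VM-exit. */
    uint64_t u64Value;     /* The MSR value stored on VM-exit or loaded on VM-entry. */
} VMXAUTOMSRSKETCH;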
1689
1690
1691/**
1692 * Performs a VMX abort (due to a fatal error during VM-exit).
1693 *
1694 * @returns Strict VBox status code.
1695 * @param pVCpu The cross context virtual CPU structure.
1696 * @param enmAbort The VMX abort reason.
1697 */
1698IEM_STATIC VBOXSTRICTRC iemVmxAbort(PVMCPU pVCpu, VMXABORT enmAbort)
1699{
1700 /*
1701 * Perform the VMX abort.
1702 * See Intel spec. 27.7 "VMX Aborts".
1703 */
1704 LogFunc(("enmAbort=%u (%s) -> RESET\n", enmAbort, HMGetVmxAbortDesc(enmAbort)));
1705
1706 /* We don't support SMX yet. */
1707 pVCpu->cpum.GstCtx.hwvirt.vmx.enmAbort = enmAbort;
1708 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1709 {
1710 RTGCPHYS const GCPhysVmcs = IEM_VMX_GET_CURRENT_VMCS(pVCpu);
1711 uint32_t const offVmxAbort = RT_UOFFSETOF(VMXVVMCS, enmVmxAbort);
1712 PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + offVmxAbort, &enmAbort, sizeof(enmAbort));
1713 }
1714
1715 return VINF_EM_TRIPLE_FAULT;
1716}
1717
1718
1719/**
1720 * Loads host control registers, debug registers and MSRs as part of VM-exit.
1721 *
1722 * @param pVCpu The cross context virtual CPU structure.
1723 */
1724IEM_STATIC void iemVmxVmexitLoadHostControlRegsMsrs(PVMCPU pVCpu)
1725{
1726 /*
1727 * Load host control registers, debug registers and MSRs.
1728 * See Intel spec. 27.5.1 "Loading Host Control Registers, Debug Registers, MSRs".
1729 */
1730 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1731 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
1732
1733 /* CR0. */
1734 {
1735 /* Bits 63:32, 28:19, 17, 15:6, ET, CD, NW and fixed CR0 bits are not modified. */
1736 uint64_t const uCr0Mb1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
1737 uint64_t const uCr0Mb0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
1738 uint64_t const fCr0IgnMask = UINT64_C(0xffffffff1ffaffc0) | X86_CR0_ET | X86_CR0_CD | X86_CR0_NW | uCr0Mb1 | ~uCr0Mb0;
1739 uint64_t const uHostCr0 = pVmcs->u64HostCr0.u;
1740 uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
1741 uint64_t const uValidHostCr0 = (uHostCr0 & ~fCr0IgnMask) | (uGuestCr0 & fCr0IgnMask);
1742 CPUMSetGuestCR0(pVCpu, uValidHostCr0);
1743 }
1744
1745 /* CR4. */
1746 {
1747 /* Fixed CR4 bits are not modified. */
1748 uint64_t const uCr4Mb1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
1749 uint64_t const uCr4Mb0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
1750 uint64_t const fCr4IgnMask = uCr4Mb1 | ~uCr4Mb0;
1751 uint64_t const uHostCr4 = pVmcs->u64HostCr4.u;
1752 uint64_t const uGuestCr4 = pVCpu->cpum.GstCtx.cr4;
1753 uint64_t uValidHostCr4 = (uHostCr4 & ~fCr4IgnMask) | (uGuestCr4 & fCr4IgnMask);
1754 if (fHostInLongMode)
1755 uValidHostCr4 |= X86_CR4_PAE;
1756 else
1757 uValidHostCr4 &= ~X86_CR4_PCIDE;
1758 CPUMSetGuestCR4(pVCpu, uValidHostCr4);
1759 }
1760
1761 /* CR3 (host value validated while checking host-state during VM-entry). */
1762 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64HostCr3.u;
1763
1764 /* DR7. */
1765 pVCpu->cpum.GstCtx.dr[7] = X86_DR7_INIT_VAL;
1766
1767 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
1768
1769 /* Load SYSENTER CS, ESP and EIP (host values validated while checking host-state during VM-entry). */
1770 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64HostSysenterEip.u;
1771 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64HostSysenterEsp.u;
1772 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32HostSysenterCs;
1773
1774 /* FS, GS bases are loaded later while we load host segment registers. */
1775
1776 /* EFER MSR (host value validated while checking host-state during VM-entry). */
1777 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
1778 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64HostEferMsr.u;
1779 else if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1780 {
1781 if (fHostInLongMode)
1782 pVCpu->cpum.GstCtx.msrEFER |= (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
1783 else
1784 pVCpu->cpum.GstCtx.msrEFER &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
1785 }
1786
1787 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
1788
1789 /* PAT MSR (host value is validated while checking host-state during VM-entry). */
1790 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
1791 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64HostPatMsr.u;
1792
1793 /* We don't support IA32_BNDCFGS MSR yet. */
1794}
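/*
 * Generic form (illustrative, not used above) of the masked merges applied to
 * CR0 and CR4 in iemVmxVmexitLoadHostControlRegsMsrs: bits set in fKeepFromOld
 * are preserved from the current guest value, all other bits are taken from
 * the new (host) value.
 */
static uint64_t vmxMergeUnderMaskSketch(uint64_t uOld, uint64_t uNew, uint64_t fKeepFromOld)
{
    return (uNew & ~fKeepFromOld) | (uOld & fKeepFromOld);
}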
1795
1796
1797/**
1798 * Loads host segment registers, GDTR, IDTR, LDTR and TR as part of VM-exit.
1799 *
1800 * @param pVCpu The cross context virtual CPU structure.
1801 */
1802IEM_STATIC void iemVmxVmexitLoadHostSegRegs(PVMCPU pVCpu)
1803{
1804 /*
1805 * Load host segment registers, GDTR, IDTR, LDTR and TR.
1806 * See Intel spec. 27.5.2 "Loading Host Segment and Descriptor-Table Registers".
1807 *
1808 * Warning! Be careful to not touch fields that are reserved by VT-x,
1809 * e.g. segment limit high bits stored in segment attributes (in bits 11:8).
1810 */
1811 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1812 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
1813
1814 /* CS, SS, ES, DS, FS, GS. */
1815 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
1816 {
1817 RTSEL const HostSel = iemVmxVmcsGetHostSelReg(pVmcs, iSegReg);
1818 bool const fUnusable = RT_BOOL(HostSel == 0);
1819 PCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1820
1821 /* Selector. */
1822 pSelReg->Sel = HostSel;
1823 pSelReg->ValidSel = HostSel;
1824 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
1825
1826 /* Limit. */
1827 pSelReg->u32Limit = 0xffffffff;
1828
1829 /* Base. */
1830 pSelReg->u64Base = 0;
1831
1832 /* Attributes. */
1833 if (iSegReg == X86_SREG_CS)
1834 {
1835 pSelReg->Attr.n.u4Type = X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED;
1836 pSelReg->Attr.n.u1DescType = 1;
1837 pSelReg->Attr.n.u2Dpl = 0;
1838 pSelReg->Attr.n.u1Present = 1;
1839 pSelReg->Attr.n.u1Long = fHostInLongMode;
1840 pSelReg->Attr.n.u1DefBig = !fHostInLongMode;
1841 pSelReg->Attr.n.u1Granularity = 1;
1842 Assert(!pSelReg->Attr.n.u1Unusable);
1843 Assert(!fUnusable);
1844 }
1845 else
1846 {
1847 pSelReg->Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
1848 pSelReg->Attr.n.u1DescType = 1;
1849 pSelReg->Attr.n.u2Dpl = 0;
1850 pSelReg->Attr.n.u1Present = 1;
1851 pSelReg->Attr.n.u1DefBig = 1;
1852 pSelReg->Attr.n.u1Granularity = 1;
1853 pSelReg->Attr.n.u1Unusable = fUnusable;
1854 }
1855 }
1856
1857 /* FS base. */
1858 if ( !pVCpu->cpum.GstCtx.fs.Attr.n.u1Unusable
1859 || fHostInLongMode)
1860 {
1861 Assert(X86_IS_CANONICAL(pVmcs->u64HostFsBase.u));
1862 pVCpu->cpum.GstCtx.fs.u64Base = pVmcs->u64HostFsBase.u;
1863 }
1864
1865 /* GS base. */
1866 if ( !pVCpu->cpum.GstCtx.gs.Attr.n.u1Unusable
1867 || fHostInLongMode)
1868 {
1869 Assert(X86_IS_CANONICAL(pVmcs->u64HostGsBase.u));
1870 pVCpu->cpum.GstCtx.gs.u64Base = pVmcs->u64HostGsBase.u;
1871 }
1872
1873 /* TR. */
1874 Assert(X86_IS_CANONICAL(pVmcs->u64HostTrBase.u));
1875 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1Unusable);
1876 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->HostTr;
1877 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->HostTr;
1878 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
1879 pVCpu->cpum.GstCtx.tr.u32Limit = X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN;
1880 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64HostTrBase.u;
1881 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
1882 pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType = 0;
1883 pVCpu->cpum.GstCtx.tr.Attr.n.u2Dpl = 0;
1884 pVCpu->cpum.GstCtx.tr.Attr.n.u1Present = 1;
1885 pVCpu->cpum.GstCtx.tr.Attr.n.u1DefBig = 0;
1886 pVCpu->cpum.GstCtx.tr.Attr.n.u1Granularity = 0;
1887
1888 /* LDTR (Warning! do not touch the base and limits here). */
1889 pVCpu->cpum.GstCtx.ldtr.Sel = 0;
1890 pVCpu->cpum.GstCtx.ldtr.ValidSel = 0;
1891 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
1892 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
1893
1894 /* GDTR. */
1895 Assert(X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u));
1896 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64HostGdtrBase.u;
1897 pVCpu->cpum.GstCtx.gdtr.cbGdt = 0xffff;
1898
1899 /* IDTR.*/
1900 Assert(X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u));
1901 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64HostIdtrBase.u;
1902 pVCpu->cpum.GstCtx.idtr.cbIdt = 0xffff;
1903}
1904
1905
1906/**
1907 * Checks host PDPTEs as part of VM-exit.
1908 *
1909 * @param pVCpu The cross context virtual CPU structure.
1910 * @param uExitReason The VM-exit reason (for logging purposes).
1911 */
1912IEM_STATIC int iemVmxVmexitCheckHostPdptes(PVMCPU pVCpu, uint32_t uExitReason)
1913{
1914 /*
1915 * Check host PDPTEs.
1916 * See Intel spec. 27.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
1917 */
1918 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1919 const char *const pszFailure = "VMX-abort";
1920 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
1921
1922 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
1923 && !fHostInLongMode)
1924 {
1925 uint64_t const uHostCr3 = pVCpu->cpum.GstCtx.cr3 & X86_CR3_PAE_PAGE_MASK;
1926 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
1927 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uHostCr3, sizeof(aPdptes));
1928 if (RT_SUCCESS(rc))
1929 {
1930 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
1931 {
1932 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
1933 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
1934 { /* likely */ }
1935 else
1936 {
1937 VMXVDIAG const enmDiag = iemVmxGetDiagVmexitPdpteRsvd(iPdpte);
1938 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
1939 }
1940 }
1941 }
1942 else
1943 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_HostPdpteCr3ReadPhys);
1944 }
1945
1946 NOREF(pszFailure);
1947 NOREF(uExitReason);
1948 return VINF_SUCCESS;
1949}
1950
1951
1952/**
1953 * Loads the host MSRs from the VM-exit MSR-load area as part of VM-exit.
1954 *
1955 * @returns VBox status code.
1956 * @param pVCpu The cross context virtual CPU structure.
1957 * @param uExitReason The VM-exit reason (for logging purposes).
1958 */
1959IEM_STATIC int iemVmxVmexitLoadHostAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
1960{
1961 /*
1962 * Load host MSRs.
1963 * See Intel spec. 27.6 "Loading MSRs".
1964 */
1965 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1966 const char *const pszFailure = "VMX-abort";
1967
1968 /*
1969 * The VM-exit MSR-load area address need not be a valid guest-physical address if the
1970 * VM-exit MSR load count is 0. If this is the case, bail early without reading it.
1971 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
1972 */
1973 uint32_t const cMsrs = pVmcs->u32ExitMsrLoadCount;
1974 if (!cMsrs)
1975 return VINF_SUCCESS;
1976
1977 /*
1978 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the supported
1979 * limit is exceeded, possibly even raising #MC exceptions during the VMX transition. Our
1980 * implementation causes a VMX-abort followed by a triple-fault.
1981 */
1982 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
1983 if (fIsMsrCountValid)
1984 { /* likely */ }
1985 else
1986 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadCount);
1987
1988 RTGCPHYS const GCPhysVmExitMsrLoadArea = pVmcs->u64AddrExitMsrLoad.u;
1989 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pExitMsrLoadArea),
1990 GCPhysVmExitMsrLoadArea, cMsrs * sizeof(VMXAUTOMSR));
1991 if (RT_SUCCESS(rc))
1992 {
1993 PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pExitMsrLoadArea);
1994 Assert(pMsr);
1995 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
1996 {
1997 if ( !pMsr->u32Reserved
1998 && pMsr->u32Msr != MSR_K8_FS_BASE
1999 && pMsr->u32Msr != MSR_K8_GS_BASE
2000 && pMsr->u32Msr != MSR_K6_EFER
2001 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
2002 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
2003 {
2004 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
2005 if (rcStrict == VINF_SUCCESS)
2006 continue;
2007
2008 /*
2009 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue the VM-exit.
2010 * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VMX-abort,
2011 * recording the MSR index in the auxiliary info. field and indicating it further with our
2012 * own, specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0
2013 * if possible, or come up with a better, generic solution.
2014 */
2015 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2016 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
2017 ? kVmxVDiag_Vmexit_MsrLoadRing3
2018 : kVmxVDiag_Vmexit_MsrLoad;
2019 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2020 }
2021 else
2022 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadRsvd);
2023 }
2024 }
2025 else
2026 {
2027 AssertMsgFailed(("VM-exit: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", GCPhysVmExitMsrLoadArea, rc));
2028 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadPtrReadPhys);
2029 }
2030
2031 NOREF(uExitReason);
2032 NOREF(pszFailure);
2033 return VINF_SUCCESS;
2034}
2035
2036
2037/**
2038 * Loads the host state as part of VM-exit.
2039 *
2040 * @returns Strict VBox status code.
2041 * @param pVCpu The cross context virtual CPU structure.
2042 * @param uExitReason The VM-exit reason (for logging purposes).
2043 */
2044IEM_STATIC VBOXSTRICTRC iemVmxVmexitLoadHostState(PVMCPU pVCpu, uint32_t uExitReason)
2045{
2046 /*
2047 * Load host state.
2048 * See Intel spec. 27.5 "Loading Host State".
2049 */
2050 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2051 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2052
2053 /* We cannot return from a long-mode guest to a host that is not in long mode. */
2054 if ( CPUMIsGuestInLongMode(pVCpu)
2055 && !fHostInLongMode)
2056 {
2057 Log(("VM-exit from long-mode guest to host not in long-mode -> VMX-Abort\n"));
2058 return iemVmxAbort(pVCpu, VMXABORT_HOST_NOT_IN_LONG_MODE);
2059 }
2060
2061 iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);
2062 iemVmxVmexitLoadHostSegRegs(pVCpu);
2063
2064 /*
2065 * Load host RIP, RSP and RFLAGS.
2066 * See Intel spec. 27.5.3 "Loading Host RIP, RSP and RFLAGS"
2067 */
2068 pVCpu->cpum.GstCtx.rip = pVmcs->u64HostRip.u;
2069 pVCpu->cpum.GstCtx.rsp = pVmcs->u64HostRsp.u;
2070 pVCpu->cpum.GstCtx.rflags.u = X86_EFL_1;
2071
2072 /* Clear address range monitoring. */
2073 EMMonitorWaitClear(pVCpu);
2074
2075 /* Perform the VMX transition (PGM updates). */
2076 VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
2077 if (rcStrict == VINF_SUCCESS)
2078 {
2079 /* Check host PDPTEs (only when we've fully switched page tables). */
2080 /** @todo r=ramshankar: I don't know if PGM does this for us already or not... */
2081 int rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);
2082 if (RT_FAILURE(rc))
2083 {
2084 Log(("VM-exit failed while restoring host PDPTEs -> VMX-Abort\n"));
2085 return iemVmxAbort(pVCpu, VMXBOART_HOST_PDPTE);
2086 }
2087 }
2088 else if (RT_SUCCESS(rcStrict))
2089 {
2090 Log3(("VM-exit: iemVmxWorldSwitch returns %Rrc (uExitReason=%u) -> Setting passup status\n", VBOXSTRICTRC_VAL(rcStrict),
2091 uExitReason));
2092 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2093 }
2094 else
2095 {
2096 Log3(("VM-exit: iemVmxWorldSwitch failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
2097 return VBOXSTRICTRC_VAL(rcStrict);
2098 }
2099
2100 Assert(rcStrict == VINF_SUCCESS);
2101
2102 /* Load MSRs from the VM-exit auto-load MSR area. */
2103 int rc = iemVmxVmexitLoadHostAutoMsrs(pVCpu, uExitReason);
2104 if (RT_FAILURE(rc))
2105 {
2106 Log(("VM-exit failed while loading host MSRs -> VMX-Abort\n"));
2107 return iemVmxAbort(pVCpu, VMXABORT_LOAD_HOST_MSR);
2108 }
2109 return VINF_SUCCESS;
2110}
2111
2112
2113/**
2114 * Gets VM-exit instruction information along with any displacement for an
2115 * instruction VM-exit.
2116 *
2117 * @returns The VM-exit instruction information.
2118 * @param pVCpu The cross context virtual CPU structure.
2119 * @param uExitReason The VM-exit reason.
2120 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_XXX).
2121 * @param pGCPtrDisp Where to store the displacement field. Optional, can be
2122 * NULL.
2123 */
2124IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, PRTGCPTR pGCPtrDisp)
2125{
2126 RTGCPTR GCPtrDisp;
2127 VMXEXITINSTRINFO ExitInstrInfo;
2128 ExitInstrInfo.u = 0;
2129
2130 /*
2131 * Get and parse the ModR/M byte from our decoded opcodes.
2132 */
2133 uint8_t bRm;
2134 uint8_t const offModRm = pVCpu->iem.s.offModRm;
2135 IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
2136 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2137 {
2138 /*
2139 * ModR/M indicates register addressing.
2140 *
2141 * The primary/secondary register operands are reported in the iReg1 or iReg2
2142 * fields depending on whether it is a read/write form.
2143 */
2144 uint8_t idxReg1;
2145 uint8_t idxReg2;
2146 if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
2147 {
2148 idxReg1 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
2149 idxReg2 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
2150 }
2151 else
2152 {
2153 idxReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
2154 idxReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
2155 }
2156 ExitInstrInfo.All.u2Scaling = 0;
2157 ExitInstrInfo.All.iReg1 = idxReg1;
2158 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
2159 ExitInstrInfo.All.fIsRegOperand = 1;
2160 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
2161 ExitInstrInfo.All.iSegReg = 0;
2162 ExitInstrInfo.All.iIdxReg = 0;
2163 ExitInstrInfo.All.fIdxRegInvalid = 1;
2164 ExitInstrInfo.All.iBaseReg = 0;
2165 ExitInstrInfo.All.fBaseRegInvalid = 1;
2166 ExitInstrInfo.All.iReg2 = idxReg2;
2167
2168 /* Displacement not applicable for register addressing. */
2169 GCPtrDisp = 0;
2170 }
2171 else
2172 {
2173 /*
2174 * ModR/M indicates memory addressing.
2175 */
2176 uint8_t uScale = 0;
2177 bool fBaseRegValid = false;
2178 bool fIdxRegValid = false;
2179 uint8_t iBaseReg = 0;
2180 uint8_t iIdxReg = 0;
2181 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
2182 {
2183 /*
2184 * Parse the ModR/M, displacement for 16-bit addressing mode.
2185 * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
2186 */
2187 uint16_t u16Disp = 0;
2188 uint8_t const offDisp = offModRm + sizeof(bRm);
2189 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
2190 {
2191 /* Displacement without any registers. */
2192 IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
2193 }
2194 else
2195 {
2196 /* Register (index and base). */
2197 switch (bRm & X86_MODRM_RM_MASK)
2198 {
2199 case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
2200 case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
2201 case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
2202 case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
2203 case 4: fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
2204 case 5: fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
2205 case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
2206 case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
2207 }
2208
2209 /* Register + displacement. */
2210 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2211 {
2212 case 0: break;
2213 case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
2214 case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp); break;
2215 default:
2216 {
2217 /* Register addressing, handled at the beginning. */
2218 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
2219 break;
2220 }
2221 }
2222 }
2223
2224 Assert(!uScale); /* There's no scaling/SIB byte for 16-bit addressing. */
2225 GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
2226 }
2227 else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
2228 {
2229 /*
2230 * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
2231 * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
2232 */
2233 uint32_t u32Disp = 0;
2234 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
2235 {
2236 /* Displacement without any registers. */
2237 uint8_t const offDisp = offModRm + sizeof(bRm);
2238 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
2239 }
2240 else
2241 {
2242 /* Register (and perhaps scale, index and base). */
2243 uint8_t offDisp = offModRm + sizeof(bRm);
2244 iBaseReg = (bRm & X86_MODRM_RM_MASK);
2245 if (iBaseReg == 4)
2246 {
2247 /* An SIB byte follows the ModR/M byte, parse it. */
2248 uint8_t bSib;
2249 uint8_t const offSib = offModRm + sizeof(bRm);
2250 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
2251
2252 /* A displacement may follow SIB, update its offset. */
2253 offDisp += sizeof(bSib);
2254
2255 /* Get the scale. */
2256 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
2257
2258 /* Get the index register. */
2259 iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
2260 fIdxRegValid = RT_BOOL(iIdxReg != 4);
2261
2262 /* Get the base register. */
2263 iBaseReg = bSib & X86_SIB_BASE_MASK;
2264 fBaseRegValid = true;
2265 if (iBaseReg == 5)
2266 {
2267 if ((bRm & X86_MODRM_MOD_MASK) == 0)
2268 {
2269 /* Mod is 0 implies a 32-bit displacement with no base. */
2270 fBaseRegValid = false;
2271 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
2272 }
2273 else
2274 {
2275 /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
2276 iBaseReg = X86_GREG_xBP;
2277 }
2278 }
2279 }
2280
2281 /* Register + displacement. */
2282 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2283 {
2284 case 0: /* Handled above */ break;
2285 case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
2286 case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp); break;
2287 default:
2288 {
2289 /* Register addressing, handled at the beginning. */
2290 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
2291 break;
2292 }
2293 }
2294 }
2295
2296 GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
2297 }
2298 else
2299 {
2300 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
2301
2302 /*
2303 * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
2304 * See Intel instruction spec. 2.2 "IA-32e Mode".
2305 */
2306 uint64_t u64Disp = 0;
2307 bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
2308 if (fRipRelativeAddr)
2309 {
2310 /*
2311 * RIP-relative addressing mode.
2312 *
2313 * The displacement is 32-bit signed implying an offset range of +/-2G.
2314 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
2315 */
2316 uint8_t const offDisp = offModRm + sizeof(bRm);
2317 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
2318 }
2319 else
2320 {
2321 uint8_t offDisp = offModRm + sizeof(bRm);
2322
2323 /*
2324 * Register (and perhaps scale, index and base).
2325 *
2326 * REX.B extends the most-significant bit of the base register. However, REX.B
2327 * is ignored while determining whether an SIB follows the opcode. Hence, we
2328 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
2329 *
2330 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
2331 */
2332 iBaseReg = (bRm & X86_MODRM_RM_MASK);
2333 if (iBaseReg == 4)
2334 {
2335 /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
2336 uint8_t bSib;
2337 uint8_t const offSib = offModRm + sizeof(bRm);
2338 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
2339
2340 /* Displacement may follow SIB, update its offset. */
2341 offDisp += sizeof(bSib);
2342
2343 /* Get the scale. */
2344 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
2345
2346 /* Get the index. */
2347 iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
2348 fIdxRegValid = RT_BOOL(iIdxReg != 4); /* R12 -can- be used as an index register. */
2349
2350 /* Get the base. */
2351 iBaseReg = (bSib & X86_SIB_BASE_MASK);
2352 fBaseRegValid = true;
2353 if (iBaseReg == 5)
2354 {
2355 if ((bRm & X86_MODRM_MOD_MASK) == 0)
2356 {
2357 /* Mod is 0 implies a signed 32-bit displacement with no base. */
2358 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
2359 }
2360 else
2361 {
2362 /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
2363 iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
2364 }
2365 }
2366 }
2367 iBaseReg |= pVCpu->iem.s.uRexB;
2368
2369 /* Register + displacement. */
2370 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2371 {
2372 case 0: /* Handled above */ break;
2373 case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp); break;
2374 case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
2375 default:
2376 {
2377 /* Register addressing, handled at the beginning. */
2378 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
2379 break;
2380 }
2381 }
2382 }
2383
2384 GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
2385 }
2386
2387 /*
2388 * The primary or secondary register operand is reported in iReg2 depending
2389 * on whether the primary operand is in read/write form.
2390 */
2391 uint8_t idxReg2;
2392 if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
2393 {
2394 idxReg2 = bRm & X86_MODRM_RM_MASK;
2395 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
2396 idxReg2 |= pVCpu->iem.s.uRexB;
2397 }
2398 else
2399 {
2400 idxReg2 = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
2401 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
2402 idxReg2 |= pVCpu->iem.s.uRexReg;
2403 }
2404 ExitInstrInfo.All.u2Scaling = uScale;
2405 ExitInstrInfo.All.iReg1 = 0; /* Not applicable for memory addressing. */
2406 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
2407 ExitInstrInfo.All.fIsRegOperand = 0;
2408 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
2409 ExitInstrInfo.All.iSegReg = pVCpu->iem.s.iEffSeg;
2410 ExitInstrInfo.All.iIdxReg = iIdxReg;
2411 ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
2412 ExitInstrInfo.All.iBaseReg = iBaseReg;
2413 ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
2414 ExitInstrInfo.All.iReg2 = idxReg2;
2415 }
2416
2417 /*
2418 * Handle exceptions to the norm for certain instructions.
2419 * (e.g. some instructions convey an instruction identity in place of iReg2).
2420 */
2421 switch (uExitReason)
2422 {
2423 case VMX_EXIT_GDTR_IDTR_ACCESS:
2424 {
2425 Assert(VMXINSTRID_IS_VALID(uInstrId));
2426 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
2427 ExitInstrInfo.GdtIdt.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
2428 ExitInstrInfo.GdtIdt.u2Undef0 = 0;
2429 break;
2430 }
2431
2432 case VMX_EXIT_LDTR_TR_ACCESS:
2433 {
2434 Assert(VMXINSTRID_IS_VALID(uInstrId));
2435 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
2436 ExitInstrInfo.LdtTr.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
2437 ExitInstrInfo.LdtTr.u2Undef0 = 0;
2438 break;
2439 }
2440
2441 case VMX_EXIT_RDRAND:
2442 case VMX_EXIT_RDSEED:
2443 {
2444 Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
2445 break;
2446 }
2447 }
2448
2449 /* Update displacement and return the constructed VM-exit instruction information field. */
2450 if (pGCPtrDisp)
2451 *pGCPtrDisp = GCPtrDisp;
2452
2453 return ExitInstrInfo.u;
2454}
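/*
 * Worked example (illustrative; assumes 64-bit mode, no REX prefix and DS as
 * the effective segment): for "vmread qword [rbx+rsi*8+0x10], rax" the decoder
 * above sees ModR/M=0x44 (mod=1, reg=0, rm=4 => SIB follows), SIB=0xf3
 * (scale=3, index=6/RSI, base=3/RBX) and disp8=0x10. The resulting VM-exit
 * instruction info reports u2Scaling=3, u3AddrSize=2 (64-bit), fIsRegOperand=0,
 * iSegReg=3 (DS), iIdxReg=6 and iBaseReg=3 (both valid) and iReg2=0 (RAX),
 * while the displacement 0x10 is returned via pGCPtrDisp for use as the Exit
 * qualification.
 */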
2455
2456
2457/**
2458 * VMX VM-exit handler.
2459 *
2460 * @returns Strict VBox status code.
2461 * @retval VINF_VMX_VMEXIT when the VM-exit is successful.
2462 * @retval VINF_EM_TRIPLE_FAULT when VM-exit is unsuccessful and leads to a
2463 * triple-fault.
2464 *
2465 * @param pVCpu The cross context virtual CPU structure.
2466 * @param uExitReason The VM-exit reason.
2467 * @param u64ExitQual The Exit qualification.
2468 */
2469IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
2470{
2471# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
2472 RT_NOREF3(pVCpu, uExitReason, u64ExitQual);
2473 return VINF_EM_RAW_EMULATE_INSTR;
2474# else
2475 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2476 Assert(pVmcs);
2477
2478 /*
2479 * Import all the guest-CPU state.
2480 *
2481 * HM, on returning to guest execution, would have to reset a whole lot of state
2482 * anyway (e.g. VM-entry/VM-exit controls), and we never import only a part of
2483 * the state and flag the rest for reloading on re-entry. So import the entire
2484 * state here; see HMNotifyVmxNstGstVmexit() for more comments.
2485 */
2486 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_ALL);
2487
2488 /* Ensure VM-entry interruption information valid bit isn't set. */
2489 Assert(!VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo));
2490
2491 /*
2492 * Update the VM-exit reason and Exit qualification.
2493 * Other VMCS read-only data fields are expected to be updated by the caller already.
2494 */
2495 pVmcs->u32RoExitReason = uExitReason;
2496 pVmcs->u64RoExitQual.u = u64ExitQual;
2497 Log3(("vmexit: uExitReason=%#RX32 u64ExitQual=%#RX64 cs:rip=%04x:%#RX64\n", uExitReason, pVmcs->u64RoExitQual.u,
2498 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2499
2500 /*
2501 * Update the IDT-vectoring information fields if the VM-exit is triggered during delivery of an event.
2502 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
2503 */
2504 {
2505 uint8_t uVector;
2506 uint32_t fFlags;
2507 uint32_t uErrCode;
2508 bool const fInEventDelivery = IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErrCode, NULL /* uCr2 */);
2509 if (fInEventDelivery)
2510 {
2511 uint8_t const uIdtVectoringType = iemVmxGetEventType(uVector, fFlags);
2512 uint8_t const fErrCodeValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
2513 uint32_t const uIdtVectoringInfo = RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_VECTOR, uVector)
2514 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_TYPE, uIdtVectoringType)
2515 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_ERR_CODE_VALID, fErrCodeValid)
2516 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_VALID, 1);
2517 iemVmxVmcsSetIdtVectoringInfo(pVCpu, uIdtVectoringInfo);
2518 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, uErrCode);
2519 }
2520 }
2521
2522 /* The following VMCS fields should always be zero since we don't support injecting SMIs into a guest. */
2523 Assert(pVmcs->u64RoIoRcx.u == 0);
2524 Assert(pVmcs->u64RoIoRsi.u == 0);
2525 Assert(pVmcs->u64RoIoRdi.u == 0);
2526 Assert(pVmcs->u64RoIoRip.u == 0);
2527
2528 /* We should not cause an NMI-window/interrupt-window VM-exit when injecting events as part of VM-entry. */
2529 if (!pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents)
2530 {
2531 Assert(uExitReason != VMX_EXIT_NMI_WINDOW);
2532 Assert(uExitReason != VMX_EXIT_INT_WINDOW);
2533 }
2534
2535 /*
2536 * Save the guest state back into the VMCS.
2537 * We only need to save the state when the VM-entry was successful.
2538 */
2539 bool const fVmentryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
2540 if (!fVmentryFailed)
2541 {
2542 /*
2543 * If we support storing EFER.LMA into IA32e-mode guest field on VM-exit, we need to do that now.
2544 * See Intel spec. 27.2 "Recording VM-exit Information And Updating VM-entry Control".
2545 *
2546 * It is not clear from the Intel spec. if this is done only when VM-entry succeeds.
2547 * If a VM-exit happens before loading guest EFER, we risk restoring the host EFER.LMA
2548 * as the guest-CPU state would not have been modified. Hence for now, we do this only
2549 * when the VM-entry succeeded.
2550 */
2551 /** @todo r=ramshankar: Figure out if this bit gets set to host EFER.LMA on real
2552 * hardware when VM-exit fails during VM-entry (e.g. VERR_VMX_INVALID_GUEST_STATE). */
2553 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxExitSaveEferLma)
2554 {
2555 if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
2556 pVmcs->u32EntryCtls |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
2557 else
2558 pVmcs->u32EntryCtls &= ~VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
2559 }
2560
2561 /*
2562 * The rest of the high bits of the VM-exit reason are only relevant when the VM-exit
2563 * occurs in enclave mode/SMM which we don't support yet.
2564 *
2565 * If we ever add support for it, we can pass just the lower bits to the functions
2566 * below, till then an assert should suffice.
2567 */
2568 Assert(!RT_HI_U16(uExitReason));
2569
2570 /* Save the guest state into the VMCS and restore guest MSRs from the auto-store guest MSR area. */
2571 iemVmxVmexitSaveGuestState(pVCpu, uExitReason);
2572 int rc = iemVmxVmexitSaveGuestAutoMsrs(pVCpu, uExitReason);
2573 if (RT_SUCCESS(rc))
2574 { /* likely */ }
2575 else
2576 return iemVmxAbort(pVCpu, VMXABORT_SAVE_GUEST_MSRS);
2577
2578 /* Clear any saved NMI-blocking state so we don't assert on next VM-entry (if it was in effect on the previous one). */
2579 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions &= ~VMCPU_FF_BLOCK_NMIS;
2580 }
2581 else
2582 {
2583 /* Restore the NMI-blocking state if VM-entry failed due to invalid guest state or while loading MSRs. */
2584 uint32_t const uExitReasonBasic = VMX_EXIT_REASON_BASIC(uExitReason);
2585 if ( uExitReasonBasic == VMX_EXIT_ERR_INVALID_GUEST_STATE
2586 || uExitReasonBasic == VMX_EXIT_ERR_MSR_LOAD)
2587 iemVmxVmexitRestoreNmiBlockingFF(pVCpu);
2588 }
2589
2590 /*
2591 * Clear any pending VMX nested-guest force-flags.
2592 * These force-flags have no effect on guest execution and will
2593 * be re-evaluated and set up on the next nested-guest VM-entry.
2594 */
2595 VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER
2596 | VMCPU_FF_VMX_MTF
2597 | VMCPU_FF_VMX_APIC_WRITE
2598 | VMCPU_FF_VMX_INT_WINDOW
2599 | VMCPU_FF_VMX_NMI_WINDOW);
2600
2601 /* Restore the host (outer guest) state. */
2602 VBOXSTRICTRC rcStrict = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
2603 if (RT_SUCCESS(rcStrict))
2604 {
2605 Assert(rcStrict == VINF_SUCCESS);
2606 rcStrict = VINF_VMX_VMEXIT;
2607 }
2608 else
2609 Log3(("vmexit: Loading host-state failed. uExitReason=%u rc=%Rrc\n", uExitReason, VBOXSTRICTRC_VAL(rcStrict)));
2610
2611 /* Notify HM that we've completed the VM-exit. */
2612 HMNotifyVmxNstGstVmexit(pVCpu, &pVCpu->cpum.GstCtx);
2613
2614 /* We're no longer in nested-guest execution mode. */
2615 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
2616
2617# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
2618 /* Revert any IEM-only nested-guest execution policy; pass up any non-success scheduling status. */
2619 Log(("vmexit: Disabling IEM-only EM execution policy!\n"));
2620 int rcSched = EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
2621 if (rcSched != VINF_SUCCESS)
2622 iemSetPassUpStatus(pVCpu, rcSched);
2623# endif
2624 return rcStrict;
2625# endif
2626}
2627
2628
2629/**
2630 * VMX VM-exit handler for VM-exits due to instruction execution.
2631 *
2632 * This is intended for instructions where the caller provides all the relevant
2633 * VM-exit information.
2634 *
2635 * @returns Strict VBox status code.
2636 * @param pVCpu The cross context virtual CPU structure.
2637 * @param pExitInfo Pointer to the VM-exit information.
2638 */
2639IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrWithInfo(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
2640{
2641 /*
2642 * For instructions where any of the following fields are not applicable:
2643 * - Exit qualification must be cleared.
2644 * - VM-exit instruction info. is undefined.
2645 * - Guest-linear address is undefined.
2646 * - Guest-physical address is undefined.
2647 *
2648 * The VM-exit instruction length is mandatory for all VM-exits that are caused by
2649 * instruction execution. For VM-exits that are not due to instruction execution this
2650 * field is undefined.
2651 *
2652 * In our implementation in IEM, all undefined fields are generally cleared. However,
2653 * if the caller supplies information (from say the physical CPU directly) it is
2654 * then possible that the undefined fields are not cleared.
2655 *
2656 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2657 * See Intel spec. 27.2.4 "Information for VM Exits Due to Instruction Execution".
2658 */
2659 Assert(pExitInfo);
2660 AssertMsg(pExitInfo->uReason <= VMX_EXIT_MAX, ("uReason=%u\n", pExitInfo->uReason));
2661 AssertMsg(pExitInfo->cbInstr >= 1 && pExitInfo->cbInstr <= 15,
2662 ("uReason=%u cbInstr=%u\n", pExitInfo->uReason, pExitInfo->cbInstr));
2663
2664 /* Update all the relevant fields from the VM-exit instruction information struct. */
2665 iemVmxVmcsSetExitInstrInfo(pVCpu, pExitInfo->InstrInfo.u);
2666 iemVmxVmcsSetExitGuestLinearAddr(pVCpu, pExitInfo->u64GuestLinearAddr);
2667 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, pExitInfo->u64GuestPhysAddr);
2668 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
2669
2670 /* Perform the VM-exit. */
2671 return iemVmxVmexit(pVCpu, pExitInfo->uReason, pExitInfo->u64Qual);
2672}
2673
2674
2675/**
2676 * VMX VM-exit handler for VM-exits due to instruction execution.
2677 *
2678 * This is intended for instructions that only provide the VM-exit instruction
2679 * length.
2680 *
2681 * @param pVCpu The cross context virtual CPU structure.
2682 * @param uExitReason The VM-exit reason.
2683 * @param cbInstr The instruction length in bytes.
2684 */
2685IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstr(PVMCPU pVCpu, uint32_t uExitReason, uint8_t cbInstr)
2686{
2687 VMXVEXITINFO ExitInfo;
2688 RT_ZERO(ExitInfo);
2689 ExitInfo.uReason = uExitReason;
2690 ExitInfo.cbInstr = cbInstr;
2691
2692#ifdef VBOX_STRICT
2693 /*
2694 * To prevent us from shooting ourselves in the foot.
2695 * The following instructions should convey more than just the instruction length.
2696 */
2697 switch (uExitReason)
2698 {
2699 case VMX_EXIT_INVEPT:
2700 case VMX_EXIT_INVPCID:
2701 case VMX_EXIT_INVVPID:
2702 case VMX_EXIT_LDTR_TR_ACCESS:
2703 case VMX_EXIT_GDTR_IDTR_ACCESS:
2704 case VMX_EXIT_VMCLEAR:
2705 case VMX_EXIT_VMPTRLD:
2706 case VMX_EXIT_VMPTRST:
2707 case VMX_EXIT_VMREAD:
2708 case VMX_EXIT_VMWRITE:
2709 case VMX_EXIT_VMXON:
2710 case VMX_EXIT_XRSTORS:
2711 case VMX_EXIT_XSAVES:
2712 case VMX_EXIT_RDRAND:
2713 case VMX_EXIT_RDSEED:
2714 case VMX_EXIT_IO_INSTR:
2715 AssertMsgFailedReturn(("Use iemVmxVmexitInstrNeedsInfo for uExitReason=%u\n", uExitReason), VERR_IEM_IPE_5);
2716 break;
2717 }
2718#endif
2719
2720 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2721}
2722
2723
2724/**
2725 * VMX VM-exit handler for VM-exits due to instruction execution.
2726 *
2727 * This is intended for instructions that have a ModR/M byte and update the VM-exit
2728 * instruction information and Exit qualification fields.
2729 *
2730 * @param pVCpu The cross context virtual CPU structure.
2731 * @param uExitReason The VM-exit reason.
2732 * @param uInstrId The instruction identity (VMXINSTRID_XXX).
2733 * @param cbInstr The instruction length in bytes.
2734 *
2735 * @remarks Do not use this for the INS/OUTS instructions.
2736 */
2737IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrNeedsInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr)
2738{
2739 VMXVEXITINFO ExitInfo;
2740 RT_ZERO(ExitInfo);
2741 ExitInfo.uReason = uExitReason;
2742 ExitInfo.cbInstr = cbInstr;
2743
2744 /*
2745 * Update the Exit qualification field with displacement bytes.
2746 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2747 */
2748 switch (uExitReason)
2749 {
2750 case VMX_EXIT_INVEPT:
2751 case VMX_EXIT_INVPCID:
2752 case VMX_EXIT_INVVPID:
2753 case VMX_EXIT_LDTR_TR_ACCESS:
2754 case VMX_EXIT_GDTR_IDTR_ACCESS:
2755 case VMX_EXIT_VMCLEAR:
2756 case VMX_EXIT_VMPTRLD:
2757 case VMX_EXIT_VMPTRST:
2758 case VMX_EXIT_VMREAD:
2759 case VMX_EXIT_VMWRITE:
2760 case VMX_EXIT_VMXON:
2761 case VMX_EXIT_XRSTORS:
2762 case VMX_EXIT_XSAVES:
2763 case VMX_EXIT_RDRAND:
2764 case VMX_EXIT_RDSEED:
2765 {
2766 /* Construct the VM-exit instruction information. */
2767 RTGCPTR GCPtrDisp;
2768 uint32_t const uInstrInfo = iemVmxGetExitInstrInfo(pVCpu, uExitReason, uInstrId, &GCPtrDisp);
2769
2770 /* Update the VM-exit instruction information. */
2771 ExitInfo.InstrInfo.u = uInstrInfo;
2772
2773 /* Update the Exit qualification. */
2774 ExitInfo.u64Qual = GCPtrDisp;
2775 break;
2776 }
2777
2778 default:
2779 AssertMsgFailedReturn(("Use instruction-specific handler\n"), VERR_IEM_IPE_5);
2780 break;
2781 }
2782
2783 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2784}
2785
2786
2787/**
2788 * VMX VM-exit handler for VM-exits due to INVLPG.
2789 *
2790 * @returns Strict VBox status code.
2791 * @param pVCpu The cross context virtual CPU structure.
2792 * @param GCPtrPage The guest-linear address of the page being invalidated.
2793 * @param cbInstr The instruction length in bytes.
2794 */
2795IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrInvlpg(PVMCPU pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr)
2796{
2797 VMXVEXITINFO ExitInfo;
2798 RT_ZERO(ExitInfo);
2799 ExitInfo.uReason = VMX_EXIT_INVLPG;
2800 ExitInfo.cbInstr = cbInstr;
2801 ExitInfo.u64Qual = GCPtrPage;
2802 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(ExitInfo.u64Qual));
2803
2804 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2805}
2806
2807
2808/**
2809 * VMX VM-exit handler for VM-exits due to LMSW.
2810 *
2811 * @returns Strict VBox status code.
2812 * @param pVCpu The cross context virtual CPU structure.
2813 * @param uGuestCr0 The current guest CR0.
2814 * @param pu16NewMsw The machine-status word specified in LMSW's source
2815 * operand. This will be updated depending on the VMX
2816 * guest/host CR0 mask if LMSW is not intercepted.
2817 * @param GCPtrEffDst The guest-linear address of the source operand in case
2818 * of a memory operand. For register operand, pass
2819 * NIL_RTGCPTR.
2820 * @param cbInstr The instruction length in bytes.
2821 */
2822IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrLmsw(PVMCPU pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw, RTGCPTR GCPtrEffDst,
2823 uint8_t cbInstr)
2824{
2825 Assert(pu16NewMsw);
2826
2827 uint16_t const uNewMsw = *pu16NewMsw;
2828 if (CPUMIsGuestVmxLmswInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, uNewMsw))
2829 {
2830 Log2(("lmsw: Guest intercept -> VM-exit\n"));
2831
2832 VMXVEXITINFO ExitInfo;
2833 RT_ZERO(ExitInfo);
2834 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
2835 ExitInfo.cbInstr = cbInstr;
2836
2837 bool const fMemOperand = RT_BOOL(GCPtrEffDst != NIL_RTGCPTR);
2838 if (fMemOperand)
2839 {
2840 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(GCPtrEffDst));
2841 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
2842 }
2843
2844 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
2845 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_LMSW)
2846 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_OP, fMemOperand)
2847 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_DATA, uNewMsw);
2848
2849 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2850 }
2851
2852 /*
2853 * If LMSW did not cause a VM-exit, any CR0 bits in the range 0:3 that are set in the
2854 * CR0 guest/host mask must be left unmodified.
2855 *
2856 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
2857 */
2858 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2859 Assert(pVmcs);
2860 uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
2861 uint32_t const fGstHostLmswMask = fGstHostMask & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
2862 *pu16NewMsw = (uGuestCr0 & fGstHostLmswMask) | (uNewMsw & ~fGstHostLmswMask);
2863
2864 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
2865}
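/*
 * Minimal sketch (not used above) of the LMSW masking rule applied at the end
 * of iemVmxVmexitInstrLmsw: host-owned bits among CR0.PE/MP/EM/TS keep their
 * current guest values, only guest-owned bits are taken from the LMSW source
 * operand. The bit values 0x1/0x2/0x4/0x8 correspond to X86_CR0_PE, MP, EM, TS.
 */
static uint16_t vmxLmswMergeSketch(uint32_t uGuestCr0, uint16_t uNewMsw, uint32_t fCr0GstHostMask)
{
    uint32_t const fLmswBits  = 0x1 | 0x2 | 0x4 | 0x8;          /* PE, MP, EM, TS: the only bits LMSW can touch. */
    uint32_t const fHostOwned = fCr0GstHostMask & fLmswBits;    /* Bits the guest hypervisor wants to own. */
    return (uint16_t)((uGuestCr0 & fHostOwned) | (uNewMsw & ~fHostOwned));
}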
2866
2867
2868/**
2869 * VMX VM-exit handler for VM-exits due to CLTS.
2870 *
2871 * @returns Strict VBox status code.
2872 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the CLTS instruction did not cause a
2873 * VM-exit but must not modify the guest CR0.TS bit.
2874 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the CLTS instruction did not cause a
2875 * VM-exit and modification to the guest CR0.TS bit is allowed (subject to
2876 * CR0 fixed bits in VMX operation).
2877 * @param pVCpu The cross context virtual CPU structure.
2878 * @param cbInstr The instruction length in bytes.
2879 */
2880IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrClts(PVMCPU pVCpu, uint8_t cbInstr)
2881{
2882 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2883 Assert(pVmcs);
2884
2885 uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
2886 uint32_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u;
2887
2888 /*
2889 * If CR0.TS is owned by the host:
2890 * - If CR0.TS is set in the read-shadow, we must cause a VM-exit.
2891 * - If CR0.TS is cleared in the read-shadow, no VM-exit is caused and the
2892 * CLTS instruction completes without clearing CR0.TS.
2893 *
2894 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2895 */
2896 if (fGstHostMask & X86_CR0_TS)
2897 {
2898 if (fReadShadow & X86_CR0_TS)
2899 {
2900 Log2(("clts: Guest intercept -> VM-exit\n"));
2901
2902 VMXVEXITINFO ExitInfo;
2903 RT_ZERO(ExitInfo);
2904 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
2905 ExitInfo.cbInstr = cbInstr;
2906 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
2907 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_CLTS);
2908 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2909 }
2910
2911 return VINF_VMX_MODIFIES_BEHAVIOR;
2912 }
2913
2914 /*
2915     * If CR0.TS is not owned by the host, the CLTS instruction operates normally
2916 * and may modify CR0.TS (subject to CR0 fixed bits in VMX operation).
2917 */
2918 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
2919}
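/*
 * Editor's sketch (not part of the original source): the CLTS decision above reduced to a
 * tiny helper returning the three possible outcomes. The enum and helper name are made up.
 */
#if 0 /* illustrative only, not built */
typedef enum { CLTS_CAUSES_VMEXIT, CLTS_LEAVES_TS_UNTOUCHED, CLTS_CLEARS_TS } CLTSOUTCOME;
static CLTSOUTCOME vmxSketchCltsOutcome(int fHostOwnsTs, int fReadShadowTs)
{
    if (!fHostOwnsTs)
        return CLTS_CLEARS_TS;            /* CLTS operates normally (subject to CR0 fixed bits). */
    if (fReadShadowTs)
        return CLTS_CAUSES_VMEXIT;        /* Host owns TS and the read shadow has TS set. */
    return CLTS_LEAVES_TS_UNTOUCHED;      /* No VM-exit, but guest CR0.TS must not be cleared. */
}
#endif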
2920
2921
2922/**
2923 * VMX VM-exit handler for VM-exits due to 'Mov CR0,GReg' and 'Mov CR4,GReg'
2924 * (CR0/CR4 write).
2925 *
2926 * @returns Strict VBox status code.
2927 * @param pVCpu The cross context virtual CPU structure.
2928 * @param iCrReg The control register (either CR0 or CR4).
2930 * @param puNewCrX Pointer to the new CR0/CR4 value. Will be updated if no
2931 * VM-exit is caused.
2932 * @param iGReg The general register from which the CR0/CR4 value is being
2933 * loaded.
2934 * @param cbInstr The instruction length in bytes.
2935 */
2936IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr0Cr4(PVMCPU pVCpu, uint8_t iCrReg, uint64_t *puNewCrX, uint8_t iGReg,
2937 uint8_t cbInstr)
2938{
2939 Assert(puNewCrX);
2940 Assert(iCrReg == 0 || iCrReg == 4);
2941 Assert(iGReg < X86_GREG_COUNT);
2942
2943 uint64_t const uNewCrX = *puNewCrX;
2944 if (CPUMIsGuestVmxMovToCr0Cr4InterceptSet(pVCpu, &pVCpu->cpum.GstCtx, iCrReg, uNewCrX))
2945 {
2946 Log2(("mov_Cr_Rd: (CR%u) Guest intercept -> VM-exit\n", iCrReg));
2947
2948 VMXVEXITINFO ExitInfo;
2949 RT_ZERO(ExitInfo);
2950 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
2951 ExitInfo.cbInstr = cbInstr;
2952 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, iCrReg)
2953 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
2954 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
2955 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2956 }
2957
2958 /*
2959 * If the Mov-to-CR0/CR4 did not cause a VM-exit, any bits owned by the host
2960     * must not be modified by the instruction.
2961 *
2962 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
2963 */
2964 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2965 Assert(pVmcs);
2966 uint64_t uGuestCrX;
2967 uint64_t fGstHostMask;
2968 if (iCrReg == 0)
2969 {
2970 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
2971 uGuestCrX = pVCpu->cpum.GstCtx.cr0;
2972 fGstHostMask = pVmcs->u64Cr0Mask.u;
2973 }
2974 else
2975 {
2976 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
2977 uGuestCrX = pVCpu->cpum.GstCtx.cr4;
2978 fGstHostMask = pVmcs->u64Cr4Mask.u;
2979 }
2980
2981 *puNewCrX = (uGuestCrX & fGstHostMask) | (*puNewCrX & ~fGstHostMask);
2982 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
2983}
2984
2985
2986/**
2987 * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR3' (CR3 read).
2988 *
2989 * @returns VBox strict status code.
2990 * @param pVCpu The cross context virtual CPU structure.
2991 * @param iGReg The general register to which the CR3 value is being stored.
2992 * @param cbInstr The instruction length in bytes.
2993 */
2994IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
2995{
2996 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2997 Assert(pVmcs);
2998 Assert(iGReg < X86_GREG_COUNT);
2999 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
3000
3001 /*
3002 * If the CR3-store exiting control is set, we must cause a VM-exit.
3003 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3004 */
3005 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT)
3006 {
3007 Log2(("mov_Rd_Cr: (CR3) Guest intercept -> VM-exit\n"));
3008
3009 VMXVEXITINFO ExitInfo;
3010 RT_ZERO(ExitInfo);
3011 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3012 ExitInfo.cbInstr = cbInstr;
3013 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
3014 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ)
3015 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3016 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3017 }
3018
3019 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3020}
3021
3022
3023/**
3024 * VMX VM-exit handler for VM-exits due to 'Mov CR3,GReg' (CR3 write).
3025 *
3026 * @returns VBox strict status code.
3027 * @param pVCpu The cross context virtual CPU structure.
3028 * @param uNewCr3 The new CR3 value.
3029 * @param iGReg The general register from which the CR3 value is being
3030 * loaded.
3031 * @param cbInstr The instruction length in bytes.
3032 */
3033IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr3(PVMCPU pVCpu, uint64_t uNewCr3, uint8_t iGReg, uint8_t cbInstr)
3034{
3035 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3036 Assert(pVmcs);
3037 Assert(iGReg < X86_GREG_COUNT);
3038
3039 /*
3040 * If the CR3-load exiting control is set and the new CR3 value does not
3041 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
3042 *
3043 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3044 */
3045 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR3_LOAD_EXIT)
3046 {
3047 uint32_t const uCr3TargetCount = pVmcs->u32Cr3TargetCount;
3048 Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);
3049
3050        /* Cause a VM-exit unless the new CR3 matches one of the CR3-target values (a target count of 0 always causes a VM-exit). */
3051        bool fIntercept = true;
3052        if (uCr3TargetCount > 0)
3053        {
3054            for (uint32_t idxCr3Target = 0; idxCr3Target < uCr3TargetCount; idxCr3Target++)
3055            {
3056                uint64_t const uCr3TargetValue = iemVmxVmcsGetCr3TargetValue(pVmcs, idxCr3Target);
3057                if (uNewCr3 == uCr3TargetValue)
3058                {
3059                    fIntercept = false;
3060                    break;
3061                }
3062            }
3063        }
3064
3065 if (fIntercept)
3066 {
3067 Log2(("mov_Cr_Rd: (CR3) Guest intercept -> VM-exit\n"));
3068
3069 VMXVEXITINFO ExitInfo;
3070 RT_ZERO(ExitInfo);
3071 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3072 ExitInfo.cbInstr = cbInstr;
3073 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
3074 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3075 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3076 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3077 }
3078 }
3079
3080 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3081}
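/*
 * Editor's sketch (not part of the original source): the CR3-target check above causes a
 * VM-exit unless the new CR3 equals one of the first uCr3TargetCount target values; a
 * target count of 0 therefore always causes a VM-exit. Self-contained illustration with
 * plain <stdint.h>/<stdbool.h> types; the helper name is made up.
 */
#if 0 /* illustrative only, not built */
# include <stdbool.h>
# include <stdint.h>
static bool vmxSketchIsCr3WriteIntercepted(uint64_t uNewCr3, uint64_t const *pauCr3Targets, uint32_t cTargets)
{
    for (uint32_t i = 0; i < cTargets; i++)
        if (uNewCr3 == pauCr3Targets[i])
            return false;   /* Matches a CR3-target value -> no VM-exit. */
    return true;            /* No match (or no targets) -> VM-exit. */
}
#endif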
3082
3083
3084/**
3085 * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR8' (CR8 read).
3086 *
3087 * @returns VBox strict status code.
3088 * @param pVCpu The cross context virtual CPU structure.
3089 * @param iGReg The general register to which the CR8 value is being stored.
3090 * @param cbInstr The instruction length in bytes.
3091 */
3092IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr8(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3093{
3094 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3095 Assert(pVmcs);
3096 Assert(iGReg < X86_GREG_COUNT);
3097
3098 /*
3099 * If the CR8-store exiting control is set, we must cause a VM-exit.
3100 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3101 */
3102 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR8_STORE_EXIT)
3103 {
3104 Log2(("mov_Rd_Cr: (CR8) Guest intercept -> VM-exit\n"));
3105
3106 VMXVEXITINFO ExitInfo;
3107 RT_ZERO(ExitInfo);
3108 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3109 ExitInfo.cbInstr = cbInstr;
3110 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
3111 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ)
3112 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3113 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3114 }
3115
3116 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3117}
3118
3119
3120/**
3121 * VMX VM-exit handler for VM-exits due to 'Mov CR8,GReg' (CR8 write).
3122 *
3123 * @returns VBox strict status code.
3124 * @param pVCpu The cross context virtual CPU structure.
3125 * @param iGReg The general register from which the CR8 value is being
3126 * loaded.
3127 * @param cbInstr The instruction length in bytes.
3128 */
3129IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr8(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3130{
3131 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3132 Assert(pVmcs);
3133 Assert(iGReg < X86_GREG_COUNT);
3134
3135 /*
3136 * If the CR8-load exiting control is set, we must cause a VM-exit.
3137 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3138 */
3139 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR8_LOAD_EXIT)
3140 {
3141 Log2(("mov_Cr_Rd: (CR8) Guest intercept -> VM-exit\n"));
3142
3143 VMXVEXITINFO ExitInfo;
3144 RT_ZERO(ExitInfo);
3145 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3146 ExitInfo.cbInstr = cbInstr;
3147 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
3148 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3149 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3150 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3151 }
3152
3153 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3154}
3155
3156
3157/**
3158 * VMX VM-exit handler for VM-exits due to 'Mov DRx,GReg' (DRx write) and 'Mov
3159 * GReg,DRx' (DRx read).
3160 *
3161 * @returns VBox strict status code.
3162 * @param pVCpu The cross context virtual CPU structure.
3163 * @param uInstrId The instruction identity (VMXINSTRID_MOV_TO_DRX or
3164 * VMXINSTRID_MOV_FROM_DRX).
3165 * @param iDrReg The debug register being accessed.
3166 * @param iGReg The general register to/from which the DRx value is being
3167 * stored/loaded.
3168 * @param cbInstr The instruction length in bytes.
3169 */
3170IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovDrX(PVMCPU pVCpu, VMXINSTRID uInstrId, uint8_t iDrReg, uint8_t iGReg,
3171 uint8_t cbInstr)
3172{
3173 Assert(iDrReg <= 7);
3174 Assert(uInstrId == VMXINSTRID_MOV_TO_DRX || uInstrId == VMXINSTRID_MOV_FROM_DRX);
3175 Assert(iGReg < X86_GREG_COUNT);
3176
3177 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3178 Assert(pVmcs);
3179
3180 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT)
3181 {
3182 uint32_t const uDirection = uInstrId == VMXINSTRID_MOV_TO_DRX ? VMX_EXIT_QUAL_DRX_DIRECTION_WRITE
3183 : VMX_EXIT_QUAL_DRX_DIRECTION_READ;
3184 VMXVEXITINFO ExitInfo;
3185 RT_ZERO(ExitInfo);
3186 ExitInfo.uReason = VMX_EXIT_MOV_DRX;
3187 ExitInfo.cbInstr = cbInstr;
3188 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_REGISTER, iDrReg)
3189 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_DIRECTION, uDirection)
3190 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_GENREG, iGReg);
3191 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3192 }
3193
3194 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3195}
3196
3197
3198/**
3199 * VMX VM-exit handler for VM-exits due to I/O instructions (IN and OUT).
3200 *
3201 * @returns VBox strict status code.
3202 * @param pVCpu The cross context virtual CPU structure.
3203 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_IO_IN or
3204 * VMXINSTRID_IO_OUT).
3205 * @param u16Port The I/O port being accessed.
3206 * @param fImm Whether the I/O port was encoded using an immediate operand
3207 * or the implicit DX register.
3208 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3209 * @param cbInstr The instruction length in bytes.
3210 */
3211IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrIo(PVMCPU pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, bool fImm, uint8_t cbAccess,
3212 uint8_t cbInstr)
3213{
3214 Assert(uInstrId == VMXINSTRID_IO_IN || uInstrId == VMXINSTRID_IO_OUT);
3215 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3216
3217 bool const fIntercept = CPUMIsGuestVmxIoInterceptSet(pVCpu, u16Port, cbAccess);
3218 if (fIntercept)
3219 {
3220 uint32_t const uDirection = uInstrId == VMXINSTRID_IO_IN ? VMX_EXIT_QUAL_IO_DIRECTION_IN
3221 : VMX_EXIT_QUAL_IO_DIRECTION_OUT;
3222 VMXVEXITINFO ExitInfo;
3223 RT_ZERO(ExitInfo);
3224 ExitInfo.uReason = VMX_EXIT_IO_INSTR;
3225 ExitInfo.cbInstr = cbInstr;
3226 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_WIDTH, cbAccess - 1)
3227 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_DIRECTION, uDirection)
3228 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_ENCODING, fImm)
3229 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_PORT, u16Port);
3230 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3231 }
3232
3233 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3234}
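/*
 * Editor's sketch (not part of the original source): how the Exit qualification for IN/OUT
 * is composed above, rewritten with plain shifts instead of RT_BF_MAKE(). The bit positions
 * follow our reading of the Intel SDM layout for I/O-instruction Exit qualifications and
 * the helper name is made up.
 */
#if 0 /* illustrative only, not built */
# include <stdbool.h>
# include <stdint.h>
static uint64_t vmxSketchIoExitQual(uint8_t cbAccess, bool fIn, bool fImm, uint16_t u16Port)
{
    return ((uint64_t)(cbAccess - 1) & 0x7)   /* bits 2:0   - size of access minus one.      */
         | ((uint64_t)fIn     <<  3)          /* bit  3     - direction (1 = IN, 0 = OUT).   */
         | ((uint64_t)fImm    <<  6)          /* bit  6     - operand encoding (1 = imm).    */
         | ((uint64_t)u16Port << 16);         /* bits 31:16 - I/O port number.               */
}
#endif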
3235
3236
3237/**
3238 * VMX VM-exit handler for VM-exits due to string I/O instructions (INS and OUTS).
3239 *
3240 * @returns VBox strict status code.
3241 * @param pVCpu The cross context virtual CPU structure.
3242 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_IO_INS or
3243 * VMXINSTRID_IO_OUTS).
3244 * @param u16Port The I/O port being accessed.
3245 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3246 * @param fRep Whether the instruction has a REP prefix or not.
3247 * @param ExitInstrInfo The VM-exit instruction info. field.
3248 * @param cbInstr The instruction length in bytes.
3249 */
3250IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrStrIo(PVMCPU pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, uint8_t cbAccess, bool fRep,
3251 VMXEXITINSTRINFO ExitInstrInfo, uint8_t cbInstr)
3252{
3253 Assert(uInstrId == VMXINSTRID_IO_INS || uInstrId == VMXINSTRID_IO_OUTS);
3254 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3255 Assert(ExitInstrInfo.StrIo.iSegReg < X86_SREG_COUNT);
3256 Assert(ExitInstrInfo.StrIo.u3AddrSize == 0 || ExitInstrInfo.StrIo.u3AddrSize == 1 || ExitInstrInfo.StrIo.u3AddrSize == 2);
3257 Assert(uInstrId != VMXINSTRID_IO_INS || ExitInstrInfo.StrIo.iSegReg == X86_SREG_ES);
3258
3259 bool const fIntercept = CPUMIsGuestVmxIoInterceptSet(pVCpu, u16Port, cbAccess);
3260 if (fIntercept)
3261 {
3262 /*
3263 * Figure out the guest-linear address and the direction bit (INS/OUTS).
3264 */
3265 /** @todo r=ramshankar: Is there something in IEM that already does this? */
3266 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
3267 uint8_t const iSegReg = ExitInstrInfo.StrIo.iSegReg;
3268 uint8_t const uAddrSize = ExitInstrInfo.StrIo.u3AddrSize;
3269 uint64_t const uAddrSizeMask = s_auAddrSizeMasks[uAddrSize];
3270
3271 uint32_t uDirection;
3272 uint64_t uGuestLinearAddr;
3273 if (uInstrId == VMXINSTRID_IO_INS)
3274 {
3275 uDirection = VMX_EXIT_QUAL_IO_DIRECTION_IN;
3276 uGuestLinearAddr = pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base + (pVCpu->cpum.GstCtx.rdi & uAddrSizeMask);
3277 }
3278 else
3279 {
3280 uDirection = VMX_EXIT_QUAL_IO_DIRECTION_OUT;
3281 uGuestLinearAddr = pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base + (pVCpu->cpum.GstCtx.rsi & uAddrSizeMask);
3282 }
3283
3284 /*
3285         * If the segment is unusable, the guest-linear address is undefined.
3286 * We shall clear it for consistency.
3287 *
3288 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
3289 */
3290 if (pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable)
3291 uGuestLinearAddr = 0;
3292
3293 VMXVEXITINFO ExitInfo;
3294 RT_ZERO(ExitInfo);
3295 ExitInfo.uReason = VMX_EXIT_IO_INSTR;
3296 ExitInfo.cbInstr = cbInstr;
3297 ExitInfo.InstrInfo = ExitInstrInfo;
3298 ExitInfo.u64GuestLinearAddr = uGuestLinearAddr;
3299 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_WIDTH, cbAccess - 1)
3300 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_DIRECTION, uDirection)
3301 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_IS_STRING, 1)
3302 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_IS_REP, fRep)
3303 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_ENCODING, VMX_EXIT_QUAL_IO_ENCODING_DX)
3304 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_PORT, u16Port);
3305 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3306 }
3307
3308 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3309}
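/*
 * Editor's sketch (not part of the original source): the guest-linear address computed above
 * for INS/OUTS is the segment base plus RDI (INS) or RSI (OUTS), masked to the effective
 * address size. Self-contained illustration; the helper name is made up.
 */
#if 0 /* illustrative only, not built */
# include <stdint.h>
static uint64_t vmxSketchStrIoLinAddr(uint64_t uSegBase, uint64_t uIndexReg /* rdi or rsi */,
                                      unsigned iAddrSize /* 0=16-bit, 1=32-bit, 2=64-bit */)
{
    static uint64_t const s_auAddrMasks[] = { UINT16_MAX, UINT32_MAX, UINT64_MAX };
    return uSegBase + (uIndexReg & s_auAddrMasks[iAddrSize]);
}
#endif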
3310
3311
3312/**
3313 * VMX VM-exit handler for VM-exits due to MWAIT.
3314 *
3315 * @returns VBox strict status code.
3316 * @param pVCpu The cross context virtual CPU structure.
3317 * @param fMonitorHwArmed Whether the address-range monitor hardware is armed.
3318 * @param cbInstr The instruction length in bytes.
3319 */
3320IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMwait(PVMCPU pVCpu, bool fMonitorHwArmed, uint8_t cbInstr)
3321{
3322 VMXVEXITINFO ExitInfo;
3323 RT_ZERO(ExitInfo);
3324 ExitInfo.uReason = VMX_EXIT_MWAIT;
3325 ExitInfo.cbInstr = cbInstr;
3326 ExitInfo.u64Qual = fMonitorHwArmed;
3327 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3328}
3329
3330
3331/**
3332 * VMX VM-exit handler for VM-exits due to PAUSE.
3333 *
3334 * @returns VBox strict status code.
3335 * @param pVCpu The cross context virtual CPU structure.
3336 * @param cbInstr The instruction length in bytes.
3337 */
3338IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrPause(PVMCPU pVCpu, uint8_t cbInstr)
3339{
3340 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3341 Assert(pVmcs);
3342
3343 /*
3344 * The PAUSE VM-exit is controlled by the "PAUSE exiting" control and the
3345 * "PAUSE-loop exiting" control.
3346 *
3347     * The PLE-Gap is the maximum number of TSC ticks allowed between two successive
3348     * executions of PAUSE for them to be considered part of the same pause loop. The
3349     * PLE-Window is the maximum number of TSC ticks the guest may spend in such a pause
3350     * loop before we must cause a VM-exit.
3351 *
3352 * See Intel spec. 24.6.13 "Controls for PAUSE-Loop Exiting".
3353 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3354 */
3355 bool fIntercept = false;
3356 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_PAUSE_EXIT)
3357 fIntercept = true;
3358 else if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
3359 && pVCpu->iem.s.uCpl == 0)
3360 {
3361 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
3362
3363 /*
3364         * A previous-PAUSE-tick value of 0 is used to identify the first execution
3365         * of a PAUSE instruction after VM-entry at CPL 0. According to the Intel
3366         * spec, we must consider this to be the first execution of PAUSE in a loop.
3367         *
3368         * For all subsequent recordings of the previous-PAUSE-tick we ensure that it
3369         * cannot be zero by OR'ing in 1, which rules out the TSC wrap-around case
3370         * at 0.
3371 */
3372 uint64_t *puFirstPauseLoopTick = &pVCpu->cpum.GstCtx.hwvirt.vmx.uFirstPauseLoopTick;
3373 uint64_t *puPrevPauseTick = &pVCpu->cpum.GstCtx.hwvirt.vmx.uPrevPauseTick;
3374 uint64_t const uTick = TMCpuTickGet(pVCpu);
3375 uint32_t const uPleGap = pVmcs->u32PleGap;
3376 uint32_t const uPleWindow = pVmcs->u32PleWindow;
3377 if ( *puPrevPauseTick == 0
3378 || uTick - *puPrevPauseTick > uPleGap)
3379 *puFirstPauseLoopTick = uTick;
3380 else if (uTick - *puFirstPauseLoopTick > uPleWindow)
3381 fIntercept = true;
3382
3383 *puPrevPauseTick = uTick | 1;
3384 }
3385
3386 if (fIntercept)
3387 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_PAUSE, cbInstr);
3388
3389 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3390}
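/*
 * Editor's sketch (not part of the original source): the PAUSE-loop-exiting bookkeeping above
 * reduced to its essentials; returns true when the PLE window has been exceeded. Self-contained
 * illustration with plain <stdint.h>/<stdbool.h> types; names are made up.
 */
#if 0 /* illustrative only, not built */
# include <stdbool.h>
# include <stdint.h>
static bool vmxSketchPleShouldExit(uint64_t uTick, uint32_t uPleGap, uint32_t uPleWindow,
                                   uint64_t *puFirstPauseLoopTick, uint64_t *puPrevPauseTick)
{
    bool fExit = false;
    if (   *puPrevPauseTick == 0                    /* First PAUSE at CPL 0 since VM-entry. */
        || uTick - *puPrevPauseTick > uPleGap)      /* Gap exceeded: start of a new pause loop. */
        *puFirstPauseLoopTick = uTick;
    else if (uTick - *puFirstPauseLoopTick > uPleWindow)
        fExit = true;                               /* Spun in the loop longer than the window. */
    *puPrevPauseTick = uTick | 1;                   /* Never record 0 (TSC wrap-around guard). */
    return fExit;
}
#endif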
3391
3392
3393/**
3394 * VMX VM-exit handler for VM-exits due to task switches.
3395 *
3396 * @returns VBox strict status code.
3397 * @param pVCpu The cross context virtual CPU structure.
3398 * @param enmTaskSwitch The cause of the task switch.
3399 * @param SelNewTss The selector of the new TSS.
3400 * @param cbInstr The instruction length in bytes.
3401 */
3402IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr)
3403{
3404 /*
3405 * Task-switch VM-exits are unconditional and provide the Exit qualification.
3406 *
3407     * If the task switch is caused by executing a CALL, IRET or JMP instruction, or
3408     * by the delivery of an event generated by one of these instructions that leads
3409     * to a task switch through a task gate in the IDT, we need to provide the
3410     * VM-exit instruction length. Any other means of invoking a task switch VM-exit
3411     * leaves the VM-exit instruction length field undefined.
3412 *
3413 * See Intel spec. 25.2 "Other Causes Of VM Exits".
3414 * See Intel spec. 27.2.4 "Information for VM Exits Due to Instruction Execution".
3415 */
3416 Assert(cbInstr <= 15);
3417
3418 uint8_t uType;
3419 switch (enmTaskSwitch)
3420 {
3421 case IEMTASKSWITCH_CALL: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_CALL; break;
3422 case IEMTASKSWITCH_IRET: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IRET; break;
3423 case IEMTASKSWITCH_JUMP: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_JMP; break;
3424 case IEMTASKSWITCH_INT_XCPT: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT; break;
3425 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3426 }
3427
3428 uint64_t const u64ExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_NEW_TSS, SelNewTss)
3429 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_SOURCE, uType);
3430 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
3431 return iemVmxVmexit(pVCpu, VMX_EXIT_TASK_SWITCH, u64ExitQual);
3432}
3433
3434
3435/**
3436 * VMX VM-exit handler for VM-exits due to task switches.
3437 *
3438 * This is intended for task switches where the caller provides all the relevant
3439 * VM-exit information.
3440 *
3441 * @returns VBox strict status code.
3442 * @param pVCpu The cross context virtual CPU structure.
3443 * @param pExitInfo Pointer to the VM-exit information.
3444 * @param pExitEventInfo Pointer to the VM-exit event information.
3445 */
3446IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitchWithInfo(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo,
3447 PCVMXVEXITEVENTINFO pExitEventInfo)
3448{
3449 Assert(pExitInfo);
3450 Assert(pExitEventInfo);
3451
3452 /* The Exit qualification is mandatory for all task-switch VM-exits. */
3453 uint64_t const u64ExitQual = pExitInfo->u64Qual;
3454 iemVmxVmcsSetExitQual(pVCpu, u64ExitQual);
3455
3456 /*
3457 * Figure out if an instruction was the source of the task switch.
3458 *
3459 * If the task-switch was due to CALL/IRET/JMP instruction or due to the delivery
3460 * of an event generated by a software interrupt (INT-N), privileged software
3461 * interrupt (INT1/ICEBP) or software exception (INT3/INTO) then the CPU provides
3462 * the instruction length.
3463 */
3464 bool fHasInstrLen;
3465 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(u64ExitQual) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT)
3466 {
3467 /* Check if an event delivery through IDT caused a task switch VM-exit. */
3468 uint32_t const uIdtVectInfo = pExitEventInfo->uIdtVectoringInfo;
3469 bool const fIdtVectInfoValid = VMX_IDT_VECTORING_INFO_IS_VALID(uIdtVectInfo);
3470 if (fIdtVectInfoValid)
3471 {
3472 iemVmxVmcsSetIdtVectoringInfo(pVCpu, uIdtVectInfo);
3473 if (VMX_IDT_VECTORING_INFO_IS_ERROR_CODE_VALID(uIdtVectInfo))
3474 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode);
3475
3476 uint8_t const fIdtVectType = VMX_IDT_VECTORING_INFO_TYPE(uIdtVectInfo);
3477 if ( fIdtVectType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
3478 || fIdtVectType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT
3479 || fIdtVectType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT)
3480 fHasInstrLen = true;
3481 else
3482 fHasInstrLen = false;
3483 }
3484 else
3485 fHasInstrLen = false;
3486 }
3487 else
3488 {
3489 /* CALL, IRET or JMP instruction caused the task switch VM-exit. */
3490 fHasInstrLen = true;
3491 }
3492
3493 if (fHasInstrLen)
3494 {
3495 Assert(pExitInfo->cbInstr > 0);
3496 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
3497 }
3498 return iemVmxVmexit(pVCpu, VMX_EXIT_TASK_SWITCH, u64ExitQual);
3499}
3500
3501
3502/**
3503 * VMX VM-exit handler for VM-exits due to expiring of the preemption timer.
3504 *
3505 * @returns VBox strict status code.
3506 * @param pVCpu The cross context virtual CPU structure.
3507 */
3508IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPU pVCpu)
3509{
3510 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3511 Assert(pVmcs);
3512
3513 /* The VM-exit is subject to "Activate VMX-preemption timer" being set. */
3514 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
3515 {
3516 /* Import the hardware virtualization state (for nested-guest VM-entry TSC-tick). */
3517 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
3518
3519 /*
3520 * Calculate the current VMX-preemption timer value.
3521 * Only if the value has reached zero, we cause the VM-exit.
3522 */
3523 uint32_t uPreemptTimer = iemVmxCalcPreemptTimer(pVCpu);
3524 if (!uPreemptTimer)
3525 {
3526 /* Save the VMX-preemption timer value (of 0) back in to the VMCS if the CPU supports this feature. */
3527 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER)
3528 pVmcs->u32PreemptTimer = 0;
3529
3530 /* Cause the VMX-preemption timer VM-exit. The Exit qualification MBZ. */
3531 return iemVmxVmexit(pVCpu, VMX_EXIT_PREEMPT_TIMER, 0 /* u64ExitQual */);
3532 }
3533 }
3534
3535 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3536}
3537
3538
3539/**
3540 * VMX VM-exit handler for VM-exits due to external interrupts.
3541 *
3542 * @returns VBox strict status code.
3543 * @param pVCpu The cross context virtual CPU structure.
3544 * @param uVector The external interrupt vector (pass 0 if the interrupt
3545 * is still pending since we typically won't know the
3546 * vector).
3547 * @param fIntPending Whether the external interrupt is pending or
3548 * acknowledged in the interrupt controller.
3549 */
3550IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
3551{
3552 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3553 Assert(pVmcs);
3554 Assert(fIntPending || uVector == 0);
3555
3556 /** @todo NSTVMX: r=ramshankar: Consider standardizing check basic/blanket
3557 * intercepts for VM-exits. Right now it is not clear which iemVmxVmexitXXX()
3558 * functions require prior checking of a blanket intercept and which don't.
3559     * Performance-wise it is better for the caller to check a blanket intercept
3560     * than to make a function call. Leaving this as a todo because it is more of
3561     * a performance issue. */
3562
3563 /* The VM-exit is subject to "External interrupt exiting" being set. */
3564 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT)
3565 {
3566 if (fIntPending)
3567 {
3568 /*
3569 * If the interrupt is pending and we don't need to acknowledge the
3570 * interrupt on VM-exit, cause the VM-exit immediately.
3571 *
3572 * See Intel spec 25.2 "Other Causes Of VM Exits".
3573 */
3574 if (!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT))
3575 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT, 0 /* u64ExitQual */);
3576
3577 /*
3578 * If the interrupt is pending and we -do- need to acknowledge the interrupt
3579             * on VM-exit, postpone the VM-exit until the interrupt has been acknowledged
3580             * with the interrupt controller, i.e. the interrupt has been consumed.
3581 */
3582 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3583 }
3584
3585 /*
3586 * If the interrupt is no longer pending (i.e. it has been acknowledged) and the
3587 * "External interrupt exiting" and "Acknowledge interrupt on VM-exit" controls are
3588         * both set, we cause the VM-exit now. We need to record the external interrupt that
3589 * just occurred in the VM-exit interruption information field.
3590 *
3591 * See Intel spec. 27.2.2 "Information for VM Exits Due to Vectored Events".
3592 */
3593 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
3594 {
3595 bool const fNmiUnblocking = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret;
3596 uint32_t const uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, uVector)
3597 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_EXT_INT)
3598 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, fNmiUnblocking)
3599 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
3600 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
3601 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT, 0 /* u64ExitQual */);
3602 }
3603 }
3604
3605 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3606}
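/*
 * Editor's sketch (not part of the original source): the external-interrupt decision above as
 * a small truth-table style helper; returns true when the VM-exit should be taken right away.
 * Names are made up.
 */
#if 0 /* illustrative only, not built */
# include <stdbool.h>
static bool vmxSketchExtIntExitsNow(bool fExtIntExiting, bool fAckIntOnExit, bool fIntPending)
{
    if (!fExtIntExiting)
        return false;               /* External interrupts are not intercepted at all. */
    if (fIntPending)
        return !fAckIntOnExit;      /* Pending: exit now only if no acknowledgement is needed. */
    return fAckIntOnExit;           /* Already acknowledged: exit now if ack-on-exit is set. */
}
#endif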
3607
3608
3609/**
3610 * VMX VM-exit handler for VM-exits due to a double fault caused during delivery of
3611 * an event.
3612 *
3613 * @returns VBox strict status code.
3614 * @param pVCpu The cross context virtual CPU structure.
3615 */
3616IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPU pVCpu)
3617{
3618 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3619 Assert(pVmcs);
3620
3621 uint32_t const fXcptBitmap = pVmcs->u32XcptBitmap;
3622 if (fXcptBitmap & RT_BIT(X86_XCPT_DF))
3623 {
3624 uint8_t const fNmiUnblocking = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret;
3625 uint32_t const uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_DF)
3626 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
3627 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_ERR_CODE_VALID, 1)
3628 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, fNmiUnblocking)
3629 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
3630 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
3631 iemVmxVmcsSetExitIntErrCode(pVCpu, 0);
3632 iemVmxVmcsSetExitInstrLen(pVCpu, 0);
3633
3634 /*
3635 * A VM-exit is not considered to occur during event delivery when the original
3636 * event results in a double-fault that causes a VM-exit directly (i.e. intercepted
3637 * using the exception bitmap).
3638 *
3639 * Therefore, we must clear the original event from the IDT-vectoring fields which
3640 * would've been recorded before causing the VM-exit.
3641 *
3642 * 27.2.3 "Information for VM Exits During Event Delivery"
3643 */
3644 iemVmxVmcsSetIdtVectoringInfo(pVCpu, 0);
3645 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, 0);
3646 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI, 0 /* u64ExitQual */);
3647 }
3648
3649 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3650}
3651
3652
3653/**
3654 * VMX VM-exit handler for VM-exits due to delivery of an event.
3655 *
3656 * This is intended for VM-exit due to exceptions or NMIs where the caller provides
3657 * all the relevant VM-exit information.
3658 *
3659 * @returns VBox strict status code.
3660 * @param pVCpu The cross context virtual CPU structure.
3661 * @param pExitInfo Pointer to the VM-exit information.
3662 * @param pExitEventInfo Pointer to the VM-exit event information.
3663 */
3664IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventWithInfo(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
3665{
3666 Assert(pExitInfo);
3667 Assert(pExitEventInfo);
3668 Assert(VMX_EXIT_INT_INFO_IS_VALID(pExitEventInfo->uExitIntInfo));
3669
3670 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
3671 iemVmxVmcsSetExitIntInfo(pVCpu, pExitEventInfo->uExitIntInfo);
3672 iemVmxVmcsSetExitIntErrCode(pVCpu, pExitEventInfo->uExitIntErrCode);
3673 iemVmxVmcsSetIdtVectoringInfo(pVCpu, pExitEventInfo->uIdtVectoringInfo);
3674 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode);
3675 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI, pExitInfo->u64Qual);
3676}
3677
3678
3679/**
3680 * VMX VM-exit handler for VM-exits due to delivery of an event.
3681 *
3682 * @returns VBox strict status code.
3683 * @param pVCpu The cross context virtual CPU structure.
3684 * @param uVector The interrupt / exception vector.
3685 * @param fFlags The flags (see IEM_XCPT_FLAGS_XXX).
3686 * @param uErrCode The error code associated with the event.
3687 * @param uCr2 The CR2 value in case of a \#PF exception.
3688 * @param cbInstr The instruction length in bytes.
3689 */
3690IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2,
3691 uint8_t cbInstr)
3692{
3693 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3694 Assert(pVmcs);
3695
3696 /*
3697 * If the event is being injected as part of VM-entry, it is -not- subject to event
3698 * intercepts in the nested-guest. However, secondary exceptions that occur during
3699 * injection of any event -are- subject to event interception.
3700 *
3701 * See Intel spec. 26.5.1.2 "VM Exits During Event Injection".
3702 */
3703 if (!pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents)
3704 {
3705 /*
3706         * If the event is a virtual-NMI (which is an NMI being injected during VM-entry),
3707         * virtual-NMI blocking must be in effect rather than physical NMI blocking.
3708 *
3709 * See Intel spec. 24.6.1 "Pin-Based VM-Execution Controls".
3710 */
3711 if ( uVector == X86_XCPT_NMI
3712 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3713 && (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
3714 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = true;
3715 else
3716 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking);
3717
3718 pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents = true;
3719 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3720 }
3721
3722 /*
3723 * We are injecting an external interrupt, check if we need to cause a VM-exit now.
3724 * If not, the caller will continue delivery of the external interrupt as it would
3725 * normally. The interrupt is no longer pending in the interrupt controller at this
3726 * point.
3727 */
3728 if (fFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3729 {
3730 Assert(!VMX_IDT_VECTORING_INFO_IS_VALID(pVmcs->u32RoIdtVectoringInfo));
3731 return iemVmxVmexitExtInt(pVCpu, uVector, false /* fIntPending */);
3732 }
3733
3734 /*
3735     * Evaluate intercepts for hardware exceptions, software exceptions (#BP, #OF),
3736     * privileged software exceptions (#DB generated by INT1/ICEBP) and software
3737     * interrupts.
3738 */
3739 Assert(fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_SOFT_INT));
3740 bool fIntercept;
3741 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3742 || (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR)))
3743 {
3744 fIntercept = CPUMIsGuestVmxXcptInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, uVector, uErrCode);
3745 }
3746 else
3747 {
3748 /* Software interrupts cannot be intercepted and therefore do not cause a VM-exit. */
3749 fIntercept = false;
3750 }
3751
3752 /*
3753 * Now that we've determined whether the event causes a VM-exit, we need to construct the
3754 * relevant VM-exit information and cause the VM-exit.
3755 */
3756 if (fIntercept)
3757 {
3758 Assert(!(fFlags & IEM_XCPT_FLAGS_T_EXT_INT));
3759
3760 /* Construct the rest of the event related information fields and cause the VM-exit. */
3761 uint64_t u64ExitQual;
3762 if (uVector == X86_XCPT_PF)
3763 {
3764 Assert(fFlags & IEM_XCPT_FLAGS_CR2);
3765 u64ExitQual = uCr2;
3766 }
3767 else if (uVector == X86_XCPT_DB)
3768 {
3769 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
3770 u64ExitQual = pVCpu->cpum.GstCtx.dr[6] & VMX_VMCS_EXIT_QUAL_VALID_MASK;
3771 }
3772 else
3773 u64ExitQual = 0;
3774
3775 uint8_t const fNmiUnblocking = pVCpu->cpum.GstCtx.hwvirt.vmx.fNmiUnblockingIret;
3776 bool const fErrCodeValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
3777 uint8_t const uIntInfoType = iemVmxGetEventType(uVector, fFlags);
3778 uint32_t const uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, uVector)
3779 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, uIntInfoType)
3780 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_ERR_CODE_VALID, fErrCodeValid)
3781 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, fNmiUnblocking)
3782 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
3783 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
3784 iemVmxVmcsSetExitIntErrCode(pVCpu, uErrCode);
3785
3786 /*
3787         * For VM-exits due to software interrupts (INT n), software exceptions (those
3788         * generated by INT3 or INTO) or privileged software exceptions (those generated by
3789         * INT1/ICEBP), we need to supply the VM-exit instruction length.
3790 */
3791 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3792 || (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR)))
3793 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
3794 else
3795 iemVmxVmcsSetExitInstrLen(pVCpu, 0);
3796
3797 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI, u64ExitQual);
3798 }
3799
3800 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3801}
3802
3803
3804/**
3805 * VMX VM-exit handler for VM-exits due to a triple fault.
3806 *
3807 * @returns VBox strict status code.
3808 * @param pVCpu The cross context virtual CPU structure.
3809 */
3810IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu)
3811{
3812 /*
3813 * A VM-exit is not considered to occur during event delivery when the original
3814 * event results in a triple-fault.
3815 *
3816 * Therefore, we must clear the original event from the IDT-vectoring fields which
3817 * would've been recorded before causing the VM-exit.
3818 *
3819 * 27.2.3 "Information for VM Exits During Event Delivery"
3820 */
3821 iemVmxVmcsSetIdtVectoringInfo(pVCpu, 0);
3822 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, 0);
3823
3824 return iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
3825}
3826
3827
3828/**
3829 * VMX VM-exit handler for APIC accesses.
3830 *
3831 * @param pVCpu The cross context virtual CPU structure.
3832 * @param offAccess The offset of the register being accessed.
3833 * @param fAccess The type of access (must contain IEM_ACCESS_TYPE_READ or
3834 * IEM_ACCESS_TYPE_WRITE or IEM_ACCESS_INSTRUCTION).
3835 */
3836IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess)
3837{
3838 Assert((fAccess & IEM_ACCESS_TYPE_READ) || (fAccess & IEM_ACCESS_TYPE_WRITE) || (fAccess & IEM_ACCESS_INSTRUCTION));
3839
3840 VMXAPICACCESS enmAccess;
3841 bool const fInEventDelivery = IEMGetCurrentXcpt(pVCpu, NULL, NULL, NULL, NULL);
3842 if (fInEventDelivery)
3843 enmAccess = VMXAPICACCESS_LINEAR_EVENT_DELIVERY;
3844 else if (fAccess & IEM_ACCESS_INSTRUCTION)
3845 enmAccess = VMXAPICACCESS_LINEAR_INSTR_FETCH;
3846 else if (fAccess & IEM_ACCESS_TYPE_WRITE)
3847 enmAccess = VMXAPICACCESS_LINEAR_WRITE;
3848 else
3849 enmAccess = VMXAPICACCESS_LINEAR_READ;
3850
3851 uint64_t const u64ExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_OFFSET, offAccess)
3852 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_TYPE, enmAccess);
3853 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_ACCESS, u64ExitQual);
3854}
3855
3856
3857/**
3858 * VMX VM-exit handler for APIC accesses.
3859 *
3860 * This is intended for APIC accesses where the caller provides all the
3861 * relevant VM-exit information.
3862 *
3863 * @returns VBox strict status code.
3864 * @param pVCpu The cross context virtual CPU structure.
3865 * @param pExitInfo Pointer to the VM-exit information.
3866 * @param pExitEventInfo Pointer to the VM-exit event information.
3867 */
3868IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccessWithInfo(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo,
3869 PCVMXVEXITEVENTINFO pExitEventInfo)
3870{
3871 /* VM-exit interruption information should not be valid for APIC-access VM-exits. */
3872 Assert(!VMX_EXIT_INT_INFO_IS_VALID(pExitEventInfo->uExitIntInfo));
3873 iemVmxVmcsSetExitIntInfo(pVCpu, 0);
3874 iemVmxVmcsSetExitIntErrCode(pVCpu, 0);
3875 iemVmxVmcsSetIdtVectoringInfo(pVCpu, pExitEventInfo->uIdtVectoringInfo);
3876 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, pExitEventInfo->uIdtVectoringErrCode);
3877 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_ACCESS, pExitInfo->u64Qual);
3878}
3879
3880
3881/**
3882 * VMX VM-exit handler for APIC-write VM-exits.
3883 *
3884 * @param pVCpu The cross context virtual CPU structure.
3885 * @param offApic The write to the virtual-APIC page offset that caused this
3886 * VM-exit.
3887 */
3888IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicWrite(PVMCPU pVCpu, uint16_t offApic)
3889{
3890 Assert(offApic < XAPIC_OFF_END + 4);
3891 /* Write only bits 11:0 of the APIC offset into the Exit qualification field. */
3892 offApic &= UINT16_C(0xfff);
3893 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_WRITE, offApic);
3894}
3895
3896
3897/**
3898 * Sets virtual-APIC write emulation as pending.
3899 *
3900 * @param pVCpu The cross context virtual CPU structure.
3901 * @param offApic The offset in the virtual-APIC page that was written.
3902 */
3903DECLINLINE(void) iemVmxVirtApicSetPendingWrite(PVMCPU pVCpu, uint16_t offApic)
3904{
3905 Assert(offApic < XAPIC_OFF_END + 4);
3906
3907 /*
3908 * Record the currently updated APIC offset, as we need this later for figuring
3909     * out whether to perform TPR, EOI or self-IPI virtualization as well
3910 * as for supplying the exit qualification when causing an APIC-write VM-exit.
3911 */
3912 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
3913
3914 /*
3915 * Signal that we need to perform virtual-APIC write emulation (TPR/PPR/EOI/Self-IPI
3916 * virtualization or APIC-write emulation).
3917 */
3918 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
3919 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
3920}
3921
3922
3923/**
3924 * Clears any pending virtual-APIC write emulation.
3925 *
3926 * @returns The virtual-APIC offset that was written before clearing it.
3927 * @param pVCpu The cross context virtual CPU structure.
3928 */
3929DECLINLINE(uint16_t) iemVmxVirtApicClearPendingWrite(PVMCPU pVCpu)
3930{
3931 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
3932    uint16_t const offVirtApicWrite = pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite;
3933 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = 0;
3934 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
3935 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
3936 return offVirtApicWrite;
3937}
3938
3939
3940/**
3941 * Reads a 32-bit register from the virtual-APIC page at the given offset.
3942 *
3943 * @returns The register from the virtual-APIC page.
3944 * @param pVCpu The cross context virtual CPU structure.
3945 * @param offReg The offset of the register being read.
3946 */
3947IEM_STATIC uint32_t iemVmxVirtApicReadRaw32(PVMCPU pVCpu, uint16_t offReg)
3948{
3949 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3950 Assert(pVmcs);
3951
3952 uint32_t uReg;
3953 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uReg));
3954 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
3955 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg, sizeof(uReg));
3956 if (RT_FAILURE(rc))
3957 {
3958 AssertMsgFailed(("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg,
3959 GCPhysVirtApic));
3960 uReg = 0;
3961 }
3962 return uReg;
3963}
3964
3965
3966/**
3967 * Reads a 64-bit register from the virtual-APIC page at the given offset.
3968 *
3969 * @returns The register from the virtual-APIC page.
3970 * @param pVCpu The cross context virtual CPU structure.
3971 * @param offReg The offset of the register being read.
3972 */
3973IEM_STATIC uint64_t iemVmxVirtApicReadRaw64(PVMCPU pVCpu, uint16_t offReg)
3974{
3975 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3976 Assert(pVmcs);
3977
3978 uint64_t uReg;
3979 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uReg));
3980 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
3981 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg, sizeof(uReg));
3982 if (RT_FAILURE(rc))
3983 {
3984 AssertMsgFailed(("Failed to read %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg,
3985 GCPhysVirtApic));
3986 uReg = 0;
3987 }
3988 return uReg;
3989}
3990
3991
3992/**
3993 * Writes a 32-bit register to the virtual-APIC page at the given offset.
3994 *
3995 * @param pVCpu The cross context virtual CPU structure.
3996 * @param offReg The offset of the register being written.
3997 * @param uReg The register value to write.
3998 */
3999IEM_STATIC void iemVmxVirtApicWriteRaw32(PVMCPU pVCpu, uint16_t offReg, uint32_t uReg)
4000{
4001 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4002 Assert(pVmcs);
4003 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uReg));
4004 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
4005 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg, &uReg, sizeof(uReg));
4006 if (RT_FAILURE(rc))
4007 {
4008 AssertMsgFailed(("Failed to write %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg,
4009 GCPhysVirtApic));
4010 }
4011}
4012
4013
4014/**
4015 * Writes a 64-bit register to the virtual-APIC page at the given offset.
4016 *
4017 * @param pVCpu The cross context virtual CPU structure.
4018 * @param offReg The offset of the register being written.
4019 * @param uReg The register value to write.
4020 */
4021IEM_STATIC void iemVmxVirtApicWriteRaw64(PVMCPU pVCpu, uint16_t offReg, uint64_t uReg)
4022{
4023 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4024 Assert(pVmcs);
4025 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uReg));
4026 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
4027 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg, &uReg, sizeof(uReg));
4028 if (RT_FAILURE(rc))
4029 {
4030 AssertMsgFailed(("Failed to write %u bytes at offset %#x of the virtual-APIC page at %#RGp\n", sizeof(uReg), offReg,
4031 GCPhysVirtApic));
4032 }
4033}
4034
4035
4036/**
4037 * Sets the vector in a virtual-APIC 256-bit sparse register.
4038 *
4039 * @param pVCpu The cross context virtual CPU structure.
4040 * @param offReg The offset of the 256-bit sparse register.
4041 * @param uVector The vector to set.
4042 *
4043 * @remarks This is based on our APIC device code.
4044 */
4045IEM_STATIC void iemVmxVirtApicSetVectorInReg(PVMCPU pVCpu, uint16_t offReg, uint8_t uVector)
4046{
4047 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4048 Assert(pVmcs);
4049 uint32_t uReg;
4050 uint16_t const offVector = (uVector & UINT32_C(0xe0)) >> 1;
4051 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
4052 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg + offVector, sizeof(uReg));
4053 if (RT_SUCCESS(rc))
4054 {
4055 uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
4056 uReg |= RT_BIT(idxVectorBit);
4057 rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg + offVector, &uReg, sizeof(uReg));
4058 if (RT_FAILURE(rc))
4059 {
4060 AssertMsgFailed(("Failed to set vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n",
4061 uVector, offReg, GCPhysVirtApic));
4062 }
4063 }
4064 else
4065 {
4066 AssertMsgFailed(("Failed to get vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n",
4067 uVector, offReg, GCPhysVirtApic));
4068 }
4069}
4070
4071
4072/**
4073 * Clears the vector in a virtual-APIC 256-bit sparse register.
4074 *
4075 * @param pVCpu The cross context virtual CPU structure.
4076 * @param offReg The offset of the 256-bit sparse register.
4077 * @param uVector The vector to clear.
4078 *
4079 * @remarks This is based on our APIC device code.
4080 */
4081IEM_STATIC void iemVmxVirtApicClearVectorInReg(PVMCPU pVCpu, uint16_t offReg, uint8_t uVector)
4082{
4083 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4084 Assert(pVmcs);
4085 uint32_t uReg;
4086 uint16_t const offVector = (uVector & UINT32_C(0xe0)) >> 1;
4087 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
4088 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &uReg, GCPhysVirtApic + offReg + offVector, sizeof(uReg));
4089 if (RT_SUCCESS(rc))
4090 {
4091 uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
4092 uReg &= ~RT_BIT(idxVectorBit);
4093 rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic + offReg + offVector, &uReg, sizeof(uReg));
4094 if (RT_FAILURE(rc))
4095 {
4096 AssertMsgFailed(("Failed to clear vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n",
4097 uVector, offReg, GCPhysVirtApic));
4098 }
4099 }
4100 else
4101 {
4102 AssertMsgFailed(("Failed to get vector %#x in 256-bit register at %#x of the virtual-APIC page at %#RGp\n",
4103 uVector, offReg, GCPhysVirtApic));
4104 }
4105}
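/*
 * Editor's sketch (not part of the original source): how a vector maps into a 256-bit sparse
 * APIC register (ISR/TMR/IRR) in the two functions above. Each group of 32 vectors occupies the
 * low 4 bytes of a 16-byte aligned slot, so the slot offset is (vector / 32) * 16 and the bit
 * index is vector % 32. Self-contained illustration; the helper name is made up.
 */
#if 0 /* illustrative only, not built */
# include <stdint.h>
static void vmxSketchSparseRegPos(uint8_t uVector, uint16_t *poffInReg, uint8_t *pidxBit)
{
    *poffInReg = (uint16_t)((uVector & 0xe0) >> 1);   /* == (uVector / 32) * 16 */
    *pidxBit   = (uint8_t)(uVector & 0x1f);           /* ==  uVector % 32       */
    /* E.g. vector 0x41 -> offset 0x20 into the 256-bit register, bit 1. */
}
#endif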
4106
4107
4108/**
4109 * Checks if a memory access to the APIC-access page must cause an APIC-access
4110 * VM-exit.
4111 *
4112 * @param pVCpu The cross context virtual CPU structure.
4113 * @param offAccess The offset of the register being accessed.
4114 * @param cbAccess The size of the access in bytes.
4115 * @param fAccess The type of access (must be IEM_ACCESS_TYPE_READ or
4116 * IEM_ACCESS_TYPE_WRITE).
4117 *
4118 * @remarks This must not be used for MSR-based APIC-access page accesses!
4119 * @sa iemVmxVirtApicAccessMsrWrite, iemVmxVirtApicAccessMsrRead.
4120 */
4121IEM_STATIC bool iemVmxVirtApicIsMemAccessIntercepted(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, uint32_t fAccess)
4122{
4123 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4124 Assert(pVmcs);
4125 Assert(fAccess == IEM_ACCESS_TYPE_READ || fAccess == IEM_ACCESS_TYPE_WRITE);
4126
4127 /*
4128 * We must cause a VM-exit if any of the following are true:
4129 * - TPR shadowing isn't active.
4130 * - The access size exceeds 32-bits.
4131     *   - The access is not contained within the low 4 bytes of a 16-byte aligned offset.
4132 *
4133 * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
4134 * See Intel spec. 29.4.3.1 "Determining Whether a Write Access is Virtualized".
4135 */
4136 if ( !(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4137 || cbAccess > sizeof(uint32_t)
4138 || ((offAccess + cbAccess - 1) & 0xc)
4139 || offAccess >= XAPIC_OFF_END + 4)
4140 return true;
4141
4142 /*
4143 * If the access is part of an operation where we have already
4144 * virtualized a virtual-APIC write, we must cause a VM-exit.
4145 */
4146 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4147 return true;
4148
4149 /*
4150 * Check write accesses to the APIC-access page that cause VM-exits.
4151 */
4152 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4153 {
4154 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4155 {
4156 /*
4157 * With APIC-register virtualization, a write access to any of the
4158             * following registers is virtualized. Accessing any other register
4159 * causes a VM-exit.
4160 */
4161 uint16_t const offAlignedAccess = offAccess & 0xfffc;
4162 switch (offAlignedAccess)
4163 {
4164 case XAPIC_OFF_ID:
4165 case XAPIC_OFF_TPR:
4166 case XAPIC_OFF_EOI:
4167 case XAPIC_OFF_LDR:
4168 case XAPIC_OFF_DFR:
4169 case XAPIC_OFF_SVR:
4170 case XAPIC_OFF_ESR:
4171 case XAPIC_OFF_ICR_LO:
4172 case XAPIC_OFF_ICR_HI:
4173 case XAPIC_OFF_LVT_TIMER:
4174 case XAPIC_OFF_LVT_THERMAL:
4175 case XAPIC_OFF_LVT_PERF:
4176 case XAPIC_OFF_LVT_LINT0:
4177 case XAPIC_OFF_LVT_LINT1:
4178 case XAPIC_OFF_LVT_ERROR:
4179 case XAPIC_OFF_TIMER_ICR:
4180 case XAPIC_OFF_TIMER_DCR:
4181 break;
4182 default:
4183 return true;
4184 }
4185 }
4186 else if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4187 {
4188 /*
4189 * With virtual-interrupt delivery, a write access to any of the
4190             * following registers is virtualized. Accessing any other register
4191 * causes a VM-exit.
4192 *
4193 * Note! The specification does not allow writing to offsets in-between
4194 * these registers (e.g. TPR + 1 byte) unlike read accesses.
4195 */
4196 switch (offAccess)
4197 {
4198 case XAPIC_OFF_TPR:
4199 case XAPIC_OFF_EOI:
4200 case XAPIC_OFF_ICR_LO:
4201 break;
4202 default:
4203 return true;
4204 }
4205 }
4206 else
4207 {
4208 /*
4209 * Without APIC-register virtualization or virtual-interrupt delivery,
4210 * only TPR accesses are virtualized.
4211 */
4212 if (offAccess == XAPIC_OFF_TPR)
4213 { /* likely */ }
4214 else
4215 return true;
4216 }
4217 }
4218 else
4219 {
4220 /*
4221 * Check read accesses to the APIC-access page that cause VM-exits.
4222 */
4223 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4224 {
4225 /*
4226 * With APIC-register virtualization, a read access to any of the
4227             * following registers is virtualized. Accessing any other register
4228 * causes a VM-exit.
4229 */
4230 uint16_t const offAlignedAccess = offAccess & 0xfffc;
4231 switch (offAlignedAccess)
4232 {
4233 /** @todo r=ramshankar: What about XAPIC_OFF_LVT_CMCI? */
4234 case XAPIC_OFF_ID:
4235 case XAPIC_OFF_VERSION:
4236 case XAPIC_OFF_TPR:
4237 case XAPIC_OFF_EOI:
4238 case XAPIC_OFF_LDR:
4239 case XAPIC_OFF_DFR:
4240 case XAPIC_OFF_SVR:
4241 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
4242 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
4243 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
4244 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
4245 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
4246 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
4247 case XAPIC_OFF_ESR:
4248 case XAPIC_OFF_ICR_LO:
4249 case XAPIC_OFF_ICR_HI:
4250 case XAPIC_OFF_LVT_TIMER:
4251 case XAPIC_OFF_LVT_THERMAL:
4252 case XAPIC_OFF_LVT_PERF:
4253 case XAPIC_OFF_LVT_LINT0:
4254 case XAPIC_OFF_LVT_LINT1:
4255 case XAPIC_OFF_LVT_ERROR:
4256 case XAPIC_OFF_TIMER_ICR:
4257 case XAPIC_OFF_TIMER_DCR:
4258 break;
4259 default:
4260 return true;
4261 }
4262 }
4263 else
4264 {
4265 /* Without APIC-register virtualization, only TPR accesses are virtualized. */
4266 if (offAccess == XAPIC_OFF_TPR)
4267 { /* likely */ }
4268 else
4269 return true;
4270 }
4271 }
4272
4273    /* The APIC access is virtualized and does not cause a VM-exit. */
4274 return false;
4275}
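/*
 * Editor's sketch (not part of the original source): the containment check used above,
 * ((offAccess + cbAccess - 1) & 0xc), tests whether the last byte of the access falls within
 * the low 4 bytes of a 16-byte aligned APIC register slot. Self-contained illustration; the
 * helper name is made up.
 */
#if 0 /* illustrative only, not built */
# include <stdbool.h>
# include <stddef.h>
# include <stdint.h>
static bool vmxSketchApicAccessEndsInLowDword(uint16_t offAccess, size_t cbAccess)
{
    /* True for e.g. (0x80, 4); false for (0x82, 4) whose last byte is at 0x85, and
       false for (0x84, 1) which lies in bytes 4..7 of the 16-byte slot. */
    return ((offAccess + cbAccess - 1) & 0xc) == 0;
}
#endif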
4276
4277
4278/**
4279 * Virtualizes a memory-based APIC access where the address is not used to access
4280 * memory.
4281 *
4282 * This is for instructions like MONITOR, CLFLUSH, CLFLUSHOPT, ENTER which may cause
4283 * page-faults but do not use the address to access memory.
4284 *
4285 * @param pVCpu The cross context virtual CPU structure.
4286 * @param pGCPhysAccess Pointer to the guest-physical address used.
4287 */
4288IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPU pVCpu, PRTGCPHYS pGCPhysAccess)
4289{
4290 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4291 Assert(pVmcs);
4292 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
4293 Assert(pGCPhysAccess);
4294
4295 RTGCPHYS const GCPhysAccess = *pGCPhysAccess & ~(RTGCPHYS)PAGE_OFFSET_MASK;
4296 RTGCPHYS const GCPhysApic = pVmcs->u64AddrApicAccess.u;
4297 Assert(!(GCPhysApic & PAGE_OFFSET_MASK));
4298
4299 if (GCPhysAccess == GCPhysApic)
4300 {
4301 uint16_t const offAccess = *pGCPhysAccess & PAGE_OFFSET_MASK;
4302 uint32_t const fAccess = IEM_ACCESS_TYPE_READ;
4303 uint16_t const cbAccess = 1;
4304 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess);
4305 if (fIntercept)
4306 return iemVmxVmexitApicAccess(pVCpu, offAccess, fAccess);
4307
4308 *pGCPhysAccess = GCPhysApic | offAccess;
4309 return VINF_VMX_MODIFIES_BEHAVIOR;
4310 }
4311
4312 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4313}
4314
4315
4316/**
4317 * Virtualizes a memory-based APIC access.
4318 *
4319 * @returns VBox strict status code.
4320 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the access was virtualized.
4321 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
4322 *
4323 * @param pVCpu The cross context virtual CPU structure.
4324 * @param offAccess The offset of the register being accessed (within the
4325 * APIC-access page).
4326 * @param cbAccess The size of the access in bytes.
4327 * @param pvData Pointer to the data being written or where to store the data
4328 * being read.
4329 * @param fAccess The type of access (must contain IEM_ACCESS_TYPE_READ or
4330 * IEM_ACCESS_TYPE_WRITE or IEM_ACCESS_INSTRUCTION).
4331 */
4332IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData,
4333 uint32_t fAccess)
4334{
4335 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4336 Assert(pVmcs);
4337 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS); NOREF(pVmcs);
4338 Assert(pvData);
4339 Assert( (fAccess & IEM_ACCESS_TYPE_READ)
4340 || (fAccess & IEM_ACCESS_TYPE_WRITE)
4341 || (fAccess & IEM_ACCESS_INSTRUCTION));
4342
4343 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess);
4344 if (fIntercept)
4345 return iemVmxVmexitApicAccess(pVCpu, offAccess, fAccess);
4346
4347 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4348 {
4349 /*
4350 * A write access to the APIC-access page that is virtualized (rather than
4351 * causing a VM-exit) writes data to the virtual-APIC page.
4352 */
4353 uint32_t const u32Data = *(uint32_t *)pvData;
4354 iemVmxVirtApicWriteRaw32(pVCpu, offAccess, u32Data);
4355
4356 /*
4357 * Record the currently updated APIC offset, as we need this later for figuring
4358         * out whether to perform TPR, EOI or self-IPI virtualization as well
4359 * as for supplying the exit qualification when causing an APIC-write VM-exit.
4360 *
4361 * After completion of the current operation, we need to perform TPR virtualization,
4362 * EOI virtualization or APIC-write VM-exit depending on which register was written.
4363 *
4364 * The current operation may be a REP-prefixed string instruction, execution of any
4365 * other instruction, or delivery of an event through the IDT.
4366 *
4367 * Thus things like clearing bytes 3:1 of the VTPR, clearing VEOI are not to be
4368 * performed now but later after completion of the current operation.
4369 *
4370 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4371 */
4372 iemVmxVirtApicSetPendingWrite(pVCpu, offAccess);
4373 }
4374 else
4375 {
4376 /*
4377 * A read access from the APIC-access page that is virtualized (rather than
4378 * causing a VM-exit) returns data from the virtual-APIC page.
4379 *
4380 * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
4381 */
4382 Assert(cbAccess <= 4);
4383 Assert(offAccess < XAPIC_OFF_END + 4);
4384 static uint32_t const s_auAccessSizeMasks[] = { 0, 0xff, 0xffff, 0xffffff, 0xffffffff };
4385
4386 uint32_t u32Data = iemVmxVirtApicReadRaw32(pVCpu, offAccess);
4387 u32Data &= s_auAccessSizeMasks[cbAccess];
4388 *(uint32_t *)pvData = u32Data;
4389 }
4390
4391 return VINF_VMX_MODIFIES_BEHAVIOR;
4392}
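
/*
 * Illustrative sketch, not part of the original code: how the read path above narrows a
 * raw 32-bit virtual-APIC register value to a 1..4 byte access using the same mask table.
 * The helper name and its parameters are hypothetical and exist only for illustration.
 */
static uint32_t iemVmxSketchNarrowApicRead(uint32_t u32RegValue, size_t cbAccess)
{
    /* Index 0 is unused; 1, 2, 3 and 4 byte accesses keep the low 8, 16, 24 and 32 bits. */
    static uint32_t const s_auAccessSizeMasks[] = { 0, 0xff, 0xffff, 0xffffff, 0xffffffff };
    Assert(cbAccess >= 1 && cbAccess <= 4);
    return u32RegValue & s_auAccessSizeMasks[cbAccess];
}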
4393
4394
4395/**
4396 * Virtualizes an MSR-based APIC read access.
4397 *
4398 * @returns VBox strict status code.
4399 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR read was virtualized.
4400 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR read access must be
4401 * handled by the x2APIC device.
4402 * @retval VERR_OUT_OF_RANGE if the MSR read was supposed to be virtualized but was
4403 * not within the range of valid MSRs, caller must raise \#GP(0).
4404 * @param pVCpu The cross context virtual CPU structure.
4405 * @param idMsr The x2APIC MSR being read.
4406 * @param pu64Value Where to store the read x2APIC MSR value (only valid when
4407 * VINF_VMX_MODIFIES_BEHAVIOR is returned).
4408 */
4409IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value)
4410{
4411 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4412 Assert(pVmcs);
4413 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
4414 Assert(pu64Value);
4415
4416 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4417 {
4418 /*
4419 * Intel has different ideas in the x2APIC spec. vs the VT-x spec. as to
4420 * what the end of the valid x2APIC MSR range is. Hence the use of different
4421 * macros here.
4422 *
4423 * See Intel spec. 10.12.1.2 "x2APIC Register Address Space".
4424 * See Intel spec. 29.5 "Virtualizing MSR-based APIC Accesses".
4425 */
4426 if ( idMsr >= VMX_V_VIRT_APIC_MSR_START
4427 && idMsr <= VMX_V_VIRT_APIC_MSR_END)
4428 {
4429 uint16_t const offReg = (idMsr & 0xff) << 4;
4430 uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
4431 *pu64Value = u64Value;
4432 return VINF_VMX_MODIFIES_BEHAVIOR;
4433 }
4434 return VERR_OUT_OF_RANGE;
4435 }
4436
4437 if (idMsr == MSR_IA32_X2APIC_TPR)
4438 {
4439 uint16_t const offReg = (idMsr & 0xff) << 4;
4440 uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
4441 *pu64Value = u64Value;
4442 return VINF_VMX_MODIFIES_BEHAVIOR;
4443 }
4444
4445 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4446}
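
/*
 * Illustrative sketch, not part of the original code: the MSR-to-offset mapping used above.
 * Each x2APIC MSR corresponds to a 16-byte aligned xAPIC register, so the low 8 bits of the
 * MSR index are scaled by 16; e.g. the TPR MSR (0x808) maps to offset 0x80 on the
 * virtual-APIC page. The helper name is hypothetical.
 */
static uint16_t iemVmxSketchX2ApicMsrToApicOffset(uint32_t idMsr)
{
    return (uint16_t)((idMsr & 0xff) << 4);
}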
4447
4448
4449/**
4450 * Virtualizes an MSR-based APIC write access.
4451 *
4452 * @returns VBox strict status code.
4453 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR write was virtualized.
4454 * @retval VERR_OUT_OF_RANGE if the MSR write was supposed to be virtualized but the
4455 *         value being written was invalid; caller must raise \#GP(0).
4456 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR must be written normally.
4457 *
4458 * @param pVCpu The cross context virtual CPU structure.
4459 * @param idMsr The x2APIC MSR being written.
4460 * @param u64Value The value of the x2APIC MSR being written.
4461 */
4462IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value)
4463{
4464 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4465 Assert(pVmcs);
4466
4467 /*
4468 * Check if the access is to be virtualized.
4469 * See Intel spec. 29.5 "Virtualizing MSR-based APIC Accesses".
4470 */
4471 if ( idMsr == MSR_IA32_X2APIC_TPR
4472 || ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4473 && ( idMsr == MSR_IA32_X2APIC_EOI
4474 || idMsr == MSR_IA32_X2APIC_SELF_IPI)))
4475 {
4476 /* Validate the MSR write depending on the register. */
4477 switch (idMsr)
4478 {
4479 case MSR_IA32_X2APIC_TPR:
4480 case MSR_IA32_X2APIC_SELF_IPI:
4481 {
4482 if (u64Value & UINT64_C(0xffffffffffffff00))
4483 return VERR_OUT_OF_RANGE;
4484 break;
4485 }
4486 case MSR_IA32_X2APIC_EOI:
4487 {
4488 if (u64Value != 0)
4489 return VERR_OUT_OF_RANGE;
4490 break;
4491 }
4492 }
4493
4494 /* Write the MSR to the virtual-APIC page. */
4495 uint16_t const offReg = (idMsr & 0xff) << 4;
4496 iemVmxVirtApicWriteRaw64(pVCpu, offReg, u64Value);
4497
4498 /*
4499 * Record the currently updated APIC offset, as we need this later for figuring
4500 * out whether to perform TPR, EOI or self-IPI virtualization, as well as for
4501 * supplying the exit qualification when causing an APIC-write VM-exit.
4502 */
4503 iemVmxVirtApicSetPendingWrite(pVCpu, offReg);
4504
4505 return VINF_VMX_MODIFIES_BEHAVIOR;
4506 }
4507
4508 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4509}
4510
4511
4512/**
4513 * Finds the most significant set bit in a virtual-APIC 256-bit sparse register.
4514 *
4515 * @returns VBox status code.
4516 * @retval VINF_SUCCESS when the highest set bit is found.
4517 * @retval VERR_NOT_FOUND when no bit is set.
4518 *
4519 * @param pVCpu The cross context virtual CPU structure.
4520 * @param offReg The offset of the APIC 256-bit sparse register.
4521 * @param pidxHighestBit Where to store the highest bit (most significant bit)
4522 * set in the register. Only valid when VINF_SUCCESS is
4523 * returned.
4524 *
4525 * @remarks The format of the 256-bit sparse register here mirrors that found in
4526 * real APIC hardware.
4527 */
4528static int iemVmxVirtApicGetHighestSetBitInReg(PVMCPU pVCpu, uint16_t offReg, uint8_t *pidxHighestBit)
4529{
4530 Assert(offReg < XAPIC_OFF_END + 4);
4531 Assert(pidxHighestBit);
4532 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
4533
4534 /*
4535 * There are 8 contiguous fragments (of 16-bytes each) in the sparse register.
4536 * However, in each fragment only the first 4 bytes are used.
4537 */
4538 uint8_t const cFrags = 8;
4539 for (int8_t iFrag = cFrags - 1; iFrag >= 0; iFrag--)
4540 {
4541 uint16_t const offFrag = iFrag * 16;
4542 uint32_t const u32Frag = iemVmxVirtApicReadRaw32(pVCpu, offReg + offFrag);
4543 if (!u32Frag)
4544 continue;
4545
4546 unsigned idxHighestBit = ASMBitLastSetU32(u32Frag);
4547 Assert(idxHighestBit > 0);
4548 idxHighestBit += (unsigned)iFrag * 32 - 1; /* Make the bit index 0-based and account for the fragment (32 used bits each). */
4549 Assert(idxHighestBit <= UINT8_MAX);
4550 *pidxHighestBit = idxHighestBit;
4551 return VINF_SUCCESS;
4552 }
4553 return VERR_NOT_FOUND;
4554}
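
/*
 * Illustrative sketch, not part of the original code: the same highest-set-bit scan over a
 * caller-supplied copy of the 256-bit sparse register, making the layout explicit. There are
 * 8 fragments of 16 bytes each of which only the first 32 bits are used, so logical bit N
 * lives in bit (N % 32) of fragment (N / 32). The helper name and plain-array input are
 * hypothetical.
 */
static int iemVmxSketchHighestSetBitInSparseReg(uint32_t const pau32Frags[8], uint8_t *pidxHighestBit)
{
    for (int8_t iFrag = 7; iFrag >= 0; iFrag--)
        if (pau32Frags[iFrag])
        {
            /* ASMBitLastSetU32 returns a 1-based bit index, hence the -1. */
            *pidxHighestBit = (uint8_t)(iFrag * 32 + ASMBitLastSetU32(pau32Frags[iFrag]) - 1);
            return VINF_SUCCESS;
        }
    return VERR_NOT_FOUND;
}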
4555
4556
4557/**
4558 * Evaluates pending virtual interrupts.
4559 *
4560 * @param pVCpu The cross context virtual CPU structure.
4561 */
4562IEM_STATIC void iemVmxEvalPendingVirtIntrs(PVMCPU pVCpu)
4563{
4564 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4565 Assert(pVmcs);
4566 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4567
4568 if (!(pVmcs->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4569 {
4570 uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
4571 uint8_t const uPpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_PPR);
4572
4573 if ((uRvi >> 4) > (uPpr >> 4))
4574 {
4575 Log2(("eval_virt_intrs: uRvi=%#x uPpr=%#x - Signaling pending interrupt\n", uRvi, uPpr));
4576 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
4577 }
4578 else
4579 Log2(("eval_virt_intrs: uRvi=%#x uPpr=%#x - Nothing to do\n", uRvi, uPpr));
4580 }
4581}
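
/*
 * Illustrative sketch, not part of the original code: the decision made above. A pending
 * virtual interrupt is signalled only when the priority class (bits 7:4) of RVI is strictly
 * higher than that of the virtual PPR. The helper name is hypothetical.
 */
static bool iemVmxSketchIsVirtIntrPending(uint8_t uRvi, uint8_t uPpr)
{
    return (uRvi >> 4) > (uPpr >> 4);
}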
4582
4583
4584/**
4585 * Performs PPR virtualization.
4586 *
4587 * Updates the virtual PPR (VPPR) field on the virtual-APIC page.
4588 * @param pVCpu The cross context virtual CPU structure.
4589 */
4590IEM_STATIC void iemVmxPprVirtualization(PVMCPU pVCpu)
4591{
4592 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4593 Assert(pVmcs);
4594 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4595 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4596
4597 /*
4598 * PPR virtualization is performed in response to a VM-entry, TPR virtualization
4599 * or EOI virtualization.
4600 *
4601 * See Intel spec. 29.1.3 "PPR Virtualization".
4602 */
4603 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4604 uint32_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
4605
4606 uint32_t uPpr;
4607 if (((uTpr >> 4) & 0xf) >= ((uSvi >> 4) & 0xf))
4608 uPpr = uTpr & 0xff;
4609 else
4610 uPpr = uSvi & 0xf0;
4611
4612 Log2(("ppr_virt: uTpr=%#x uSvi=%#x uPpr=%#x\n", uTpr, uSvi, uPpr));
4613 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_PPR, uPpr);
4614}
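
/*
 * Illustrative sketch, not part of the original code: the PPR computation above as a pure
 * function over plain integers. VPPR takes the whole of VTPR when VTPR's priority class is
 * at least SVI's, otherwise it takes SVI's priority class with the low nibble cleared. The
 * helper name is hypothetical.
 */
static uint32_t iemVmxSketchComputePpr(uint32_t uTpr, uint8_t uSvi)
{
    if (((uTpr >> 4) & 0xf) >= ((uSvi >> 4) & 0xf))
        return uTpr & 0xff;
    return uSvi & 0xf0;
}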
4615
4616
4617/**
4618 * Performs VMX TPR virtualization.
4619 *
4620 * @returns VBox strict status code.
4621 * @param pVCpu The cross context virtual CPU structure.
4622 */
4623IEM_STATIC VBOXSTRICTRC iemVmxTprVirtualization(PVMCPU pVCpu)
4624{
4625 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4626 Assert(pVmcs);
4627 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4628
4629 /*
4630 * We should have already performed the virtual-APIC write to the TPR offset
4631 * in the virtual-APIC page. We now perform TPR virtualization.
4632 *
4633 * See Intel spec. 29.1.2 "TPR Virtualization".
4634 */
4635 if (!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
4636 {
4637 uint32_t const uTprThreshold = pVmcs->u32TprThreshold;
4638 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4639
4640 /*
4641 * If the VTPR falls below the TPR threshold, we must cause a VM-exit.
4642 * See Intel spec. 29.1.2 "TPR Virtualization".
4643 */
4644 if (((uTpr >> 4) & 0xf) < uTprThreshold)
4645 {
4646 Log2(("tpr_virt: uTpr=%u uTprThreshold=%u -> VM-exit\n", uTpr, uTprThreshold));
4647 return iemVmxVmexit(pVCpu, VMX_EXIT_TPR_BELOW_THRESHOLD, 0 /* u64ExitQual */);
4648 }
4649 }
4650 else
4651 {
4652 iemVmxPprVirtualization(pVCpu);
4653 iemVmxEvalPendingVirtIntrs(pVCpu);
4654 }
4655
4656 return VINF_SUCCESS;
4657}
4658
4659
4660/**
4661 * Checks whether an EOI write for the given interrupt vector causes a VM-exit or
4662 * not.
4663 *
4664 * @returns @c true if the EOI write is intercepted, @c false otherwise.
4665 * @param pVCpu The cross context virtual CPU structure.
4666 * @param uVector The interrupt that was acknowledged using an EOI.
4667 */
4668IEM_STATIC bool iemVmxIsEoiInterceptSet(PCVMCPU pVCpu, uint8_t uVector)
4669{
4670 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4671 Assert(pVmcs);
4672 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4673
4674 if (uVector < 64)
4675 return RT_BOOL(pVmcs->u64EoiExitBitmap0.u & RT_BIT_64(uVector));
4676 if (uVector < 128)
4677 return RT_BOOL(pVmcs->u64EoiExitBitmap1.u & RT_BIT_64(uVector));
4678 if (uVector < 192)
4679 return RT_BOOL(pVmcs->u64EoiExitBitmap2.u & RT_BIT_64(uVector));
4680 return RT_BOOL(pVmcs->u64EoiExitBitmap3.u & RT_BIT_64(uVector));
4681}
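
/*
 * Illustrative sketch, not part of the original code: the same EOI-exit bitmap lookup over a
 * caller-supplied copy of the four 64-bit bitmap fields, making the vector-to-bitmap indexing
 * explicit. The helper name and the array parameter are hypothetical.
 */
static bool iemVmxSketchIsEoiIntercepted(uint64_t const pau64EoiExitBitmap[4], uint8_t uVector)
{
    /* Bitmap word = vector / 64, bit within that word = vector % 64. */
    return RT_BOOL(pau64EoiExitBitmap[uVector / 64] & RT_BIT_64(uVector % 64));
}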
4682
4683
4684/**
4685 * Performs EOI virtualization.
4686 *
4687 * @returns VBox strict status code.
4688 * @param pVCpu The cross context virtual CPU structure.
4689 */
4690IEM_STATIC VBOXSTRICTRC iemVmxEoiVirtualization(PVMCPU pVCpu)
4691{
4692 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4693 Assert(pVmcs);
4694 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4695
4696 /*
4697 * Mark the current in-service guest interrupt as no longer in-service (clear it
4698 * in the ISR) and get the next in-service guest interrupt (if any).
4699 *
4700 * See Intel spec. 29.1.4 "EOI Virtualization".
4701 */
4702 uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
4703 uint8_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
4704 Log2(("eoi_virt: uRvi=%#x uSvi=%#x\n", uRvi, uSvi));
4705
4706 uint8_t uVector = uSvi;
4707 iemVmxVirtApicClearVectorInReg(pVCpu, XAPIC_OFF_ISR0, uVector);
4708
4709 uVector = 0;
4710 iemVmxVirtApicGetHighestSetBitInReg(pVCpu, XAPIC_OFF_ISR0, &uVector);
4711
4712 if (uVector)
4713 Log2(("eoi_virt: next interrupt %#x\n", uVector));
4714 else
4715 Log2(("eoi_virt: no interrupt pending in ISR\n"));
4716
4717 /* Update guest-interrupt status SVI (leave RVI portion as it is) in the VMCS. */
4718 pVmcs->u16GuestIntStatus = RT_MAKE_U16(uRvi, uVector);
4719
4720 iemVmxPprVirtualization(pVCpu);
4721 if (iemVmxIsEoiInterceptSet(pVCpu, uVector))
4722 return iemVmxVmexit(pVCpu, VMX_EXIT_VIRTUALIZED_EOI, uVector);
4723 iemVmxEvalPendingVirtIntrs(pVCpu);
4724 return VINF_SUCCESS;
4725}
4726
4727
4728/**
4729 * Performs self-IPI virtualization.
4730 *
4731 * @returns VBox strict status code.
4732 * @param pVCpu The cross context virtual CPU structure.
4733 */
4734IEM_STATIC VBOXSTRICTRC iemVmxSelfIpiVirtualization(PVMCPU pVCpu)
4735{
4736 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4737 Assert(pVmcs);
4738 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4739
4740 /*
4741 * We should have already performed the virtual-APIC write to the self-IPI offset
4742 * in the virtual-APIC page. We now perform self-IPI virtualization.
4743 *
4744 * See Intel spec. 29.1.5 "Self-IPI Virtualization".
4745 */
4746 uint8_t const uVector = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_LO);
4747 Log2(("self_ipi_virt: uVector=%#x\n", uVector));
4748 iemVmxVirtApicSetVectorInReg(pVCpu, XAPIC_OFF_IRR0, uVector);
4749 uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
4750 uint8_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
4751 if (uVector > uRvi)
4752 pVmcs->u16GuestIntStatus = RT_MAKE_U16(uVector, uSvi);
4753 iemVmxEvalPendingVirtIntrs(pVCpu);
4754 return VINF_SUCCESS;
4755}
4756
4757
4758/**
4759 * Performs VMX APIC-write emulation.
4760 *
4761 * @returns VBox strict status code.
4762 * @param pVCpu The cross context virtual CPU structure.
4763 */
4764IEM_STATIC VBOXSTRICTRC iemVmxApicWriteEmulation(PVMCPU pVCpu)
4765{
4766 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4767 Assert(pVmcs);
4768
4769 /* Import the virtual-APIC write offset (part of the hardware-virtualization state). */
4770 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
4771
4772 /*
4773 * Perform APIC-write emulation based on the virtual-APIC register written.
4774 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4775 */
4776 uint16_t const offApicWrite = iemVmxVirtApicClearPendingWrite(pVCpu);
4777 VBOXSTRICTRC rcStrict;
4778 switch (offApicWrite)
4779 {
4780 case XAPIC_OFF_TPR:
4781 {
4782 /* Clear bytes 3:1 of the VTPR and perform TPR virtualization. */
4783 uint32_t uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4784 uTpr &= UINT32_C(0x000000ff);
4785 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_TPR, uTpr);
4786 Log2(("iemVmxApicWriteEmulation: TPR write %#x\n", uTpr));
4787 rcStrict = iemVmxTprVirtualization(pVCpu);
4788 break;
4789 }
4790
4791 case XAPIC_OFF_EOI:
4792 {
4793 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4794 {
4795 /* Clear VEOI and perform EOI virtualization. */
4796 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_EOI, 0);
4797 Log2(("iemVmxApicWriteEmulation: EOI write\n"));
4798 rcStrict = iemVmxEoiVirtualization(pVCpu);
4799 }
4800 else
4801 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4802 break;
4803 }
4804
4805 case XAPIC_OFF_ICR_LO:
4806 {
4807 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4808 {
4809 /* If the ICR_LO is valid, write it and perform self-IPI virtualization. */
4810 uint32_t const uIcrLo = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_LO);
4811 uint32_t const fIcrLoMb0 = UINT32_C(0xfffbb700);
4812 uint32_t const fIcrLoMb1 = UINT32_C(0x000000f0);
4813 if ( !(uIcrLo & fIcrLoMb0)
4814 && (uIcrLo & fIcrLoMb1))
4815 {
4816 Log2(("iemVmxApicWriteEmulation: Self-IPI virtualization with vector %#x\n", (uIcrLo & 0xff)));
4817 rcStrict = iemVmxSelfIpiVirtualization(pVCpu);
4818 }
4819 else
4820 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4821 }
4822 else
4823 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4824 break;
4825 }
4826
4827 case XAPIC_OFF_ICR_HI:
4828 {
4829 /* Clear bytes 2:0 of VICR_HI. No other virtualization or VM-exit must occur. */
4830 uint32_t uIcrHi = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_HI);
4831 uIcrHi &= UINT32_C(0xff000000);
4832 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_ICR_HI, uIcrHi);
4833 rcStrict = VINF_SUCCESS;
4834 break;
4835 }
4836
4837 default:
4838 {
4839 /* Writes to any other virtual-APIC register causes an APIC-write VM-exit. */
4840 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4841 break;
4842 }
4843 }
4844
4845 return rcStrict;
4846}
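
/*
 * Illustrative sketch, not part of the original code: the ICR_LO test used above to decide
 * whether a write qualifies for self-IPI virtualization. It qualifies only when every bit in
 * the must-be-zero mask is clear and the vector's upper nibble is non-zero (vector >= 16).
 * The helper name is hypothetical; the masks are the ones used above.
 */
static bool iemVmxSketchIsSelfIpiIcrLo(uint32_t uIcrLo)
{
    uint32_t const fIcrLoMb0 = UINT32_C(0xfffbb700);    /* Bits that must be zero. */
    uint32_t const fIcrLoMb1 = UINT32_C(0x000000f0);    /* At least one of these must be set. */
    return !(uIcrLo & fIcrLoMb0)
        && (uIcrLo & fIcrLoMb1);
}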
4847
4848
4849/**
4850 * Checks guest control registers, debug registers and MSRs as part of VM-entry.
4851 *
4852 * @param pVCpu The cross context virtual CPU structure.
4853 * @param pszInstr The VMX instruction name (for logging purposes).
4854 */
4855IEM_STATIC int iemVmxVmentryCheckGuestControlRegsMsrs(PVMCPU pVCpu, const char *pszInstr)
4856{
4857 /*
4858 * Guest Control Registers, Debug Registers, and MSRs.
4859 * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs".
4860 */
4861 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4862 const char *const pszFailure = "VM-exit";
4863 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
4864
4865 /* CR0 reserved bits. */
4866 {
4867 /* CR0 MB1 bits. */
4868 uint64_t u64Cr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
4869 Assert(!(u64Cr0Fixed0 & (X86_CR0_NW | X86_CR0_CD)));
4870 if (fUnrestrictedGuest)
4871 u64Cr0Fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
4872 if ((pVmcs->u64GuestCr0.u & u64Cr0Fixed0) == u64Cr0Fixed0)
4873 { /* likely */ }
4874 else
4875 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed0);
4876
4877 /* CR0 MBZ bits. */
4878 uint64_t const u64Cr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
4879 if (!(pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1))
4880 { /* likely */ }
4881 else
4882 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed1);
4883
4884 /* Without unrestricted-guest support, VT-x does not support unpaged protected mode. */
4885 if ( !fUnrestrictedGuest
4886 && (pVmcs->u64GuestCr0.u & X86_CR0_PG)
4887 && !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
4888 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0PgPe);
4889 }
4890
4891 /* CR4 reserved bits. */
4892 {
4893 /* CR4 MB1 bits. */
4894 uint64_t const u64Cr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
4895 if ((pVmcs->u64GuestCr4.u & u64Cr4Fixed0) == u64Cr4Fixed0)
4896 { /* likely */ }
4897 else
4898 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed0);
4899
4900 /* CR4 MBZ bits. */
4901 uint64_t const u64Cr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
4902 if (!(pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1))
4903 { /* likely */ }
4904 else
4905 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed1);
4906 }
4907
4908 /* DEBUGCTL MSR. */
4909 if ( !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4910 || !(pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
4911 { /* likely */ }
4912 else
4913 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDebugCtl);
4914
4915 /* 64-bit CPU checks. */
4916 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4917 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4918 {
4919 if (fGstInLongMode)
4920 {
4921 /* PAE must be set. */
4922 if ( (pVmcs->u64GuestCr0.u & X86_CR0_PG)
4923 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE))
4924 { /* likely */ }
4925 else
4926 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPae);
4927 }
4928 else
4929 {
4930 /* PCIDE should not be set. */
4931 if (!(pVmcs->u64GuestCr4.u & X86_CR4_PCIDE))
4932 { /* likely */ }
4933 else
4934 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPcide);
4935 }
4936
4937 /* CR3. */
4938 if (!(pVmcs->u64GuestCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
4939 { /* likely */ }
4940 else
4941 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr3);
4942
4943 /* DR7. */
4944 if ( !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4945 || !(pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
4946 { /* likely */ }
4947 else
4948 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDr7);
4949
4950 /* SYSENTER ESP and SYSENTER EIP. */
4951 if ( X86_IS_CANONICAL(pVmcs->u64GuestSysenterEsp.u)
4952 && X86_IS_CANONICAL(pVmcs->u64GuestSysenterEip.u))
4953 { /* likely */ }
4954 else
4955 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSysenterEspEip);
4956 }
4957
4958 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
4959 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
4960
4961 /* PAT MSR. */
4962 if ( !(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4963 || CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
4964 { /* likely */ }
4965 else
4966 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPatMsr);
4967
4968 /* EFER MSR. */
4969 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4970 {
4971 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
4972 if (!(pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
4973 { /* likely */ }
4974 else
4975 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsrRsvd);
4976
4977 bool const fGstLma = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_LMA);
4978 bool const fGstLme = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_LME);
4979 if ( fGstLma == fGstInLongMode
4980 && ( !(pVmcs->u64GuestCr0.u & X86_CR0_PG)
4981 || fGstLma == fGstLme))
4982 { /* likely */ }
4983 else
4984 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsr);
4985 }
4986
4987 /* We don't support IA32_BNDCFGS MSR yet. */
4988 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
4989
4990 NOREF(pszInstr);
4991 NOREF(pszFailure);
4992 return VINF_SUCCESS;
4993}
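
/*
 * Illustrative sketch, not part of the original code: the fixed-bit rule applied to CR0 and
 * CR4 above. Every bit set in the FIXED0 MSR must be 1 in the candidate value, and every bit
 * clear in the FIXED1 MSR must be 0. The helper name is hypothetical.
 */
static bool iemVmxSketchIsCrValid(uint64_t uCr, uint64_t fFixed0, uint64_t fFixed1)
{
    return (uCr & fFixed0) == fFixed0   /* All must-be-one bits are set. */
        && !(uCr & ~fFixed1);           /* No must-be-zero bit is set. */
}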
4994
4995
4996/**
4997 * Checks guest segment registers, LDTR and TR as part of VM-entry.
4998 *
4999 * @param pVCpu The cross context virtual CPU structure.
5000 * @param pszInstr The VMX instruction name (for logging purposes).
5001 */
5002IEM_STATIC int iemVmxVmentryCheckGuestSegRegs(PVMCPU pVCpu, const char *pszInstr)
5003{
5004 /*
5005 * Segment registers.
5006 * See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
5007 */
5008 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5009 const char *const pszFailure = "VM-exit";
5010 bool const fGstInV86Mode = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);
5011 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
5012 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5013
5014 /* Selectors. */
5015 if ( !fGstInV86Mode
5016 && !fUnrestrictedGuest
5017 && (pVmcs->GuestSs & X86_SEL_RPL) != (pVmcs->GuestCs & X86_SEL_RPL))
5018 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelCsSsRpl);
5019
5020 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
5021 {
5022 CPUMSELREG SelReg;
5023 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &SelReg);
5024 if (RT_LIKELY(rc == VINF_SUCCESS))
5025 { /* likely */ }
5026 else
5027 return rc;
5028
5029 /*
5030 * Virtual-8086 mode checks.
5031 */
5032 if (fGstInV86Mode)
5033 {
5034 /* Base address. */
5035 if (SelReg.u64Base == (uint64_t)SelReg.Sel << 4)
5036 { /* likely */ }
5037 else
5038 {
5039 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBaseV86(iSegReg);
5040 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5041 }
5042
5043 /* Limit. */
5044 if (SelReg.u32Limit == 0xffff)
5045 { /* likely */ }
5046 else
5047 {
5048 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegLimitV86(iSegReg);
5049 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5050 }
5051
5052 /* Attribute. */
5053 if (SelReg.Attr.u == 0xf3)
5054 { /* likely */ }
5055 else
5056 {
5057 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrV86(iSegReg);
5058 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5059 }
5060
5061 /* We're done; move to checking the next segment. */
5062 continue;
5063 }
5064
5065 /* Checks done by 64-bit CPUs. */
5066 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5067 {
5068 /* Base address. */
5069 if ( iSegReg == X86_SREG_FS
5070 || iSegReg == X86_SREG_GS)
5071 {
5072 if (X86_IS_CANONICAL(SelReg.u64Base))
5073 { /* likely */ }
5074 else
5075 {
5076 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
5077 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5078 }
5079 }
5080 else if (iSegReg == X86_SREG_CS)
5081 {
5082 if (!RT_HI_U32(SelReg.u64Base))
5083 { /* likely */ }
5084 else
5085 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseCs);
5086 }
5087 else
5088 {
5089 if ( SelReg.Attr.n.u1Unusable
5090 || !RT_HI_U32(SelReg.u64Base))
5091 { /* likely */ }
5092 else
5093 {
5094 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
5095 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5096 }
5097 }
5098 }
5099
5100 /*
5101 * Checks outside Virtual-8086 mode.
5102 */
5103 uint8_t const uSegType = SelReg.Attr.n.u4Type;
5104 uint8_t const fCodeDataSeg = SelReg.Attr.n.u1DescType;
5105 uint8_t const fUsable = !SelReg.Attr.n.u1Unusable;
5106 uint8_t const uDpl = SelReg.Attr.n.u2Dpl;
5107 uint8_t const fPresent = SelReg.Attr.n.u1Present;
5108 uint8_t const uGranularity = SelReg.Attr.n.u1Granularity;
5109 uint8_t const uDefBig = SelReg.Attr.n.u1DefBig;
5110 uint8_t const fSegLong = SelReg.Attr.n.u1Long;
5111
5112 /* Code or usable segment. */
5113 if ( iSegReg == X86_SREG_CS
5114 || fUsable)
5115 {
5116 /* Reserved bits (bits 31:17 and bits 11:8). */
5117 if (!(SelReg.Attr.u & 0xfffe0f00))
5118 { /* likely */ }
5119 else
5120 {
5121 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrRsvd(iSegReg);
5122 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5123 }
5124
5125 /* Descriptor type. */
5126 if (fCodeDataSeg)
5127 { /* likely */ }
5128 else
5129 {
5130 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDescType(iSegReg);
5131 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5132 }
5133
5134 /* Present. */
5135 if (fPresent)
5136 { /* likely */ }
5137 else
5138 {
5139 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrPresent(iSegReg);
5140 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5141 }
5142
5143 /* Granularity. */
5144 if ( ((SelReg.u32Limit & 0x00000fff) == 0x00000fff || !uGranularity)
5145 && ((SelReg.u32Limit & 0xfff00000) == 0x00000000 || uGranularity))
5146 { /* likely */ }
5147 else
5148 {
5149 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrGran(iSegReg);
5150 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5151 }
5152 }
5153
5154 if (iSegReg == X86_SREG_CS)
5155 {
5156 /* Segment Type and DPL. */
5157 if ( uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
5158 && fUnrestrictedGuest)
5159 {
5160 if (uDpl == 0)
5161 { /* likely */ }
5162 else
5163 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplZero);
5164 }
5165 else if ( uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_ACCESSED)
5166 || uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
5167 {
5168 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5169 if (uDpl == AttrSs.n.u2Dpl)
5170 { /* likely */ }
5171 else
5172 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplEqSs);
5173 }
5174 else if ((uSegType & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
5175 == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
5176 {
5177 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5178 if (uDpl <= AttrSs.n.u2Dpl)
5179 { /* likely */ }
5180 else
5181 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplLtSs);
5182 }
5183 else
5184 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsType);
5185
5186 /* Def/Big. */
5187 if ( fGstInLongMode
5188 && fSegLong)
5189 {
5190 if (uDefBig == 0)
5191 { /* likely */ }
5192 else
5193 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDefBig);
5194 }
5195 }
5196 else if (iSegReg == X86_SREG_SS)
5197 {
5198 /* Segment Type. */
5199 if ( !fUsable
5200 || uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
5201 || uSegType == (X86_SEL_TYPE_DOWN | X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED))
5202 { /* likely */ }
5203 else
5204 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsType);
5205
5206 /* DPL. */
5207 if (!fUnrestrictedGuest)
5208 {
5209 if (uDpl == (SelReg.Sel & X86_SEL_RPL))
5210 { /* likely */ }
5211 else
5212 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplEqRpl);
5213 }
5214 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
5215 if ( AttrCs.n.u4Type == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
5216 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
5217 {
5218 if (uDpl == 0)
5219 { /* likely */ }
5220 else
5221 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplZero);
5222 }
5223 }
5224 else
5225 {
5226 /* DS, ES, FS, GS. */
5227 if (fUsable)
5228 {
5229 /* Segment type. */
5230 if (uSegType & X86_SEL_TYPE_ACCESSED)
5231 { /* likely */ }
5232 else
5233 {
5234 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrTypeAcc(iSegReg);
5235 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5236 }
5237
5238 if ( !(uSegType & X86_SEL_TYPE_CODE)
5239 || (uSegType & X86_SEL_TYPE_READ))
5240 { /* likely */ }
5241 else
5242 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsTypeRead);
5243
5244 /* DPL. */
5245 if ( !fUnrestrictedGuest
5246 && uSegType <= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
5247 {
5248 if (uDpl >= (SelReg.Sel & X86_SEL_RPL))
5249 { /* likely */ }
5250 else
5251 {
5252 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDplRpl(iSegReg);
5253 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5254 }
5255 }
5256 }
5257 }
5258 }
5259
5260 /*
5261 * LDTR.
5262 */
5263 {
5264 CPUMSELREG Ldtr;
5265 Ldtr.Sel = pVmcs->GuestLdtr;
5266 Ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
5267 Ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
5268 Ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
5269
5270 if (!Ldtr.Attr.n.u1Unusable)
5271 {
5272 /* Selector. */
5273 if (!(Ldtr.Sel & X86_SEL_LDT))
5274 { /* likely */ }
5275 else
5276 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelLdtr);
5277
5278 /* Base. */
5279 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5280 {
5281 if (X86_IS_CANONICAL(Ldtr.u64Base))
5282 { /* likely */ }
5283 else
5284 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseLdtr);
5285 }
5286
5287 /* Attributes. */
5288 /* Reserved bits (bits 31:17 and bits 11:8). */
5289 if (!(Ldtr.Attr.u & 0xfffe0f00))
5290 { /* likely */ }
5291 else
5292 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrRsvd);
5293
5294 if (Ldtr.Attr.n.u4Type == X86_SEL_TYPE_SYS_LDT)
5295 { /* likely */ }
5296 else
5297 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrType);
5298
5299 if (!Ldtr.Attr.n.u1DescType)
5300 { /* likely */ }
5301 else
5302 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrDescType);
5303
5304 if (Ldtr.Attr.n.u1Present)
5305 { /* likely */ }
5306 else
5307 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrPresent);
5308
5309 if ( ((Ldtr.u32Limit & 0x00000fff) == 0x00000fff || !Ldtr.Attr.n.u1Granularity)
5310 && ((Ldtr.u32Limit & 0xfff00000) == 0x00000000 || Ldtr.Attr.n.u1Granularity))
5311 { /* likely */ }
5312 else
5313 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrGran);
5314 }
5315 }
5316
5317 /*
5318 * TR.
5319 */
5320 {
5321 CPUMSELREG Tr;
5322 Tr.Sel = pVmcs->GuestTr;
5323 Tr.u32Limit = pVmcs->u32GuestTrLimit;
5324 Tr.u64Base = pVmcs->u64GuestTrBase.u;
5325 Tr.Attr.u = pVmcs->u32GuestTrAttr;
5326
5327 /* Selector. */
5328 if (!(Tr.Sel & X86_SEL_LDT))
5329 { /* likely */ }
5330 else
5331 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelTr);
5332
5333 /* Base. */
5334 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5335 {
5336 if (X86_IS_CANONICAL(Tr.u64Base))
5337 { /* likely */ }
5338 else
5339 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseTr);
5340 }
5341
5342 /* Attributes. */
5343 /* Reserved bits (bits 31:17 and bits 11:8). */
5344 if (!(Tr.Attr.u & 0xfffe0f00))
5345 { /* likely */ }
5346 else
5347 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrRsvd);
5348
5349 if (!Tr.Attr.n.u1Unusable)
5350 { /* likely */ }
5351 else
5352 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrUnusable);
5353
5354 if ( Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY
5355 || ( !fGstInLongMode
5356 && Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY))
5357 { /* likely */ }
5358 else
5359 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrType);
5360
5361 if (!Tr.Attr.n.u1DescType)
5362 { /* likely */ }
5363 else
5364 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrDescType);
5365
5366 if (Tr.Attr.n.u1Present)
5367 { /* likely */ }
5368 else
5369 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrPresent);
5370
5371 if ( ((Tr.u32Limit & 0x00000fff) == 0x00000fff || !Tr.Attr.n.u1Granularity)
5372 && ((Tr.u32Limit & 0xfff00000) == 0x00000000 || Tr.Attr.n.u1Granularity))
5373 { /* likely */ }
5374 else
5375 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrGran);
5376 }
5377
5378 NOREF(pszInstr);
5379 NOREF(pszFailure);
5380 return VINF_SUCCESS;
5381}
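
/*
 * Illustrative sketch, not part of the original code: the limit/granularity consistency rule
 * used repeatedly above for CS/SS/DS/ES/FS/GS, LDTR and TR. If any of the low 12 limit bits
 * is clear the G bit must be 0, and if any of the high 12 limit bits is set the G bit must
 * be 1. The helper name is hypothetical.
 */
static bool iemVmxSketchIsSegLimitGranValid(uint32_t u32Limit, bool fGranularity)
{
    return ((u32Limit & 0x00000fff) == 0x00000fff || !fGranularity)
        && ((u32Limit & 0xfff00000) == 0x00000000 ||  fGranularity);
}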
5382
5383
5384/**
5385 * Checks guest GDTR and IDTR as part of VM-entry.
5386 *
5387 * @param pVCpu The cross context virtual CPU structure.
5388 * @param pszInstr The VMX instruction name (for logging purposes).
5389 */
5390IEM_STATIC int iemVmxVmentryCheckGuestGdtrIdtr(PVMCPU pVCpu, const char *pszInstr)
5391{
5392 /*
5393 * GDTR and IDTR.
5394 * See Intel spec. 26.3.1.3 "Checks on Guest Descriptor-Table Registers".
5395 */
5396 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5397 const char *const pszFailure = "VM-exit";
5398
5399 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5400 {
5401 /* Base. */
5402 if (X86_IS_CANONICAL(pVmcs->u64GuestGdtrBase.u))
5403 { /* likely */ }
5404 else
5405 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrBase);
5406
5407 if (X86_IS_CANONICAL(pVmcs->u64GuestIdtrBase.u))
5408 { /* likely */ }
5409 else
5410 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrBase);
5411 }
5412
5413 /* Limit. */
5414 if (!RT_HI_U16(pVmcs->u32GuestGdtrLimit))
5415 { /* likely */ }
5416 else
5417 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrLimit);
5418
5419 if (!RT_HI_U16(pVmcs->u32GuestIdtrLimit))
5420 { /* likely */ }
5421 else
5422 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrLimit);
5423
5424 NOREF(pszInstr);
5425 NOREF(pszFailure);
5426 return VINF_SUCCESS;
5427}
5428
5429
5430/**
5431 * Checks guest RIP and RFLAGS as part of VM-entry.
5432 *
5433 * @param pVCpu The cross context virtual CPU structure.
5434 * @param pszInstr The VMX instruction name (for logging purposes).
5435 */
5436IEM_STATIC int iemVmxVmentryCheckGuestRipRFlags(PVMCPU pVCpu, const char *pszInstr)
5437{
5438 /*
5439 * RIP and RFLAGS.
5440 * See Intel spec. 26.3.1.4 "Checks on Guest RIP and RFLAGS".
5441 */
5442 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5443 const char *const pszFailure = "VM-exit";
5444 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5445
5446 /* RIP. */
5447 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5448 {
5449 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
5450 if ( !fGstInLongMode
5451 || !AttrCs.n.u1Long)
5452 {
5453 if (!RT_HI_U32(pVmcs->u64GuestRip.u))
5454 { /* likely */ }
5455 else
5456 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRipRsvd);
5457 }
5458
5459 if ( fGstInLongMode
5460 && AttrCs.n.u1Long)
5461 {
5462 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth == 48); /* Canonical. */
5463 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth < 64
5464 && X86_IS_CANONICAL(pVmcs->u64GuestRip.u))
5465 { /* likely */ }
5466 else
5467 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRip);
5468 }
5469 }
5470
5471 /* RFLAGS (bits 63:22 (or 31:22), bits 15, 5, 3 are reserved, bit 1 MB1). */
5472 uint64_t const uGuestRFlags = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode ? pVmcs->u64GuestRFlags.u
5473 : pVmcs->u64GuestRFlags.s.Lo;
5474 if ( !(uGuestRFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK))
5475 && (uGuestRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK)
5476 { /* likely */ }
5477 else
5478 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsRsvd);
5479
5480 if ( fGstInLongMode
5481 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
5482 {
5483 if (!(uGuestRFlags & X86_EFL_VM))
5484 { /* likely */ }
5485 else
5486 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsVm);
5487 }
5488
5489 if ( VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo)
5490 && VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo) == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5491 {
5492 if (uGuestRFlags & X86_EFL_IF)
5493 { /* likely */ }
5494 else
5495 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsIf);
5496 }
5497
5498 NOREF(pszInstr);
5499 NOREF(pszFailure);
5500 return VINF_SUCCESS;
5501}
5502
5503
5504/**
5505 * Checks guest non-register state as part of VM-entry.
5506 *
5507 * @param pVCpu The cross context virtual CPU structure.
5508 * @param pszInstr The VMX instruction name (for logging purposes).
5509 */
5510IEM_STATIC int iemVmxVmentryCheckGuestNonRegState(PVMCPU pVCpu, const char *pszInstr)
5511{
5512 /*
5513 * Guest non-register state.
5514 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
5515 */
5516 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5517 const char *const pszFailure = "VM-exit";
5518
5519 /*
5520 * Activity state.
5521 */
5522 uint64_t const u64GuestVmxMiscMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Misc;
5523 uint32_t const fActivityStateMask = RT_BF_GET(u64GuestVmxMiscMsr, VMX_BF_MISC_ACTIVITY_STATES);
5524 if (!(pVmcs->u32GuestActivityState & fActivityStateMask))
5525 { /* likely */ }
5526 else
5527 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateRsvd);
5528
5529 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5530 if ( !AttrSs.n.u2Dpl
5531 || pVmcs->u32GuestActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT)
5532 { /* likely */ }
5533 else
5534 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateSsDpl);
5535
5536 if ( pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
5537 || pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5538 {
5539 if (pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE)
5540 { /* likely */ }
5541 else
5542 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateStiMovSs);
5543 }
5544
5545 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
5546 {
5547 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
5548 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(pVmcs->u32EntryIntInfo);
5549 AssertCompile(VMX_V_GUEST_ACTIVITY_STATE_MASK == (VMX_VMCS_GUEST_ACTIVITY_HLT | VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN));
5550 switch (pVmcs->u32GuestActivityState)
5551 {
5552 case VMX_VMCS_GUEST_ACTIVITY_HLT:
5553 {
5554 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT
5555 || uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
5556 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
5557 && ( uVector == X86_XCPT_DB
5558 || uVector == X86_XCPT_MC))
5559 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
5560 && uVector == VMX_ENTRY_INT_INFO_VECTOR_MTF))
5561 { /* likely */ }
5562 else
5563 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateHlt);
5564 break;
5565 }
5566
5567 case VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN:
5568 {
5569 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
5570 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
5571 && uVector == X86_XCPT_MC))
5572 { /* likely */ }
5573 else
5574 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateShutdown);
5575 break;
5576 }
5577
5578 case VMX_VMCS_GUEST_ACTIVITY_ACTIVE:
5579 default:
5580 break;
5581 }
5582 }
5583
5584 /*
5585 * Interruptibility state.
5586 */
5587 if (!(pVmcs->u32GuestIntrState & ~VMX_VMCS_GUEST_INT_STATE_MASK))
5588 { /* likely */ }
5589 else
5590 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRsvd);
5591
5592 if ((pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5593 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5594 { /* likely */ }
5595 else
5596 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateStiMovSs);
5597
5598 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_IF)
5599 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5600 { /* likely */ }
5601 else
5602 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRFlagsSti);
5603
5604 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
5605 {
5606 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
5607 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5608 {
5609 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
5610 { /* likely */ }
5611 else
5612 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateExtInt);
5613 }
5614 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5615 {
5616 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
5617 { /* likely */ }
5618 else
5619 {
5620 /*
5621 * We don't support injecting NMIs when blocking-by-STI would be in effect.
5622 * We update the Exit qualification only when blocking-by-STI is set
5623 * without blocking-by-MovSS being set. Although in practice it does not
5624 * make much difference since the order of these checks is implementation-defined.
5625 */
5626 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5627 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_NMI_INJECT);
5628 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateNmi);
5629 }
5630
5631 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5632 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI))
5633 { /* likely */ }
5634 else
5635 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateVirtNmi);
5636 }
5637 }
5638
5639 /* We don't support SMM yet. So blocking-by-SMIs must not be set. */
5640 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI))
5641 { /* likely */ }
5642 else
5643 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateSmi);
5644
5645 /* We don't support SGX yet. So enclave-interruption must not be set. */
5646 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_ENCLAVE))
5647 { /* likely */ }
5648 else
5649 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateEnclave);
5650
5651 /*
5652 * Pending debug exceptions.
5653 */
5654 uint64_t const uPendingDbgXcpt = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode
5655 ? pVmcs->u64GuestPendingDbgXcpt.u
5656 : pVmcs->u64GuestPendingDbgXcpt.s.Lo;
5657 if (!(uPendingDbgXcpt & ~VMX_VMCS_GUEST_PENDING_DEBUG_VALID_MASK))
5658 { /* likely */ }
5659 else
5660 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRsvd);
5661
5662 if ( (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5663 || pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5664 {
5665 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_TF)
5666 && !(pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF)
5667 && !(uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
5668 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsTf);
5669
5670 if ( ( !(pVmcs->u64GuestRFlags.u & X86_EFL_TF)
5671 || (pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF))
5672 && (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
5673 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsNoTf);
5674 }
5675
5676 /* We don't support RTM (Real-time Transactional Memory) yet. */
5677 if (!(uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_RTM))
5678 { /* likely */ }
5679 else
5680 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRtm);
5681
5682 /*
5683 * VMCS link pointer.
5684 */
5685 if (pVmcs->u64VmcsLinkPtr.u != UINT64_C(0xffffffffffffffff))
5686 {
5687 RTGCPHYS const GCPhysShadowVmcs = pVmcs->u64VmcsLinkPtr.u;
5688 /* We don't support SMM yet (so VMCS link pointer cannot be the current VMCS). */
5689 if (GCPhysShadowVmcs != IEM_VMX_GET_CURRENT_VMCS(pVCpu))
5690 { /* likely */ }
5691 else
5692 {
5693 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5694 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrCurVmcs);
5695 }
5696
5697 /* Validate the address. */
5698 if ( !(GCPhysShadowVmcs & X86_PAGE_4K_OFFSET_MASK)
5699 && !(GCPhysShadowVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5700 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysShadowVmcs))
5701 { /* likely */ }
5702 else
5703 {
5704 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5705 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmcsLinkPtr);
5706 }
5707
5708 /* Read the shadow VMCS referenced by the VMCS link pointer from guest memory. */
5709 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs));
5710 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs),
5711 GCPhysShadowVmcs, VMX_V_VMCS_SIZE);
5712 if (RT_SUCCESS(rc))
5713 { /* likely */ }
5714 else
5715 {
5716 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5717 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys);
5718 }
5719
5720 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
5721 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID)
5722 { /* likely */ }
5723 else
5724 {
5725 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5726 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrRevId);
5727 }
5728
5729 /* Verify the shadow bit is set if VMCS shadowing is enabled. */
5730 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
5731 || pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.fIsShadowVmcs)
5732 { /* likely */ }
5733 else
5734 {
5735 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5736 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrShadow);
5737 }
5738
5739 /* Finally update our cache of the guest physical address of the shadow VMCS. */
5740 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs = GCPhysShadowVmcs;
5741 }
5742
5743 NOREF(pszInstr);
5744 NOREF(pszFailure);
5745 return VINF_SUCCESS;
5746}
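
/*
 * Illustrative sketch, not part of the original code: the address checks applied to the VMCS
 * link pointer above. The pointer is ignored when all-ones; otherwise it must be 4K aligned
 * and must not exceed the supported physical-address width. The helper name and the explicit
 * width parameter are hypothetical.
 */
static bool iemVmxSketchIsVmcsLinkPtrAddrOk(uint64_t uVmcsLinkPtr, uint8_t cPhysAddrWidth)
{
    if (uVmcsLinkPtr == UINT64_C(0xffffffffffffffff))   /* The "not used" value. */
        return true;
    return !(uVmcsLinkPtr & X86_PAGE_4K_OFFSET_MASK)    /* 4K aligned. */
        && !(uVmcsLinkPtr >> cPhysAddrWidth);           /* Within the physical-address width. */
}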
5747
5748
5749/**
5750 * Checks if the PDPTEs referenced by the nested-guest CR3 are valid as part of
5751 * VM-entry.
5752 *
5753 * @returns VBox status code.
5754 * @param pVCpu The cross context virtual CPU structure.
5755 * @param pszInstr The VMX instruction name (for logging purposes).
5756 * @param pVmcs Pointer to the virtual VMCS.
5757 */
5758IEM_STATIC int iemVmxVmentryCheckGuestPdptesForCr3(PVMCPU pVCpu, const char *pszInstr, PVMXVVMCS pVmcs)
5759{
5760 /*
5761 * Check PDPTEs.
5762 * See Intel spec. 4.4.1 "PDPTE Registers".
5763 */
5764 uint64_t const uGuestCr3 = pVmcs->u64GuestCr3.u & X86_CR3_PAE_PAGE_MASK;
5765 const char *const pszFailure = "VM-exit";
5766
5767 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
5768 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uGuestCr3, sizeof(aPdptes));
5769 if (RT_SUCCESS(rc))
5770 {
5771 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
5772 {
5773 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
5774 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
5775 { /* likely */ }
5776 else
5777 {
5778 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
5779 VMXVDIAG const enmDiag = iemVmxGetDiagVmentryPdpteRsvd(iPdpte);
5780 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5781 }
5782 }
5783 }
5784 else
5785 {
5786 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
5787 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys);
5788 }
5789
5790 NOREF(pszFailure);
5791 NOREF(pszInstr);
5792 return rc;
5793}
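
/*
 * Illustrative sketch, not part of the original code: the per-entry rule used above. A PAE
 * PDPTE is acceptable if it is not present, or if it is present and none of its reserved
 * (must-be-zero) bits are set. The helper name is hypothetical.
 */
static bool iemVmxSketchIsPdpteValid(X86PDPE Pdpte)
{
    return !(Pdpte.u & X86_PDPE_P)
        || !(Pdpte.u & X86_PDPE_PAE_MBZ_MASK);
}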
5794
5795
5796/**
5797 * Checks guest PDPTEs as part of VM-entry.
5798 *
5799 * @param pVCpu The cross context virtual CPU structure.
5800 * @param pszInstr The VMX instruction name (for logging purposes).
5801 */
5802IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPU pVCpu, const char *pszInstr)
5803{
5804 /*
5805 * Guest PDPTEs.
5806 * See Intel spec. 26.3.1.6 "Checks on Guest Page-Directory-Pointer-Table Entries".
5807 */
5808 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5809 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5810
5811 /* Check PDPTEs if the VM-entry is to a guest using PAE paging. */
5812 int rc;
5813 if ( !fGstInLongMode
5814 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE)
5815 && (pVmcs->u64GuestCr0.u & X86_CR0_PG))
5816 {
5817 /*
5818 * We don't support nested-paging for nested-guests yet.
5819 *
5820 * Without nested-paging for nested-guests, the PDPTE fields in the VMCS are not
5821 * used; instead we need to check the PDPTEs referenced by the guest CR3.
5822 */
5823 rc = iemVmxVmentryCheckGuestPdptesForCr3(pVCpu, pszInstr, pVmcs);
5824 }
5825 else
5826 rc = VINF_SUCCESS;
5827 return rc;
5828}
5829
5830
5831/**
5832 * Checks guest-state as part of VM-entry.
5833 *
5834 * @returns VBox status code.
5835 * @param pVCpu The cross context virtual CPU structure.
5836 * @param pszInstr The VMX instruction name (for logging purposes).
5837 */
5838IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPU pVCpu, const char *pszInstr)
5839{
5840 int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
5841 if (RT_SUCCESS(rc))
5842 {
5843 rc = iemVmxVmentryCheckGuestSegRegs(pVCpu, pszInstr);
5844 if (RT_SUCCESS(rc))
5845 {
5846 rc = iemVmxVmentryCheckGuestGdtrIdtr(pVCpu, pszInstr);
5847 if (RT_SUCCESS(rc))
5848 {
5849 rc = iemVmxVmentryCheckGuestRipRFlags(pVCpu, pszInstr);
5850 if (RT_SUCCESS(rc))
5851 {
5852 rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
5853 if (RT_SUCCESS(rc))
5854 return iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
5855 }
5856 }
5857 }
5858 }
5859 return rc;
5860}
5861
5862
5863/**
5864 * Checks host-state as part of VM-entry.
5865 *
5866 * @returns VBox status code.
5867 * @param pVCpu The cross context virtual CPU structure.
5868 * @param pszInstr The VMX instruction name (for logging purposes).
5869 */
5870IEM_STATIC int iemVmxVmentryCheckHostState(PVMCPU pVCpu, const char *pszInstr)
5871{
5872 /*
5873 * Host Control Registers and MSRs.
5874 * See Intel spec. 26.2.2 "Checks on Host Control Registers and MSRs".
5875 */
5876 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5877 const char * const pszFailure = "VMFail";
5878
5879 /* CR0 reserved bits. */
5880 {
5881 /* CR0 MB1 bits. */
5882 uint64_t const u64Cr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
5883 if ((pVmcs->u64HostCr0.u & u64Cr0Fixed0) == u64Cr0Fixed0)
5884 { /* likely */ }
5885 else
5886 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed0);
5887
5888 /* CR0 MBZ bits. */
5889 uint64_t const u64Cr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
5890 if (!(pVmcs->u64HostCr0.u & ~u64Cr0Fixed1))
5891 { /* likely */ }
5892 else
5893 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed1);
5894 }
5895
5896 /* CR4 reserved bits. */
5897 {
5898 /* CR4 MB1 bits. */
5899 uint64_t const u64Cr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
5900 if ((pVmcs->u64HostCr4.u & u64Cr4Fixed0) == u64Cr4Fixed0)
5901 { /* likely */ }
5902 else
5903 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed0);
5904
5905 /* CR4 MBZ bits. */
5906 uint64_t const u64Cr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
5907 if (!(pVmcs->u64HostCr4.u & ~u64Cr4Fixed1))
5908 { /* likely */ }
5909 else
5910 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed1);
5911 }
5912
5913 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5914 {
5915 /* CR3 reserved bits. */
5916 if (!(pVmcs->u64HostCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
5917 { /* likely */ }
5918 else
5919 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr3);
5920
5921 /* SYSENTER ESP and SYSENTER EIP. */
5922 if ( X86_IS_CANONICAL(pVmcs->u64HostSysenterEsp.u)
5923 && X86_IS_CANONICAL(pVmcs->u64HostSysenterEip.u))
5924 { /* likely */ }
5925 else
5926 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSysenterEspEip);
5927 }
5928
5929 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
5930 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PERF_MSR));
5931
5932 /* PAT MSR. */
5933 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
5934 || CPUMIsPatMsrValid(pVmcs->u64HostPatMsr.u))
5935 { /* likely */ }
5936 else
5937 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostPatMsr);
5938
5939 /* EFER MSR. */
5940 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
5941 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
5942 || !(pVmcs->u64HostEferMsr.u & ~uValidEferMask))
5943 { /* likely */ }
5944 else
5945 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsrRsvd);
5946
5947 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
5948 bool const fHostLma = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_LMA);
5949 bool const fHostLme = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_LME);
5950 if ( fHostInLongMode == fHostLma
5951 && fHostInLongMode == fHostLme)
5952 { /* likely */ }
5953 else
5954 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsr);
5955
5956 /*
5957 * Host Segment and Descriptor-Table Registers.
5958 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
5959 */
5960 /* Selector RPL and TI. */
5961 if ( !(pVmcs->HostCs & (X86_SEL_RPL | X86_SEL_LDT))
5962 && !(pVmcs->HostSs & (X86_SEL_RPL | X86_SEL_LDT))
5963 && !(pVmcs->HostDs & (X86_SEL_RPL | X86_SEL_LDT))
5964 && !(pVmcs->HostEs & (X86_SEL_RPL | X86_SEL_LDT))
5965 && !(pVmcs->HostFs & (X86_SEL_RPL | X86_SEL_LDT))
5966 && !(pVmcs->HostGs & (X86_SEL_RPL | X86_SEL_LDT))
5967 && !(pVmcs->HostTr & (X86_SEL_RPL | X86_SEL_LDT)))
5968 { /* likely */ }
5969 else
5970 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSel);
5971
5972 /* CS and TR selectors cannot be 0. */
5973 if ( pVmcs->HostCs
5974 && pVmcs->HostTr)
5975 { /* likely */ }
5976 else
5977 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCsTr);
5978
5979 /* SS cannot be 0 if 32-bit host. */
5980 if ( fHostInLongMode
5981 || pVmcs->HostSs)
5982 { /* likely */ }
5983 else
5984 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSs);
5985
5986 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5987 {
5988 /* FS, GS, GDTR, IDTR, TR base address. */
5989 if ( X86_IS_CANONICAL(pVmcs->u64HostFsBase.u)
5990 && X86_IS_CANONICAL(pVmcs->u64HostGsBase.u)
5991 && X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u)
5992 && X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u)
5993 && X86_IS_CANONICAL(pVmcs->u64HostTrBase.u))
5994 { /* likely */ }
5995 else
5996 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSegBase);
5997 }
5998
5999 /*
6000 * Host address-space size for 64-bit CPUs.
6001 * See Intel spec. 26.2.4 "Checks Related to Address-Space Size".
6002 */
6003 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
6004 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6005 {
6006 bool const fCpuInLongMode = CPUMIsGuestInLongMode(pVCpu);
6007
6008 /* Logical processor in IA-32e mode. */
6009 if (fCpuInLongMode)
6010 {
6011 if (fHostInLongMode)
6012 {
6013 /* PAE must be set. */
6014 if (pVmcs->u64HostCr4.u & X86_CR4_PAE)
6015 { /* likely */ }
6016 else
6017 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pae);
6018
6019 /* RIP must be canonical. */
6020 if (X86_IS_CANONICAL(pVmcs->u64HostRip.u))
6021 { /* likely */ }
6022 else
6023 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRip);
6024 }
6025 else
6026 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostLongMode);
6027 }
6028 else
6029 {
6030 /* Logical processor is outside IA-32e mode. */
6031 if ( !fGstInLongMode
6032 && !fHostInLongMode)
6033 {
6034 /* PCIDE should not be set. */
6035 if (!(pVmcs->u64HostCr4.u & X86_CR4_PCIDE))
6036 { /* likely */ }
6037 else
6038 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pcide);
6039
6040 /* The high 32-bits of RIP MBZ. */
6041 if (!pVmcs->u64HostRip.s.Hi)
6042 { /* likely */ }
6043 else
6044 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRipRsvd);
6045 }
6046 else
6047 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongMode);
6048 }
6049 }
6050 else
6051 {
6052 /* Host address-space size for 32-bit CPUs. */
6053 if ( !fGstInLongMode
6054 && !fHostInLongMode)
6055 { /* likely */ }
6056 else
6057 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongModeNoCpu);
6058 }
6059
6060 NOREF(pszInstr);
6061 NOREF(pszFailure);
6062 return VINF_SUCCESS;
6063}
6064
6065
6066/**
6067 * Checks VM-entry controls fields as part of VM-entry.
6068 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
6069 *
6070 * @returns VBox status code.
6071 * @param pVCpu The cross context virtual CPU structure.
6072 * @param pszInstr The VMX instruction name (for logging purposes).
6073 */
6074IEM_STATIC int iemVmxVmentryCheckEntryCtls(PVMCPU pVCpu, const char *pszInstr)
6075{
6076 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6077 const char * const pszFailure = "VMFail";
6078
6079 /* VM-entry controls. */
6080 VMXCTLSMSR const EntryCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.EntryCtls;
6081 if (!(~pVmcs->u32EntryCtls & EntryCtls.n.allowed0))
6082 { /* likely */ }
6083 else
6084 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);
6085
6086 if (!(pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1))
6087 { /* likely */ }
6088 else
6089 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsAllowed1);
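    /*
     * Note: The two checks above follow the standard VMX capability-MSR convention: a control
     * bit that is set in the 'allowed0' (allowed 0-settings) half must be 1, and a bit that is
     * clear in the 'allowed1' (allowed 1-settings) half must be 0. Hence ~ctls & allowed0 and
     * ctls & ~allowed1 must both be zero. The exit, pin-based and processor-based control
     * checks elsewhere in this file use the same pattern.
     */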
6090
6091 /* Event injection. */
6092 uint32_t const uIntInfo = pVmcs->u32EntryIntInfo;
6093 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VALID))
6094 {
6095 /* Type and vector. */
6096 uint8_t const uType = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_TYPE);
6097 uint8_t const uVector = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VECTOR);
6098 uint8_t const uRsvd = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_RSVD_12_30);
6099 if ( !uRsvd
6100 && HMVmxIsEntryIntInfoTypeValid(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxMonitorTrapFlag, uType)
6101 && HMVmxIsEntryIntInfoVectorValid(uVector, uType))
6102 { /* likely */ }
6103 else
6104 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoTypeVecRsvd);
6105
6106 /* Exception error code. */
6107 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID))
6108 {
6109 /* Delivery possible only in Unrestricted-guest mode when CR0.PE is set. */
6110 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
6111 || (pVmcs->u64GuestCr0.s.Lo & X86_CR0_PE))
6112 { /* likely */ }
6113 else
6114 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodePe);
6115
6116 /* Exceptions that provide an error code. */
6117 if ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
6118 && ( uVector == X86_XCPT_DF
6119 || uVector == X86_XCPT_TS
6120 || uVector == X86_XCPT_NP
6121 || uVector == X86_XCPT_SS
6122 || uVector == X86_XCPT_GP
6123 || uVector == X86_XCPT_PF
6124 || uVector == X86_XCPT_AC))
6125 { /* likely */ }
6126 else
6127 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodeVec);
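            /*
             * Note: The vectors accepted above are exactly the hardware exceptions that deliver
             * an error code (#DF, #TS, #NP, #SS, #GP, #PF and #AC); requesting error-code
             * delivery for any other vector or event type is invalid per the Intel spec.
             */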
6128
6129 /* Exception error-code reserved bits. */
6130 if (!(pVmcs->u32EntryXcptErrCode & ~VMX_ENTRY_INT_XCPT_ERR_CODE_VALID_MASK))
6131 { /* likely */ }
6132 else
6133 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryXcptErrCodeRsvd);
6134
6135 /* Injecting a software interrupt, software exception or privileged software exception. */
6136 if ( uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
6137 || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
6138 || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
6139 {
6140 /* Instruction length must be in the range 0-15. */
6141 if (pVmcs->u32EntryInstrLen <= VMX_ENTRY_INSTR_LEN_MAX)
6142 { /* likely */ }
6143 else
6144 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLen);
6145
6146 /* Instruction length of 0 is allowed only when the CPU reports support for injecting events with an instruction length of 0. */
6147 if ( pVmcs->u32EntryInstrLen == 0
6148 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEntryInjectSoftInt)
6149 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLenZero);
6150 }
6151 }
6152 }
6153
6154 /* VM-entry MSR-load count and VM-entry MSR-load area address. */
6155 if (pVmcs->u32EntryMsrLoadCount)
6156 {
6157 if ( !(pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
6158 && !(pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6159 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
6160 { /* likely */ }
6161 else
6162 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrEntryMsrLoad);
6163 }
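    /*
     * Note: The address checks above enforce the Intel requirements for the VM-entry MSR-load
     * area when the count is non-zero: it must be 16-byte aligned (the VMX_AUTOMSR_OFFSET_MASK
     * test) and must not exceed the supported physical-address width. The additional
     * PGMPhysIsGCPhysNormal check is our own requirement so that the area can later be read as
     * normal RAM when loading the auto-load MSRs.
     */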
6164
6165 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)); /* We don't support SMM yet. */
6166 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON)); /* We don't support dual-monitor treatment yet. */
6167
6168 NOREF(pszInstr);
6169 NOREF(pszFailure);
6170 return VINF_SUCCESS;
6171}
6172
6173
6174/**
6175 * Checks VM-exit controls fields as part of VM-entry.
6176 * See Intel spec. 26.2.1.2 "VM-Exit Control Fields".
6177 *
6178 * @returns VBox status code.
6179 * @param pVCpu The cross context virtual CPU structure.
6180 * @param pszInstr The VMX instruction name (for logging purposes).
6181 */
6182IEM_STATIC int iemVmxVmentryCheckExitCtls(PVMCPU pVCpu, const char *pszInstr)
6183{
6184 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6185 const char * const pszFailure = "VMFail";
6186
6187 /* VM-exit controls. */
6188 VMXCTLSMSR const ExitCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ExitCtls;
6189 if (!(~pVmcs->u32ExitCtls & ExitCtls.n.allowed0))
6190 { /* likely */ }
6191 else
6192 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);
6193
6194 if (!(pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1))
6195 { /* likely */ }
6196 else
6197 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsAllowed1);
6198
6199 /* Save preemption timer without activating it. */
6200 if ( (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
6201 || !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
6202 { /* likely */ }
6203 else
6204 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_SavePreemptTimer);
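    /*
     * Note: This is the contrapositive of the Intel rule that the "save VMX-preemption timer
     * value" VM-exit control may only be 1 if the "activate VMX-preemption timer" pin-based
     * control is also 1; having the pin control set or the exit control clear is fine, but not
     * the exit control set with the pin control clear.
     */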
6205
6206 /* VM-exit MSR-store count and VM-exit MSR-store area address. */
6207 if (pVmcs->u32ExitMsrStoreCount)
6208 {
6209 if ( !(pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
6210 && !(pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6211 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
6212 { /* likely */ }
6213 else
6214 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrStore);
6215 }
6216
6217 /* VM-exit MSR-load count and VM-exit MSR-load area address. */
6218 if (pVmcs->u32ExitMsrLoadCount)
6219 {
6220 if ( !(pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
6221 && !(pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6222 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
6223 { /* likely */ }
6224 else
6225 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrLoad);
6226 }
6227
6228 NOREF(pszInstr);
6229 NOREF(pszFailure);
6230 return VINF_SUCCESS;
6231}
6232
6233
6234/**
6235 * Checks VM-execution controls fields as part of VM-entry.
6236 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
6237 *
6238 * @returns VBox status code.
6239 * @param pVCpu The cross context virtual CPU structure.
6240 * @param pszInstr The VMX instruction name (for logging purposes).
6241 *
6242 * @remarks This may update secondary-processor based VM-execution control fields
6243 * in the current VMCS if necessary.
6244 */
6245IEM_STATIC int iemVmxVmentryCheckExecCtls(PVMCPU pVCpu, const char *pszInstr)
6246{
6247 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6248 const char * const pszFailure = "VMFail";
6249
6250 /* Pin-based VM-execution controls. */
6251 {
6252 VMXCTLSMSR const PinCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.PinCtls;
6253 if (!(~pVmcs->u32PinCtls & PinCtls.n.allowed0))
6254 { /* likely */ }
6255 else
6256 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);
6257
6258 if (!(pVmcs->u32PinCtls & ~PinCtls.n.allowed1))
6259 { /* likely */ }
6260 else
6261 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsAllowed1);
6262 }
6263
6264 /* Processor-based VM-execution controls. */
6265 {
6266 VMXCTLSMSR const ProcCtls = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls;
6267 if (!(~pVmcs->u32ProcCtls & ProcCtls.n.allowed0))
6268 { /* likely */ }
6269 else
6270 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);
6271
6272 if (!(pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1))
6273 { /* likely */ }
6274 else
6275 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsAllowed1);
6276 }
6277
6278 /* Secondary processor-based VM-execution controls. */
6279 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
6280 {
6281 VMXCTLSMSR const ProcCtls2 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.ProcCtls2;
6282 if (!(~pVmcs->u32ProcCtls2 & ProcCtls2.n.allowed0))
6283 { /* likely */ }
6284 else
6285 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);
6286
6287 if (!(pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1))
6288 { /* likely */ }
6289 else
6290 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Allowed1);
6291 }
6292 else
6293 Assert(!pVmcs->u32ProcCtls2);
6294
6295 /* CR3-target count. */
6296 if (pVmcs->u32Cr3TargetCount <= VMX_V_CR3_TARGET_COUNT)
6297 { /* likely */ }
6298 else
6299 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Cr3TargetCount);
6300
6301 /* I/O bitmaps physical addresses. */
6302 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
6303 {
6304 if ( !(pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
6305 && !(pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6306 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
6307 { /* likely */ }
6308 else
6309 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapA);
6310
6311 if ( !(pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
6312 && !(pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6313 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
6314 { /* likely */ }
6315 else
6316 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
6317 }
6318
6319 /* MSR bitmap physical address. */
6320 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
6321 {
6322 RTGCPHYS const GCPhysMsrBitmap = pVmcs->u64AddrMsrBitmap.u;
6323 if ( !(GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
6324 && !(GCPhysMsrBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6325 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysMsrBitmap))
6326 { /* likely */ }
6327 else
6328 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);
6329
6330 /* Read the MSR bitmap. */
6331 /** @todo NSTVMX: Move this to be done later (while loading guest state) when
6332 * implementing fast path. */
6333 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
6334 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap),
6335 GCPhysMsrBitmap, VMX_V_MSR_BITMAP_SIZE);
6336 if (RT_SUCCESS(rc))
6337 { /* likely */ }
6338 else
6339 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrBitmapPtrReadPhys);
6340 }
6341
6342 /* TPR shadow related controls. */
6343 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
6344 {
6345 /* Virtual-APIC page physical address. */
6346 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
6347 if ( !(GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
6348 && !(GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6349 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
6350 { /* likely */ }
6351 else
6352 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
6353
6354 /* TPR threshold without virtual-interrupt delivery. */
6355 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
6356 && (pVmcs->u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK))
6357 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdRsvd);
6358
6359 /* TPR threshold and VTPR. */
6360 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
6361 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
6362 {
6363 /* Read the VTPR from the virtual-APIC page. */
6364 uint8_t u8VTpr;
6365 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &u8VTpr, GCPhysVirtApic + XAPIC_OFF_TPR, sizeof(u8VTpr));
6366 if (RT_SUCCESS(rc))
6367 { /* likely */ }
6368 else
6369 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
6370
6371 /* Bits 3:0 of the TPR-threshold must not be greater than bits 7:4 of VTPR. */
6372 if ((uint8_t)RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) <= (u8VTpr & 0xf0))
6373 { /* likely */ }
6374 else
6375 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
6376 }
6377 }
6378 else
6379 {
6380 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6381 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
6382 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
6383 { /* likely */ }
6384 else
6385 {
6386 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6387 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicTprShadow);
6388 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
6389 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ApicRegVirt);
6390 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
6391 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtIntDelivery);
6392 }
6393 }
6394
6395 /* NMI exiting and virtual-NMIs. */
6396 if ( (pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
6397 || !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
6398 { /* likely */ }
6399 else
6400 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtNmi);
6401
6402 /* Virtual-NMIs and NMI-window exiting. */
6403 if ( (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6404 || !(pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
6405 { /* likely */ }
6406 else
6407 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_NmiWindowExit);
6408
6409 /* Virtualize APIC accesses. */
6410 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
6411 {
6412 /* APIC-access physical address. */
6413 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
6414 if ( !(GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
6415 && !(GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6416 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
6417 { /* likely */ }
6418 else
6419 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);
6420
6421 /*
6422 * Disallow APIC-access page and virtual-APIC page from being the same address.
6423 * Note! This is not an Intel requirement, but one imposed by our implementation.
6424 */
6425 /** @todo r=ramshankar: This is done primarily to simplify recursion scenarios while
6426 * redirecting accesses between the APIC-access page and the virtual-APIC
6427 * page. If any guest hypervisor requires this, we can implement it later. */
6428 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
6429 {
6430 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
6431 if (GCPhysVirtApic != GCPhysApicAccess)
6432 { /* likely */ }
6433 else
6434 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessEqVirtApic);
6435 }
6436
6437 /*
6438 * Register the handler for the APIC-access page.
6439 *
6440 * We don't deregister the APIC-access page handler during the VM-exit as a different
6441 * nested-VCPU might be using the same guest-physical address for its APIC-access page.
6442 *
6443 * We leave the page registered until the first access that happens outside VMX non-root
6444 * mode. Guest software is allowed to access structures such as the APIC-access page
6445 * only when no logical processor with a current VMCS references it in VMX non-root mode,
6446 * otherwise it can lead to unpredictable behavior including guest triple-faults.
6447 *
6448 * See Intel spec. 24.11.4 "Software Access to Related Structures".
6449 */
6450 int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess,
6451 pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
6452 NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
6453 if (RT_SUCCESS(rc))
6454 { /* likely */ }
6455 else
6456 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessHandlerReg);
6457 }
6458
6459 /* Virtualize-x2APIC mode is mutually exclusive with virtualize-APIC accesses. */
6460 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6461 || !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
6462 { /* likely */ }
6463 else
6464 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
6465
6466 /* Virtual-interrupt delivery requires external interrupt exiting. */
6467 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
6468 || (pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
6469 { /* likely */ }
6470 else
6471 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
6472
6473 /* VPID. */
6474 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VPID)
6475 || pVmcs->u16Vpid != 0)
6476 { /* likely */ }
6477 else
6478 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Vpid);
6479
6480 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_POSTED_INT)); /* We don't support posted interrupts yet. */
6481 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)); /* We don't support EPT yet. */
6482 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PML)); /* We don't support PML yet. */
6483 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)); /* We don't support Unrestricted-guests yet. */
6484 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMFUNC)); /* We don't support VM functions yet. */
6485 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT_VE)); /* We don't support EPT-violation #VE yet. */
6486 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)); /* We don't support Pause-loop exiting yet. */
6487
6488 /* VMCS shadowing. */
6489 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
6490 {
6491 /* VMREAD-bitmap physical address. */
6492 RTGCPHYS const GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
6493 if ( !(GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
6494 && !(GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6495 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
6496 { /* likely */ }
6497 else
6498 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmreadBitmap);
6499
6500 /* VMWRITE-bitmap physical address. */
6501 RTGCPHYS const GCPhysVmwriteBitmap = pVmcs->u64AddrVmwriteBitmap.u;
6502 if ( !(GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
6503 && !(GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6504 && PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
6505 { /* likely */ }
6506 else
6507 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);
6508
6509 /* Read the VMREAD-bitmap. */
6510 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
6511 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap),
6512 GCPhysVmreadBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
6513 if (RT_SUCCESS(rc))
6514 { /* likely */ }
6515 else
6516 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys);
6517
6518 /* Read the VMWRITE-bitmap. */
6519 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap));
6520 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap),
6521 GCPhysVmwriteBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
6522 if (RT_SUCCESS(rc))
6523 { /* likely */ }
6524 else
6525 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys);
6526 }
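    /*
     * Note: Like the MSR bitmap above, the VMREAD and VMWRITE bitmaps are cached into host
     * memory here so that later VMREAD/VMWRITE intercept decisions need not re-read guest
     * memory; VMX_V_VMREAD_VMWRITE_BITMAP_SIZE is expected to cover one 4K page per bitmap.
     */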
6527
6528 NOREF(pszInstr);
6529 NOREF(pszFailure);
6530 return VINF_SUCCESS;
6531}
6532
6533
6534/**
6535 * Loads the guest control registers, debug register and some MSRs as part of
6536 * VM-entry.
6537 *
6538 * @param pVCpu The cross context virtual CPU structure.
6539 */
6540IEM_STATIC void iemVmxVmentryLoadGuestControlRegsMsrs(PVMCPU pVCpu)
6541{
6542 /*
6543 * Load guest control registers, debug registers and MSRs.
6544 * See Intel spec. 26.3.2.1 "Loading Guest Control Registers, Debug Registers and MSRs".
6545 */
6546 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6547
6548 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
6549 uint64_t const uGstCr0 = (pVmcs->u64GuestCr0.u & ~VMX_ENTRY_CR0_IGNORE_MASK)
6550 | (pVCpu->cpum.GstCtx.cr0 & VMX_ENTRY_CR0_IGNORE_MASK);
6551 CPUMSetGuestCR0(pVCpu, uGstCr0);
6552 CPUMSetGuestCR4(pVCpu, pVmcs->u64GuestCr4.u);
6553 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64GuestCr3.u;
6554
6555 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
6556 pVCpu->cpum.GstCtx.dr[7] = (pVmcs->u64GuestDr7.u & ~VMX_ENTRY_DR7_MBZ_MASK) | VMX_ENTRY_DR7_MB1_MASK;
6557
6558 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64GuestSysenterEip.s.Lo;
6559 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64GuestSysenterEsp.s.Lo;
6560 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32GuestSysenterCS;
6561
6562 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6563 {
6564 /* FS base and GS base are loaded while loading the rest of the guest segment registers. */
6565
6566 /* EFER MSR. */
6567 if (!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR))
6568 {
6569 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_EFER);
6570 uint64_t const uHostEfer = pVCpu->cpum.GstCtx.msrEFER;
6571 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
6572 bool const fGstPaging = RT_BOOL(uGstCr0 & X86_CR0_PG);
6573 if (fGstInLongMode)
6574 {
6575 /* If the nested-guest is in long mode, LMA and LME are both set. */
6576 Assert(fGstPaging);
6577 pVCpu->cpum.GstCtx.msrEFER = uHostEfer | (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
6578 }
6579 else
6580 {
6581 /*
6582 * If the nested-guest is outside long mode:
6583 * - With paging: LMA is cleared, LME is cleared.
6584 * - Without paging: LMA is cleared, LME is left unmodified.
6585 */
6586 uint64_t const fLmaLmeMask = MSR_K6_EFER_LMA | (fGstPaging ? MSR_K6_EFER_LME : 0);
6587 pVCpu->cpum.GstCtx.msrEFER = uHostEfer & ~fLmaLmeMask;
6588 }
6589 }
6590 /* else: see below. */
6591 }
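    /*
     * Note: To summarize the EFER handling above when the "load IA32_EFER" VM-entry control is
     * clear: IA32e-mode guest set -> LMA=1, LME=1; IA32e-mode guest clear with CR0.PG=1 ->
     * LMA=0, LME=0; IA32e-mode guest clear with CR0.PG=0 -> LMA=0, LME left unchanged. When
     * the control is set, EFER is simply loaded from the VMCS further below.
     */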
6592
6593 /* PAT MSR. */
6594 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
6595 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64GuestPatMsr.u;
6596
6597 /* EFER MSR. */
6598 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
6599 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64GuestEferMsr.u;
6600
6601 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
6602 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
6603
6604 /* We don't support IA32_BNDCFGS MSR yet. */
6605 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
6606
6607 /* Nothing to do for SMBASE register - We don't support SMM yet. */
6608}
6609
6610
6611/**
6612 * Loads the guest segment registers, GDTR, IDTR, LDTR and TR as part of VM-entry.
6613 *
6614 * @param pVCpu The cross context virtual CPU structure.
6615 */
6616IEM_STATIC void iemVmxVmentryLoadGuestSegRegs(PVMCPU pVCpu)
6617{
6618 /*
6619 * Load guest segment registers, GDTR, IDTR, LDTR and TR.
6620 * See Intel spec. 26.3.2.2 "Loading Guest Segment Registers and Descriptor-Table Registers".
6621 */
6622 /* CS, SS, ES, DS, FS, GS. */
6623 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6624 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
6625 {
6626 PCPUMSELREG pGstSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6627 CPUMSELREG VmcsSelReg;
6628 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &VmcsSelReg);
6629 AssertRC(rc); NOREF(rc);
6630 if (!(VmcsSelReg.Attr.u & X86DESCATTR_UNUSABLE))
6631 {
6632 pGstSelReg->Sel = VmcsSelReg.Sel;
6633 pGstSelReg->ValidSel = VmcsSelReg.Sel;
6634 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6635 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6636 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
6637 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
6638 }
6639 else
6640 {
6641 pGstSelReg->Sel = VmcsSelReg.Sel;
6642 pGstSelReg->ValidSel = VmcsSelReg.Sel;
6643 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6644 switch (iSegReg)
6645 {
6646 case X86_SREG_CS:
6647 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6648 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
6649 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
6650 break;
6651
6652 case X86_SREG_SS:
6653 pGstSelReg->u64Base = VmcsSelReg.u64Base & UINT32_C(0xfffffff0);
6654 pGstSelReg->u32Limit = 0;
6655 pGstSelReg->Attr.u = (VmcsSelReg.Attr.u & X86DESCATTR_DPL) | X86DESCATTR_D | X86DESCATTR_UNUSABLE;
6656 break;
6657
6658 case X86_SREG_ES:
6659 case X86_SREG_DS:
6660 pGstSelReg->u64Base = 0;
6661 pGstSelReg->u32Limit = 0;
6662 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
6663 break;
6664
6665 case X86_SREG_FS:
6666 case X86_SREG_GS:
6667 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6668 pGstSelReg->u32Limit = 0;
6669 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
6670 break;
6671 }
6672 Assert(pGstSelReg->Attr.n.u1Unusable);
6673 }
6674 }
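    /*
     * Note: For registers marked unusable, only the selector is always taken from the VMCS;
     * the base, limit and attributes are replaced with the per-register fixed values above.
     * The Intel spec leaves several of these undefined for unusable registers, so the exact
     * values chosen here are an implementation detail.
     */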
6675
6676 /* LDTR. */
6677 pVCpu->cpum.GstCtx.ldtr.Sel = pVmcs->GuestLdtr;
6678 pVCpu->cpum.GstCtx.ldtr.ValidSel = pVmcs->GuestLdtr;
6679 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
6680 if (!(pVmcs->u32GuestLdtrAttr & X86DESCATTR_UNUSABLE))
6681 {
6682 pVCpu->cpum.GstCtx.ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
6683 pVCpu->cpum.GstCtx.ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
6684 pVCpu->cpum.GstCtx.ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
6685 }
6686 else
6687 {
6688 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
6689 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
6690 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
6691 }
6692
6693 /* TR. */
6694 Assert(!(pVmcs->u32GuestTrAttr & X86DESCATTR_UNUSABLE));
6695 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->GuestTr;
6696 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->GuestTr;
6697 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
6698 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64GuestTrBase.u;
6699 pVCpu->cpum.GstCtx.tr.u32Limit = pVmcs->u32GuestTrLimit;
6700 pVCpu->cpum.GstCtx.tr.Attr.u = pVmcs->u32GuestTrAttr;
6701
6702 /* GDTR. */
6703 pVCpu->cpum.GstCtx.gdtr.cbGdt = pVmcs->u32GuestGdtrLimit;
6704 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64GuestGdtrBase.u;
6705
6706 /* IDTR. */
6707 pVCpu->cpum.GstCtx.idtr.cbIdt = pVmcs->u32GuestIdtrLimit;
6708 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64GuestIdtrBase.u;
6709}
6710
6711
6712/**
6713 * Loads the guest MSRs from the VM-entry MSR-load area as part of VM-entry.
6714 *
6715 * @returns VBox status code.
6716 * @param pVCpu The cross context virtual CPU structure.
6717 * @param pszInstr The VMX instruction name (for logging purposes).
6718 */
6719IEM_STATIC int iemVmxVmentryLoadGuestAutoMsrs(PVMCPU pVCpu, const char *pszInstr)
6720{
6721 /*
6722 * Load guest MSRs.
6723 * See Intel spec. 26.4 "Loading MSRs".
6724 */
6725 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6726 const char *const pszFailure = "VM-exit";
6727
6728 /*
6729 * The VM-entry MSR-load area address need not be a valid guest-physical address if the
6730 * VM-entry MSR load count is 0. If this is the case, bail early without reading it.
6731 * See Intel spec. 24.8.2 "VM-Entry Controls for MSRs".
6732 */
6733 uint32_t const cMsrs = pVmcs->u32EntryMsrLoadCount;
6734 if (!cMsrs)
6735 return VINF_SUCCESS;
6736
6737 /*
6738 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count is
6739 * exceeded, including possibly raising #MC exceptions during the VMX transition. Our
6740 * implementation shall fail VM-entry with a VMX_EXIT_ERR_MSR_LOAD VM-exit.
6741 */
6742 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
6743 if (fIsMsrCountValid)
6744 { /* likely */ }
6745 else
6746 {
6747 iemVmxVmcsSetExitQual(pVCpu, VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
6748 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadCount);
6749 }
6750
6751 RTGCPHYS const GCPhysVmEntryMsrLoadArea = pVmcs->u64AddrEntryMsrLoad.u;
6752 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pEntryMsrLoadArea),
6753 GCPhysVmEntryMsrLoadArea, cMsrs * sizeof(VMXAUTOMSR));
6754 if (RT_SUCCESS(rc))
6755 {
6756 PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pEntryMsrLoadArea);
6757 Assert(pMsr);
6758 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
6759 {
6760 if ( !pMsr->u32Reserved
6761 && pMsr->u32Msr != MSR_K8_FS_BASE
6762 && pMsr->u32Msr != MSR_K8_GS_BASE
6763 && pMsr->u32Msr != MSR_K6_EFER
6764 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
6765 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
6766 {
6767 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
6768 if (rcStrict == VINF_SUCCESS)
6769 continue;
6770
6771 /*
6772 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue VM-entry.
6773 * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VM-entry failure
6774 * recording the MSR index in the Exit qualification (as per the Intel spec.) and indicating it
6775 * further with our own, specific diagnostic code. Later, we can try to implement handling of the
6776 * MSR in ring-0 if possible, or come up with a better, generic solution.
6777 */
6778 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
6779 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
6780 ? kVmxVDiag_Vmentry_MsrLoadRing3
6781 : kVmxVDiag_Vmentry_MsrLoad;
6782 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
6783 }
6784 else
6785 {
6786 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
6787 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadRsvd);
6788 }
6789 }
6790 }
6791 else
6792 {
6793 AssertMsgFailed(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysVmEntryMsrLoadArea, rc));
6794 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadPtrReadPhys);
6795 }
6796
6797 NOREF(pszInstr);
6798 NOREF(pszFailure);
6799 return VINF_SUCCESS;
6800}
6801
6802
6803/**
6804 * Loads the guest-state non-register state as part of VM-entry.
6805 *
6806 * @returns VBox status code.
6807 * @param pVCpu The cross context virtual CPU structure.
6808 *
6809 * @remarks This must be called only after loading the nested-guest register state
6810 * (especially nested-guest RIP).
6811 */
6812IEM_STATIC void iemVmxVmentryLoadGuestNonRegState(PVMCPU pVCpu)
6813{
6814 /*
6815 * Load guest non-register state.
6816 * See Intel spec. 26.6 "Special Features of VM Entry"
6817 */
6818 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6819
6820 /*
6821 * If VM-entry is not vectoring, block-by-STI and block-by-MovSS state must be loaded.
6822 * If VM-entry is vectoring, there is no block-by-STI or block-by-MovSS.
6823 *
6824 * See Intel spec. 26.6.1 "Interruptibility State".
6825 */
6826 bool const fEntryVectoring = HMVmxIsVmentryVectoring(pVmcs->u32EntryIntInfo, NULL /* puEntryIntInfoType */);
6827 if ( !fEntryVectoring
6828 && (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)))
6829 EMSetInhibitInterruptsPC(pVCpu, pVmcs->u64GuestRip.u);
6830 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
6831 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
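    /*
     * Note: Both block-by-STI and block-by-MovSS are modelled with the single interrupt-inhibit
     * force-flag anchored to the guest RIP taken from the VMCS; the two states are not
     * distinguished here (see also the related todo in the VMLAUNCH/VMRESUME worker).
     */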
6832
6833 /* NMI blocking. */
6834 if (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
6835 {
6836 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6837 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = true;
6838 else
6839 {
6840 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false;
6841 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
6842 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6843 }
6844 }
6845 else
6846 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false;
6847
6848 /* SMI blocking is irrelevant. We don't support SMIs yet. */
6849
6850 /* Loading PDPTEs will be taken care when we switch modes. We don't support EPT yet. */
6851 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
6852
6853 /* VPID is irrelevant. We don't support VPID yet. */
6854
6855 /* Clear address-range monitoring. */
6856 EMMonitorWaitClear(pVCpu);
6857}
6858
6859
6860/**
6861 * Loads the guest-state as part of VM-entry.
6862 *
6863 * @returns VBox status code.
6864 * @param pVCpu The cross context virtual CPU structure.
6865 * @param pszInstr The VMX instruction name (for logging purposes).
6866 *
6867 * @remarks This must be done after all the necessary steps prior to loading of
6868 * guest-state (e.g. checking various VMCS state).
6869 */
6870IEM_STATIC int iemVmxVmentryLoadGuestState(PVMCPU pVCpu, const char *pszInstr)
6871{
6872 iemVmxVmentryLoadGuestControlRegsMsrs(pVCpu);
6873 iemVmxVmentryLoadGuestSegRegs(pVCpu);
6874
6875 /*
6876 * Load guest RIP, RSP and RFLAGS.
6877 * See Intel spec. 26.3.2.3 "Loading Guest RIP, RSP and RFLAGS".
6878 */
6879 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6880 pVCpu->cpum.GstCtx.rsp = pVmcs->u64GuestRsp.u;
6881 pVCpu->cpum.GstCtx.rip = pVmcs->u64GuestRip.u;
6882 pVCpu->cpum.GstCtx.rflags.u = pVmcs->u64GuestRFlags.u;
6883
6884 /* Initialize the PAUSE-loop controls as part of VM-entry. */
6885 pVCpu->cpum.GstCtx.hwvirt.vmx.uFirstPauseLoopTick = 0;
6886 pVCpu->cpum.GstCtx.hwvirt.vmx.uPrevPauseTick = 0;
6887
6888 iemVmxVmentryLoadGuestNonRegState(pVCpu);
6889
6890 NOREF(pszInstr);
6891 return VINF_SUCCESS;
6892}
6893
6894
6895/**
6896 * Returns whether there is a pending debug exception on VM-entry.
6897 *
6898 * @param pVCpu The cross context virtual CPU structure.
6899 * @param pszInstr The VMX instruction name (for logging purposes).
6900 */
6901IEM_STATIC bool iemVmxVmentryIsPendingDebugXcpt(PVMCPU pVCpu, const char *pszInstr)
6902{
6903 /*
6904 * Pending debug exceptions.
6905 * See Intel spec. 26.6.3 "Delivery of Pending Debug Exceptions after VM Entry".
6906 */
6907 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6908 Assert(pVmcs);
6909
6910 bool fPendingDbgXcpt = RT_BOOL(pVmcs->u64GuestPendingDbgXcpt.u & ( VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS
6911 | VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_EN_BP));
6912 if (fPendingDbgXcpt)
6913 {
6914 uint8_t uEntryIntInfoType;
6915 bool const fEntryVectoring = HMVmxIsVmentryVectoring(pVmcs->u32EntryIntInfo, &uEntryIntInfoType);
6916 if (fEntryVectoring)
6917 {
6918 switch (uEntryIntInfoType)
6919 {
6920 case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
6921 case VMX_ENTRY_INT_INFO_TYPE_NMI:
6922 case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
6923 case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT:
6924 fPendingDbgXcpt = false;
6925 break;
6926
6927 case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT:
6928 {
6929 /*
6930 * Whether the pending debug exception for software exceptions other than
6931 * #BP and #OF is delivered after injecting the exception or is discarded
6932 * is CPU implementation specific. We will discard it (easier).
6933 */
6934 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(pVmcs->u32EntryIntInfo);
6935 if ( uVector != X86_XCPT_BP
6936 && uVector != X86_XCPT_OF)
6937 fPendingDbgXcpt = false;
6938 RT_FALL_THRU();
6939 }
6940 case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
6941 {
6942 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
6943 fPendingDbgXcpt = false;
6944 break;
6945 }
6946 }
6947 }
6948 else
6949 {
6950 /*
6951 * When the VM-entry is not vectoring but there is blocking-by-MovSS, whether the
6952 * pending debug exception is held pending or is discarded is CPU implementation
6953 * specific. We will discard it (easier).
6954 */
6955 if (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
6956 fPendingDbgXcpt = false;
6957
6958 /* There's no pending debug exception in the shutdown or wait-for-SIPI state. */
6959 if (pVmcs->u32GuestActivityState & (VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN | VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT))
6960 fPendingDbgXcpt = false;
6961 }
6962 }
6963
6964 NOREF(pszInstr);
6965 return fPendingDbgXcpt;
6966}
6967
6968
6969/**
6970 * Sets up the monitor-trap flag (MTF).
6971 *
6972 * @param pVCpu The cross context virtual CPU structure.
6973 * @param pszInstr The VMX instruction name (for logging purposes).
6974 */
6975IEM_STATIC void iemVmxVmentrySetupMtf(PVMCPU pVCpu, const char *pszInstr)
6976{
6977 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6978 Assert(pVmcs);
6979 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_MONITOR_TRAP_FLAG)
6980 {
6981 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_MTF);
6982 Log(("%s: Monitor-trap flag set on VM-entry\n", pszInstr));
6983 }
6984 else
6985 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
6986 NOREF(pszInstr);
6987}
6988
6989
6990/**
6991 * Sets up NMI-window exiting.
6992 *
6993 * @param pVCpu The cross context virtual CPU structure.
6994 * @param pszInstr The VMX instruction name (for logging purposes).
6995 */
6996IEM_STATIC void iemVmxVmentrySetupNmiWindow(PVMCPU pVCpu, const char *pszInstr)
6997{
6998 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6999 Assert(pVmcs);
7000 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT)
7001 {
7002 Assert(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI);
7003 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW);
7004 Log(("%s: NMI-window set on VM-entry\n", pszInstr));
7005 }
7006 else
7007 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
7008 NOREF(pszInstr);
7009}
7010
7011
7012/**
7013 * Sets up interrupt-window exiting.
7014 *
7015 * @param pVCpu The cross context virtual CPU structure.
7016 * @param pszInstr The VMX instruction name (for logging purposes).
7017 */
7018IEM_STATIC void iemVmxVmentrySetupIntWindow(PVMCPU pVCpu, const char *pszInstr)
7019{
7020 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7021 Assert(pVmcs);
7022 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT)
7023 {
7024 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW);
7025 Log(("%s: Interrupt-window set on VM-entry\n", pszInstr));
7026 }
7027 else
7028 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
7029 NOREF(pszInstr);
7030}
7031
7032
7033/**
7034 * Sets up the VMX-preemption timer.
7035 *
7036 * @param pVCpu The cross context virtual CPU structure.
7037 * @param pszInstr The VMX instruction name (for logging purposes).
7038 */
7039IEM_STATIC void iemVmxVmentrySetupPreemptTimer(PVMCPU pVCpu, const char *pszInstr)
7040{
7041 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7042 Assert(pVmcs);
7043 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
7044 {
7045 uint64_t const uEntryTick = TMCpuTickGetNoCheck(pVCpu);
7046 pVCpu->cpum.GstCtx.hwvirt.vmx.uEntryTick = uEntryTick;
7047 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER);
7048
7049 Log(("%s: VM-entry set up VMX-preemption timer at %#RX64\n", pszInstr, uEntryTick));
7050 }
7051 else
7052 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
7053
7054 NOREF(pszInstr);
7055}
7056
7057
7058/**
7059 * Injects an event using TRPM given a VM-entry interruption info. and related
7060 * fields.
7061 *
7062 * @returns VBox status code.
7063 * @param pVCpu The cross context virtual CPU structure.
7064 * @param uEntryIntInfo The VM-entry interruption info.
7065 * @param uErrCode The error code associated with the event if any.
7066 * @param cbInstr The VM-entry instruction length (for software
7067 * interrupts and software exceptions). Pass 0
7068 * otherwise.
7069 * @param GCPtrFaultAddress The guest CR2 if this is a \#PF event.
7070 */
7071IEM_STATIC int iemVmxVmentryInjectTrpmEvent(PVMCPU pVCpu, uint32_t uEntryIntInfo, uint32_t uErrCode, uint32_t cbInstr,
7072 RTGCUINTPTR GCPtrFaultAddress)
7073{
7074 Assert(VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo));
7075
7076 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
7077 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo);
7078 bool const fErrCodeValid = VMX_ENTRY_INT_INFO_IS_ERROR_CODE_VALID(uEntryIntInfo);
7079
7080 TRPMEVENT enmTrapType;
7081 switch (uType)
7082 {
7083 case VMX_ENTRY_INT_INFO_TYPE_EXT_INT:
7084 enmTrapType = TRPM_HARDWARE_INT;
7085 break;
7086
7087 case VMX_ENTRY_INT_INFO_TYPE_NMI:
7088 case VMX_ENTRY_INT_INFO_TYPE_HW_XCPT:
7089 enmTrapType = TRPM_TRAP;
7090 break;
7091
7092 case VMX_ENTRY_INT_INFO_TYPE_SW_INT:
7093 enmTrapType = TRPM_SOFTWARE_INT;
7094 break;
7095
7096 case VMX_ENTRY_INT_INFO_TYPE_SW_XCPT: /* #BP and #OF */
7097 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
7098 enmTrapType = TRPM_SOFTWARE_INT;
7099 break;
7100
7101 case VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT: /* #DB (INT1/ICEBP). */
7102 Assert(uVector == X86_XCPT_DB);
7103 enmTrapType = TRPM_SOFTWARE_INT;
7104 break;
7105
7106 default:
7107 /* Shouldn't really happen. */
7108 AssertMsgFailedReturn(("Invalid trap type %#x\n", uType), VERR_VMX_IPE_4);
7109 break;
7110 }
7111
7112 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
7113 AssertRCReturn(rc, rc);
7114
7115 if (fErrCodeValid)
7116 TRPMSetErrorCode(pVCpu, uErrCode);
7117
7118 if ( enmTrapType == TRPM_TRAP
7119 && uVector == X86_XCPT_PF)
7120 TRPMSetFaultAddress(pVCpu, GCPtrFaultAddress);
7121 else if (enmTrapType == TRPM_SOFTWARE_INT)
7122 TRPMSetInstrLength(pVCpu, cbInstr);
7123
7124 return VINF_SUCCESS;
7125}
7126
7127
7128/**
7129 * Performs event injection (if any) as part of VM-entry.
7130 *
7131 * @param pVCpu The cross context virtual CPU structure.
7132 * @param pszInstr The VMX instruction name (for logging purposes).
7133 */
7134IEM_STATIC int iemVmxVmentryInjectEvent(PVMCPU pVCpu, const char *pszInstr)
7135{
7136 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7137
7138 /*
7139 * Inject events.
7140 * The event that is going to be made pending for injection is not subject to VMX intercepts,
7141 * thus we flag ignoring of intercepts. However, recursive exceptions, if any, during delivery
7142 * of the current event -are- subject to intercepts, hence this flag will be flipped during
7143 * the actual delivery of this event.
7144 *
7145 * See Intel spec. 26.5 "Event Injection".
7146 */
7147 uint32_t const uEntryIntInfo = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->u32EntryIntInfo;
7148 bool const fEntryIntInfoValid = VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo);
7149
7150 pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents = !fEntryIntInfoValid;
7151 if (fEntryIntInfoValid)
7152 {
7153 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
7154 if (uType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT)
7155 {
7156 Assert(VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo) == VMX_ENTRY_INT_INFO_VECTOR_MTF);
7157 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_MTF);
7158 return VINF_SUCCESS;
7159 }
7160
7161 int rc = iemVmxVmentryInjectTrpmEvent(pVCpu, uEntryIntInfo, pVmcs->u32EntryXcptErrCode, pVmcs->u32EntryInstrLen,
7162 pVCpu->cpum.GstCtx.cr2);
7163 if (RT_SUCCESS(rc))
7164 {
7165 /*
7166 * We need to clear the VM-entry interruption information field's valid bit on VM-exit.
7167 *
7168 * However, we do it here on VM-entry because, while the change remains invisible to
7169 * guest software until VM-exit, HM can then simply copy the VM-entry interruption
7170 * information field when it looks at the VMCS to continue nested-guest execution
7171 * using hardware-assisted VT-x.
7172 *
7173 * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
7174 */
7175 pVmcs->u32EntryIntInfo &= ~VMX_ENTRY_INT_INFO_VALID;
7176 }
7177 return rc;
7178 }
7179
7180 /*
7181 * Inject any pending guest debug exception.
7182 * Unlike injecting events, this #DB injection on VM-entry is subject to #DB VMX intercept.
7183 * See Intel spec. 26.6.3 "Delivery of Pending Debug Exceptions after VM Entry".
7184 */
7185 bool const fPendingDbgXcpt = iemVmxVmentryIsPendingDebugXcpt(pVCpu, pszInstr);
7186 if (fPendingDbgXcpt)
7187 {
7188 uint32_t const uDbgXcptInfo = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR, X86_XCPT_DB)
7189 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE, VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
7190 | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID, 1);
7191 return iemVmxVmentryInjectTrpmEvent(pVCpu, uDbgXcptInfo, 0 /* uErrCode */, pVmcs->u32EntryInstrLen,
7192 0 /* GCPtrFaultAddress */);
7193 }
7194
7195 NOREF(pszInstr);
7196 return VINF_SUCCESS;
7197}
7198
7199
7200/**
7201 * Initializes all read-only VMCS fields as part of VM-entry.
7202 *
7203 * @param pVCpu The cross context virtual CPU structure.
7204 */
7205IEM_STATIC void iemVmxVmentryInitReadOnlyFields(PVMCPU pVCpu)
7206{
7207 /*
7208 * Any VMCS field which we do not establish on every VM-exit but may potentially
7209 * be used on the VM-exit path of a guest hypervisor -and- is not explicitly
7210 * specified to be undefined needs to be initialized here.
7211 *
7212 * Thus, it is especially important to clear the Exit qualification field
7213 * since it must be zero for VM-exits where it is not used. Similarly, the
7214 * VM-exit interruption information field's valid bit needs to be cleared for
7215 * the same reasons.
7216 */
7217 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7218 Assert(pVmcs);
7219
7220 /* 16-bit (none currently). */
7221 /* 32-bit. */
7222 pVmcs->u32RoVmInstrError = 0;
7223 pVmcs->u32RoExitReason = 0;
7224 pVmcs->u32RoExitIntInfo = 0;
7225 pVmcs->u32RoExitIntErrCode = 0;
7226 pVmcs->u32RoIdtVectoringInfo = 0;
7227 pVmcs->u32RoIdtVectoringErrCode = 0;
7228 pVmcs->u32RoExitInstrLen = 0;
7229 pVmcs->u32RoExitInstrInfo = 0;
7230
7231 /* 64-bit. */
7232 pVmcs->u64RoGuestPhysAddr.u = 0;
7233
7234 /* Natural-width. */
7235 pVmcs->u64RoExitQual.u = 0;
7236 pVmcs->u64RoIoRcx.u = 0;
7237 pVmcs->u64RoIoRsi.u = 0;
7238 pVmcs->u64RoIoRdi.u = 0;
7239 pVmcs->u64RoIoRip.u = 0;
7240 pVmcs->u64RoGuestLinearAddr.u = 0;
7241}
7242
7243
7244/**
7245 * VMLAUNCH/VMRESUME instruction execution worker.
7246 *
7247 * @returns Strict VBox status code.
7248 * @param pVCpu The cross context virtual CPU structure.
7249 * @param cbInstr The instruction length in bytes.
7250 * @param uInstrId The instruction identity (VMXINSTRID_VMLAUNCH or
7251 * VMXINSTRID_VMRESUME).
7252 *
7253 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
7254 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7255 */
7256IEM_STATIC VBOXSTRICTRC iemVmxVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
7257{
7258# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
7259 RT_NOREF3(pVCpu, cbInstr, uInstrId);
7260 return VINF_EM_RAW_EMULATE_INSTR;
7261# else
7262 Assert( uInstrId == VMXINSTRID_VMLAUNCH
7263 || uInstrId == VMXINSTRID_VMRESUME);
7264 const char *pszInstr = uInstrId == VMXINSTRID_VMRESUME ? "vmresume" : "vmlaunch";
7265
7266 /* Nested-guest intercept. */
7267 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7268 return iemVmxVmexitInstr(pVCpu, uInstrId == VMXINSTRID_VMRESUME ? VMX_EXIT_VMRESUME : VMX_EXIT_VMLAUNCH, cbInstr);
7269
7270 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7271
7272 /*
7273 * Basic VM-entry checks.
7274 * The order of the CPL, current and shadow VMCS and block-by-MovSS checks is important.
7275 * The checks following that do not have to follow a specific order.
7276 *
7277 * See Intel spec. 26.1 "Basic VM-entry Checks".
7278 */
7279
7280 /* CPL. */
7281 if (pVCpu->iem.s.uCpl == 0)
7282 { /* likely */ }
7283 else
7284 {
7285 Log(("%s: CPL %u -> #GP(0)\n", pszInstr, pVCpu->iem.s.uCpl));
7286 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_Cpl;
7287 return iemRaiseGeneralProtectionFault0(pVCpu);
7288 }
7289
7290 /* Current VMCS valid. */
7291 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
7292 { /* likely */ }
7293 else
7294 {
7295 Log(("%s: VMCS pointer %#RGp invalid -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7296 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrInvalid;
7297 iemVmxVmFailInvalid(pVCpu);
7298 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7299 return VINF_SUCCESS;
7300 }
7301
7302 /* Current VMCS is not a shadow VMCS. */
7303 if (!pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->u32VmcsRevId.n.fIsShadowVmcs)
7304 { /* likely */ }
7305 else
7306 {
7307 Log(("%s: VMCS pointer %#RGp is a shadow VMCS -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7308 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrShadowVmcs;
7309 iemVmxVmFailInvalid(pVCpu);
7310 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7311 return VINF_SUCCESS;
7312 }
7313
7314 /** @todo Distinguish block-by-MovSS from block-by-STI. Currently we
7315 * use block-by-STI here which is not quite correct. */
7316 if ( !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
7317 || pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
7318 { /* likely */ }
7319 else
7320 {
7321 Log(("%s: VM entry with events blocked by MOV SS -> VMFail\n", pszInstr));
7322 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_BlocKMovSS;
7323 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_BLOCK_MOVSS);
7324 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7325 return VINF_SUCCESS;
7326 }
7327
7328 if (uInstrId == VMXINSTRID_VMLAUNCH)
7329 {
7330 /* VMLAUNCH with non-clear VMCS. */
7331 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_LAUNCH_STATE_CLEAR)
7332 { /* likely */ }
7333 else
7334 {
7335 Log(("vmlaunch: VMLAUNCH with non-clear VMCS -> VMFail\n"));
7336 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsClear;
7337 iemVmxVmFail(pVCpu, VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS);
7338 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7339 return VINF_SUCCESS;
7340 }
7341 }
7342 else
7343 {
7344 /* VMRESUME with non-launched VMCS. */
7345 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_LAUNCH_STATE_LAUNCHED)
7346 { /* likely */ }
7347 else
7348 {
7349 Log(("vmresume: VMRESUME with non-launched VMCS -> VMFail\n"));
7350 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsLaunch;
7351 iemVmxVmFail(pVCpu, VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS);
7352 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7353 return VINF_SUCCESS;
7354 }
7355 }
7356
7357 /*
7358 * We are allowed to cache VMCS related data structures (such as I/O bitmaps, MSR bitmaps)
7359 * while entering VMX non-root mode. We do some of this while checking VM-execution
7360 * controls. The guest hypervisor should not make assumptions and cannot expect
7361 * predictable behavior if changes to these structures are made in guest memory while
7362 * executing in VMX non-root mode. As far as VirtualBox is concerned, the guest cannot
7363 * modify them anyway as we cache them in host memory. We trade memory for speed here.
7364 *
7365 * See Intel spec. 24.11.4 "Software Access to Related Structures".
7366 */
7367 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7368 Assert(pVmcs);
7369 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
7370
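    /*
     * Note: The check cascade below follows the Intel spec's ordering: the VM-execution,
     * VM-exit and VM-entry control fields and the host-state area are validated first (failures
     * VMFail with an instruction error), and only then is the guest state checked and loaded
     * (failures there instead cause a VM-exit with the entry-failure bit set).
     */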
7371 int rc = iemVmxVmentryCheckExecCtls(pVCpu, pszInstr);
7372 if (RT_SUCCESS(rc))
7373 {
7374 rc = iemVmxVmentryCheckExitCtls(pVCpu, pszInstr);
7375 if (RT_SUCCESS(rc))
7376 {
7377 rc = iemVmxVmentryCheckEntryCtls(pVCpu, pszInstr);
7378 if (RT_SUCCESS(rc))
7379 {
7380 rc = iemVmxVmentryCheckHostState(pVCpu, pszInstr);
7381 if (RT_SUCCESS(rc))
7382 {
7383 /* Initialize read-only VMCS fields before VM-entry since we don't update all of them for every VM-exit. */
7384 iemVmxVmentryInitReadOnlyFields(pVCpu);
7385
7386 /*
7387 * Blocking of NMIs needs to be restored if VM-entry fails due to invalid guest state.
7388 * So we save the VMCPU_FF_BLOCK_NMIS force-flag here so we can restore it on
7389 * VM-exit when required.
7390 * See Intel spec. 26.7 "VM-entry Failures During or After Loading Guest State"
7391 */
7392 iemVmxVmentrySaveNmiBlockingFF(pVCpu);
7393
7394 rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
7395 if (RT_SUCCESS(rc))
7396 {
7397 rc = iemVmxVmentryLoadGuestState(pVCpu, pszInstr);
7398 if (RT_SUCCESS(rc))
7399 {
7400 rc = iemVmxVmentryLoadGuestAutoMsrs(pVCpu, pszInstr);
7401 if (RT_SUCCESS(rc))
7402 {
7403 Assert(rc != VINF_CPUM_R3_MSR_WRITE);
7404
7405 /* VMLAUNCH instruction must update the VMCS launch state. */
7406 if (uInstrId == VMXINSTRID_VMLAUNCH)
7407 pVmcs->fVmcsState = VMX_V_VMCS_LAUNCH_STATE_LAUNCHED;
7408
7409 /* Perform the VMX transition (PGM updates). */
7410 VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
7411 if (rcStrict == VINF_SUCCESS)
7412 { /* likely */ }
7413 else if (RT_SUCCESS(rcStrict))
7414 {
7415 Log3(("%s: iemVmxWorldSwitch returns %Rrc -> Setting passup status\n", pszInstr,
7416 VBOXSTRICTRC_VAL(rcStrict)));
7417 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7418 }
7419 else
7420 {
7421 Log3(("%s: iemVmxWorldSwitch failed! rc=%Rrc\n", pszInstr, VBOXSTRICTRC_VAL(rcStrict)));
7422 return rcStrict;
7423 }
7424
7425 /* We've now entered nested-guest execution. */
7426 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
7427
7428 /*
7429 * The priority of potential VM-exits during VM-entry is important.
7430 * The priorities of VM-exits and events are listed from highest
7431 * to lowest as follows:
7432 *
7433 * 1. Event injection.
7434 * 2. Trap on task-switch (T flag set in TSS).
7435 * 3. TPR below threshold / APIC-write.
7436 * 4. SMI, INIT.
7437 * 5. MTF exit.
7438 * 6. Debug-trap exceptions (EFLAGS.TF), pending debug exceptions.
7439 * 7. VMX-preemption timer.
7440 * 9. NMI-window exit.
7441 * 10. NMI injection.
7442 * 11. Interrupt-window exit.
7443 * 12. Virtual-interrupt injection.
7444 * 13. Interrupt injection.
7445 * 14. Process next instruction (fetch, decode, execute).
7446 */
7447
7448 /* Setup the VMX-preemption timer. */
7449 iemVmxVmentrySetupPreemptTimer(pVCpu, pszInstr);
7450
7451 /* Setup monitor-trap flag. */
7452 iemVmxVmentrySetupMtf(pVCpu, pszInstr);
7453
7454 /* Setup NMI-window exiting. */
7455 iemVmxVmentrySetupNmiWindow(pVCpu, pszInstr);
7456
7457 /* Setup interrupt-window exiting. */
7458 iemVmxVmentrySetupIntWindow(pVCpu, pszInstr);
7459
7460 /* Now that we've switched page tables, we can go ahead and inject any event. */
7461 rcStrict = iemVmxVmentryInjectEvent(pVCpu, pszInstr);
7462 if (RT_SUCCESS(rcStrict))
7463 {
7464 /* Reschedule to IEM-only execution of the nested-guest or return VINF_SUCCESS. */
7465# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
7466 Log(("%s: Enabling IEM-only EM execution policy!\n", pszInstr));
7467 int rcSched = EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
7468 if (rcSched != VINF_SUCCESS)
7469 iemSetPassUpStatus(pVCpu, rcSched);
7470# endif
7471 return VINF_SUCCESS;
7472 }
7473
7474 Log(("%s: VM-entry event injection failed. rc=%Rrc\n", pszInstr, VBOXSTRICTRC_VAL(rcStrict)));
7475 return rcStrict;
7476 }
7477 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED,
7478 pVmcs->u64RoExitQual.u);
7479 }
7480 }
7481 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED,
7482 pVmcs->u64RoExitQual.u);
7483 }
7484
7485 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_HOST_STATE);
7486 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7487 return VINF_SUCCESS;
7488 }
7489 }
7490 }
7491
7492 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
7493 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7494 return VINF_SUCCESS;
7495# endif
7496}
7497
7498
7499/**
7500 * Checks whether an RDMSR or WRMSR instruction for the given MSR is intercepted
7501 * (causes a VM-exit) or not.
7502 *
7503 * @returns @c true if the instruction is intercepted, @c false otherwise.
7504 * @param pVCpu The cross context virtual CPU structure.
7505 * @param uExitReason The VM-exit reason (VMX_EXIT_RDMSR or
7506 * VMX_EXIT_WRMSR).
7507 * @param idMsr The MSR.
7508 */
7509IEM_STATIC bool iemVmxIsRdmsrWrmsrInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint32_t idMsr)
7510{
7511 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
7512 Assert( uExitReason == VMX_EXIT_RDMSR
7513 || uExitReason == VMX_EXIT_WRMSR);
7514
7515 /* Consult the MSR bitmap if the feature is supported. */
7516 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7517 Assert(pVmcs);
7518 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7519 {
7520 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
7521 uint32_t const fMsrpm = CPUMGetVmxMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr);
7522 if (uExitReason == VMX_EXIT_RDMSR)
7523 return RT_BOOL(fMsrpm & VMXMSRPM_EXIT_RD);
7524 return RT_BOOL(fMsrpm & VMXMSRPM_EXIT_WR);
7525 }
7526
7527 /* Without MSR bitmaps, all MSR accesses are intercepted. */
7528 return true;
7529}
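
/*
 * Illustrative sketch: how a caller in the RDMSR/WRMSR emulation path could consult the
 * predicate above before deciding whether the access must be reflected to the guest
 * hypervisor. The helper name iemExampleIsMsrAccessIntercepted is hypothetical;
 * IEM_VMX_IS_NON_ROOT_MODE, VMX_EXIT_RDMSR/VMX_EXIT_WRMSR and
 * iemVmxIsRdmsrWrmsrInterceptSet are the ones used above.
 */
#if 0 /* Illustration only, not referenced by the emulation. */
IEM_STATIC bool iemExampleIsMsrAccessIntercepted(PCVMCPU pVCpu, uint32_t idMsr, bool fWrite)
{
    /* Outside VMX non-root operation there is nothing to intercept. */
    if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
        return false;

    /* Map the access direction to the VM-exit reason and consult the MSR bitmap (or lack thereof). */
    uint32_t const uExitReason = fWrite ? VMX_EXIT_WRMSR : VMX_EXIT_RDMSR;
    return iemVmxIsRdmsrWrmsrInterceptSet(pVCpu, uExitReason, idMsr);
}
#endif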
7530
7531
7532/**
7533 * VMREAD common (memory/register) instruction execution worker.
7534 *
7535 * @returns Strict VBox status code.
7536 * @param pVCpu The cross context virtual CPU structure.
7537 * @param cbInstr The instruction length in bytes.
7538 * @param pu64Dst Where to write the VMCS value (only updated when
7539 * VINF_SUCCESS is returned).
7540 * @param u64VmcsField The VMCS field.
7541 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
7542 * NULL.
7543 */
7544IEM_STATIC VBOXSTRICTRC iemVmxVmreadCommon(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64VmcsField,
7545 PCVMXVEXITINFO pExitInfo)
7546{
7547 /* Nested-guest intercept. */
7548 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7549 && CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMREAD, u64VmcsField))
7550 {
7551 if (pExitInfo)
7552 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7553 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMREAD, VMXINSTRID_VMREAD, cbInstr);
7554 }
7555
7556 /* CPL. */
7557 if (pVCpu->iem.s.uCpl == 0)
7558 { /* likely */ }
7559 else
7560 {
7561 Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7562 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_Cpl;
7563 return iemRaiseGeneralProtectionFault0(pVCpu);
7564 }
7565
7566 /* VMCS pointer in root mode. */
7567 if ( !IEM_VMX_IS_ROOT_MODE(pVCpu)
7568 || IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
7569 { /* likely */ }
7570 else
7571 {
7572 Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7573 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrInvalid;
7574 iemVmxVmFailInvalid(pVCpu);
7575 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7576 return VINF_SUCCESS;
7577 }
7578
7579 /* VMCS-link pointer in non-root mode. */
7580 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7581 || IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
7582 { /* likely */ }
7583 else
7584 {
7585 Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
7586 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_LinkPtrInvalid;
7587 iemVmxVmFailInvalid(pVCpu);
7588 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7589 return VINF_SUCCESS;
7590 }
7591
7592 /* Supported VMCS field. */
7593 if (CPUMIsGuestVmxVmcsFieldValid(pVCpu->CTX_SUFF(pVM), u64VmcsField))
7594 { /* likely */ }
7595 else
7596 {
7597 Log(("vmread: VMCS field %#RX64 invalid -> VMFail\n", u64VmcsField));
7598 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_FieldInvalid;
7599 iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
7600 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7601 return VINF_SUCCESS;
7602 }
7603
7604 /*
7605 * Setup reading from the current or shadow VMCS.
7606 */
7607 uint8_t *pbVmcs;
7608 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7609 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7610 else
7611 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
7612 Assert(pbVmcs);
7613
7614 VMXVMCSFIELD VmcsField;
7615 VmcsField.u = u64VmcsField;
7616 uint8_t const uWidth = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_WIDTH);
7617 uint8_t const uType = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_TYPE);
7618 uint8_t const uWidthType = (uWidth << 2) | uType;
7619 uint8_t const uIndex = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_INDEX);
7620 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
7621 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
7622 Assert(offField < VMX_V_VMCS_SIZE);
7623
7624 /*
7625 * Read the VMCS component based on the field's effective width.
7626 *
7627 * The effective width of a 64-bit field is adjusted to 32 bits if the access-type
7628 * indicates the high bits (little endian).
7629 *
7630 * Note! The caller is responsible for trimming the result and updating registers
7631 * or memory locations as required. Here we just zero-extend to the largest
7632 * type (i.e. 64 bits).
7633 */
7634 uint8_t *pbField = pbVmcs + offField;
7635 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(VmcsField.u);
7636 switch (uEffWidth)
7637 {
7638 case VMX_VMCSFIELD_WIDTH_64BIT:
7639 case VMX_VMCSFIELD_WIDTH_NATURAL: *pu64Dst = *(uint64_t *)pbField; break;
7640 case VMX_VMCSFIELD_WIDTH_32BIT: *pu64Dst = *(uint32_t *)pbField; break;
7641 case VMX_VMCSFIELD_WIDTH_16BIT: *pu64Dst = *(uint16_t *)pbField; break;
7642 }
7643 return VINF_SUCCESS;
7644}
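
/*
 * Illustrative sketch: the field-to-offset translation performed inline by the
 * VMREAD/VMWRITE workers, on its own. The helper name iemExampleVmcsFieldToOffset is
 * hypothetical and it returns UINT16_MAX for an out-of-range index (the workers above
 * assert instead); the VMXVMCSFIELD layout, RT_BF_GET accessors and g_aoffVmcsMap table
 * are the ones used above.
 */
#if 0 /* Illustration only, not referenced by the emulation. */
static uint16_t iemExampleVmcsFieldToOffset(uint64_t u64VmcsField)
{
    VMXVMCSFIELD VmcsField;
    VmcsField.u = u64VmcsField;
    uint8_t const uWidth     = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_WIDTH); /* 16-bit/64-bit/32-bit/natural. */
    uint8_t const uType      = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_TYPE);  /* Control/RO-data/guest/host.   */
    uint8_t const uIndex     = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_INDEX);
    if (uIndex > VMX_V_VMCS_MAX_INDEX)
        return UINT16_MAX;
    uint8_t const uWidthType = (uWidth << 2) | uType;        /* Selects the row of the offset table. */
    return g_aoffVmcsMap[uWidthType][uIndex];                /* Byte offset into the virtual VMCS.   */
}
#endif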
7645
7646
7647/**
7648 * VMREAD (64-bit register) instruction execution worker.
7649 *
7650 * @returns Strict VBox status code.
7651 * @param pVCpu The cross context virtual CPU structure.
7652 * @param cbInstr The instruction length in bytes.
7653 * @param pu64Dst Where to store the VMCS field's value.
7654 * @param u64VmcsField The VMCS field.
7655 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
7656 * NULL.
7657 */
7658IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg64(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64VmcsField,
7659 PCVMXVEXITINFO pExitInfo)
7660{
7661 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, u64VmcsField, pExitInfo);
7662 if (rcStrict == VINF_SUCCESS)
7663 {
7664 iemVmxVmreadSuccess(pVCpu, cbInstr);
7665 return VINF_SUCCESS;
7666 }
7667
7668 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7669 return rcStrict;
7670}
7671
7672
7673/**
7674 * VMREAD (32-bit register) instruction execution worker.
7675 *
7676 * @returns Strict VBox status code.
7677 * @param pVCpu The cross context virtual CPU structure.
7678 * @param cbInstr The instruction length in bytes.
7679 * @param pu32Dst Where to store the VMCS field's value.
7680 * @param u32VmcsField The VMCS field.
7681 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
7682 * NULL.
7683 */
7684IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg32(PVMCPU pVCpu, uint8_t cbInstr, uint32_t *pu32Dst, uint64_t u32VmcsField,
7685 PCVMXVEXITINFO pExitInfo)
7686{
7687 uint64_t u64Dst;
7688 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u32VmcsField, pExitInfo);
7689 if (rcStrict == VINF_SUCCESS)
7690 {
7691 *pu32Dst = u64Dst;
7692 iemVmxVmreadSuccess(pVCpu, cbInstr);
7693 return VINF_SUCCESS;
7694 }
7695
7696 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7697 return rcStrict;
7698}
7699
7700
7701/**
7702 * VMREAD (memory) instruction execution worker.
7703 *
7704 * @returns Strict VBox status code.
7705 * @param pVCpu The cross context virtual CPU structure.
7706 * @param cbInstr The instruction length in bytes.
7707 * @param iEffSeg The effective segment register to use with
7708 * @a GCPtrDst.
7709 * @param GCPtrDst The guest linear address to store the VMCS field's
7710 * value.
7711 * @param u64VmcsField The VMCS field.
7712 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
7713 * NULL.
7714 */
7715IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDst, uint64_t u64VmcsField,
7716 PCVMXVEXITINFO pExitInfo)
7717{
7718 uint64_t u64Dst;
7719 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u64VmcsField, pExitInfo);
7720 if (rcStrict == VINF_SUCCESS)
7721 {
7722 /*
7723 * Write the VMCS field's value to the location specified in guest-memory.
7724 */
7725 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7726 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
7727 else
7728 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
7729 if (rcStrict == VINF_SUCCESS)
7730 {
7731 iemVmxVmreadSuccess(pVCpu, cbInstr);
7732 return VINF_SUCCESS;
7733 }
7734
7735 Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
7736 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrMap;
7737 return rcStrict;
7738 }
7739
7740 Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7741 return rcStrict;
7742}
7743
7744
7745/**
7746 * VMWRITE instruction execution worker.
7747 *
7748 * @returns Strict VBox status code.
7749 * @param pVCpu The cross context virtual CPU structure.
7750 * @param cbInstr The instruction length in bytes.
7751 * @param iEffSeg The effective segment register to use with @a u64Val.
7752 * Pass UINT8_MAX if it is a register access.
7753 * @param u64Val The value to write, or the guest linear address of
7754 * the value to write; @a iEffSeg indicates whether it
7755 * is a memory operand.
7756 * @param u64VmcsField The VMCS field.
7757 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
7758 * NULL.
7759 */
7760IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, uint64_t u64Val, uint64_t u64VmcsField,
7761 PCVMXVEXITINFO pExitInfo)
7762{
7763 /* Nested-guest intercept. */
7764 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7765 && CPUMIsGuestVmxVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMWRITE, u64VmcsField))
7766 {
7767 if (pExitInfo)
7768 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7769 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMWRITE, VMXINSTRID_VMWRITE, cbInstr);
7770 }
7771
7772 /* CPL. */
7773 if (pVCpu->iem.s.uCpl == 0)
7774 { /* likely */ }
7775 else
7776 {
7777 Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7778 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_Cpl;
7779 return iemRaiseGeneralProtectionFault0(pVCpu);
7780 }
7781
7782 /* VMCS pointer in root mode. */
7783 if ( !IEM_VMX_IS_ROOT_MODE(pVCpu)
7784 || IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
7785 { /* likely */ }
7786 else
7787 {
7788 Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7789 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrInvalid;
7790 iemVmxVmFailInvalid(pVCpu);
7791 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7792 return VINF_SUCCESS;
7793 }
7794
7795 /* VMCS-link pointer in non-root mode. */
7796 if ( !IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7797 || IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
7798 { /* likely */ }
7799 else
7800 {
7801 Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
7802 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_LinkPtrInvalid;
7803 iemVmxVmFailInvalid(pVCpu);
7804 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7805 return VINF_SUCCESS;
7806 }
7807
7808 /* If the VMWRITE instruction references memory, access the specified memory operand. */
7809 bool const fIsRegOperand = iEffSeg == UINT8_MAX;
7810 if (!fIsRegOperand)
7811 {
7812 /* Read the value from the specified guest memory location. */
7813 VBOXSTRICTRC rcStrict;
7814 RTGCPTR const GCPtrVal = u64Val;
7815 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7816 rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
7817 else
7818 {
7819 uint32_t u32Val;
7820 rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iEffSeg, GCPtrVal);
7821 u64Val = u32Val;
7822 }
7823 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7824 {
7825 Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
7826 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrMap;
7827 return rcStrict;
7828 }
7829 }
7830 else
7831 Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
7832
7833 /* Supported VMCS field. */
7834 if (CPUMIsGuestVmxVmcsFieldValid(pVCpu->CTX_SUFF(pVM), u64VmcsField))
7835 { /* likely */ }
7836 else
7837 {
7838 Log(("vmwrite: VMCS field %#RX64 invalid -> VMFail\n", u64VmcsField));
7839 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldInvalid;
7840 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
7841 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7842 return VINF_SUCCESS;
7843 }
7844
7845 /* Read-only VMCS field. */
7846 bool const fIsFieldReadOnly = HMVmxIsVmcsFieldReadOnly(u64VmcsField);
7847 if ( !fIsFieldReadOnly
7848 || IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
7849 { /* likely */ }
7850 else
7851 {
7852 Log(("vmwrite: Write to read-only VMCS component %#RX64 -> VMFail\n", u64VmcsField));
7853 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldRo;
7854 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
7855 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7856 return VINF_SUCCESS;
7857 }
7858
7859 /*
7860 * Setup writing to the current or shadow VMCS.
7861 */
7862 uint8_t *pbVmcs;
7863 if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7864 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7865 else
7866 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
7867 Assert(pbVmcs);
7868
7869 VMXVMCSFIELD VmcsField;
7870 VmcsField.u = u64VmcsField;
7871 uint8_t const uWidth = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_WIDTH);
7872 uint8_t const uType = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_TYPE);
7873 uint8_t const uWidthType = (uWidth << 2) | uType;
7874 uint8_t const uIndex = RT_BF_GET(VmcsField.u, VMX_BF_VMCSFIELD_INDEX);
7875 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
7876 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
7877 Assert(offField < VMX_V_VMCS_SIZE);
7878
7879 /*
7880 * Write the VMCS component based on the field's effective width.
7881 *
7882 * The effective width of a 64-bit field is adjusted to 32 bits if the access-type
7883 * indicates the high bits (little endian).
7884 */
7885 uint8_t *pbField = pbVmcs + offField;
7886 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(VmcsField.u);
7887 switch (uEffWidth)
7888 {
7889 case VMX_VMCSFIELD_WIDTH_64BIT:
7890 case VMX_VMCSFIELD_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
7891 case VMX_VMCSFIELD_WIDTH_32BIT: *(uint32_t *)pbField = u64Val; break;
7892 case VMX_VMCSFIELD_WIDTH_16BIT: *(uint16_t *)pbField = u64Val; break;
7893 }
7894
7895 iemVmxVmSucceed(pVCpu);
7896 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7897 return VINF_SUCCESS;
7898}
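
/*
 * Illustrative sketch: the VMCS selection rule shared by the VMREAD and VMWRITE workers
 * above -- in VMX root mode they operate on the current VMCS, while in VMX non-root mode
 * (when the access is not intercepted) they operate on the shadow VMCS referenced by the
 * VMCS-link pointer. The helper name iemExampleGetVmreadVmwriteVmcs is hypothetical.
 */
#if 0 /* Illustration only, not referenced by the emulation. */
IEM_STATIC uint8_t *iemExampleGetVmreadVmwriteVmcs(PVMCPU pVCpu)
{
    if (!IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
        return (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);    /* Current VMCS. */
    return (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);  /* Shadow VMCS.  */
}
#endif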
7899
7900
7901/**
7902 * VMCLEAR instruction execution worker.
7903 *
7904 * @returns Strict VBox status code.
7905 * @param pVCpu The cross context virtual CPU structure.
7906 * @param cbInstr The instruction length in bytes.
7907 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
7908 * @param GCPtrVmcs The linear address of the VMCS pointer.
7909 * @param pExitInfo Pointer to the VM-exit information. Optional, can be NULL.
7910 *
7911 * @remarks Common VMX instruction checks are already expected to have been done by
7912 * the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7913 */
7914IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
7915 PCVMXVEXITINFO pExitInfo)
7916{
7917 /* Nested-guest intercept. */
7918 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7919 {
7920 if (pExitInfo)
7921 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7922 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMCLEAR, VMXINSTRID_NONE, cbInstr);
7923 }
7924
7925 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7926
7927 /* CPL. */
7928 if (pVCpu->iem.s.uCpl == 0)
7929 { /* likely */ }
7930 else
7931 {
7932 Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7933 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_Cpl;
7934 return iemRaiseGeneralProtectionFault0(pVCpu);
7935 }
7936
7937 /* Get the VMCS pointer from the location specified by the source memory operand. */
7938 RTGCPHYS GCPhysVmcs;
7939 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
7940 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7941 { /* likely */ }
7942 else
7943 {
7944 Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
7945 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrMap;
7946 return rcStrict;
7947 }
7948
7949 /* VMCS pointer alignment. */
7950 if (!(GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK))
7951 { /* likely */ }
7952 else
7953 {
7954 Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
7955 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAlign;
7956 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
7957 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7958 return VINF_SUCCESS;
7959 }
7960
7961 /* VMCS physical-address width limits. */
7962 if (!(GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth))
7963 { /* likely */ }
7964 else
7965 {
7966 Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
7967 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrWidth;
7968 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
7969 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7970 return VINF_SUCCESS;
7971 }
7972
7973 /* VMCS is not the VMXON region. */
7974 if (GCPhysVmcs != pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
7975 { /* likely */ }
7976 else
7977 {
7978 Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
7979 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrVmxon;
7980 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
7981 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7982 return VINF_SUCCESS;
7983 }
7984
7985 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
7986 restriction imposed by our implementation. */
7987 if (PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
7988 { /* likely */ }
7989 else
7990 {
7991 Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
7992 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAbnormal;
7993 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
7994 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7995 return VINF_SUCCESS;
7996 }
7997
7998 /*
7999 * VMCLEAR allows committing and clearing any valid VMCS pointer.
8000 *
8001 * If the current VMCS is the one being cleared, set its state to 'clear' and commit
8002 * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
8003 * to 'clear'.
8004 */
8005 uint8_t const fVmcsLaunchStateClear = VMX_V_VMCS_LAUNCH_STATE_CLEAR;
8006 if ( IEM_VMX_HAS_CURRENT_VMCS(pVCpu)
8007 && IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
8008 {
8009 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsLaunchStateClear;
8010 iemVmxWriteCurrentVmcsToGstMem(pVCpu);
8011 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
8012 }
8013 else
8014 {
8015 AssertCompileMemberSize(VMXVVMCS, fVmcsState, sizeof(fVmcsLaunchStateClear));
8016 rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_UOFFSETOF(VMXVVMCS, fVmcsState),
8017 (const void *)&fVmcsLaunchStateClear, sizeof(fVmcsLaunchStateClear));
8018 if (RT_FAILURE(rcStrict))
8019 return rcStrict;
8020 }
8021
8022 iemVmxVmSucceed(pVCpu);
8023 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8024 return VINF_SUCCESS;
8025}
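
/*
 * Illustrative sketch: the "clear a non-current VMCS" branch above in isolation, i.e.
 * updating only the launch-state member of a VMCS that lives purely in guest memory.
 * The helper name iemExampleWriteVmcsLaunchStateToGstMem is hypothetical;
 * PGMPhysSimpleWriteGCPhys and the VMXVVMCS::fVmcsState member are the ones used above.
 */
#if 0 /* Illustration only, not referenced by the emulation. */
IEM_STATIC VBOXSTRICTRC iemExampleWriteVmcsLaunchStateToGstMem(PVMCPU pVCpu, RTGCPHYS GCPhysVmcs, uint8_t fVmcsState)
{
    /* Write only the byte-sized launch-state member; the rest of the VMCS in guest memory is untouched. */
    return PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_UOFFSETOF(VMXVVMCS, fVmcsState),
                                    &fVmcsState, sizeof(fVmcsState));
}
#endif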
8026
8027
8028/**
8029 * VMPTRST instruction execution worker.
8030 *
8031 * @returns Strict VBox status code.
8032 * @param pVCpu The cross context virtual CPU structure.
8033 * @param cbInstr The instruction length in bytes.
8034 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
8035 * @param GCPtrVmcs The linear address of where to store the current VMCS
8036 * pointer.
8037 * @param pExitInfo Pointer to the VM-exit information. Optional, can be NULL.
8038 *
8039 * @remarks Common VMX instruction checks are already expected to have been done by
8040 * the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
8041 */
8042IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
8043 PCVMXVEXITINFO pExitInfo)
8044{
8045 /* Nested-guest intercept. */
8046 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8047 {
8048 if (pExitInfo)
8049 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
8050 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRST, VMXINSTRID_NONE, cbInstr);
8051 }
8052
8053 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
8054
8055 /* CPL. */
8056 if (pVCpu->iem.s.uCpl == 0)
8057 { /* likely */ }
8058 else
8059 {
8060 Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8061 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_Cpl;
8062 return iemRaiseGeneralProtectionFault0(pVCpu);
8063 }
8064
8065 /* Set the VMCS pointer to the location specified by the destination memory operand. */
8066 AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
8067 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
8068 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8069 {
8070 iemVmxVmSucceed(pVCpu);
8071 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8072 return rcStrict;
8073 }
8074
8075 Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand, rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
8076 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_PtrMap;
8077 return rcStrict;
8078}
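
/*
 * Illustrative note: the AssertCompile above is what lets VMPTRST store
 * IEM_VMX_GET_CURRENT_VMCS() without a special case -- when no current VMCS is loaded the
 * value is NIL_RTGCPHYS, i.e. all bits set, matching the FFFFFFFF_FFFFFFFFH an Intel CPU
 * reports for an invalid current-VMCS pointer. A guest-side consumer could therefore
 * check the stored value as sketched below (hypothetical helper, illustration only).
 */
#if 0 /* Illustration only, not referenced by the emulation. */
static bool iemExampleIsStoredVmcsPtrValid(uint64_t uVmcsPtrStored)
{
    /* All-ones means "no current VMCS loaded". */
    return uVmcsPtrStored != UINT64_C(0xffffffffffffffff);
}
#endif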
8079
8080
8081/**
8082 * VMPTRLD instruction execution worker.
8083 *
8084 * @returns Strict VBox status code.
8085 * @param pVCpu The cross context virtual CPU structure.
8086 * @param cbInstr The instruction length in bytes.
 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
8087 * @param GCPtrVmcs The linear address of the current VMCS pointer.
8088 * @param pExitInfo Pointer to the VM-exit information. Optional, can be NULL.
8089 *
8090 * @remarks Common VMX instruction checks are already expected to have been done by
8091 * the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
8092 */
8093IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
8094 PCVMXVEXITINFO pExitInfo)
8095{
8096 /* Nested-guest intercept. */
8097 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8098 {
8099 if (pExitInfo)
8100 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
8101 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRLD, VMXINSTRID_NONE, cbInstr);
8102 }
8103
8104 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
8105
8106 /* CPL. */
8107 if (pVCpu->iem.s.uCpl == 0)
8108 { /* likely */ }
8109 else
8110 {
8111 Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8112 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_Cpl;
8113 return iemRaiseGeneralProtectionFault0(pVCpu);
8114 }
8115
8116 /* Get the VMCS pointer from the location specified by the source memory operand. */
8117 RTGCPHYS GCPhysVmcs;
8118 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
8119 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8120 { /* likely */ }
8121 else
8122 {
8123 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
8124 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrMap;
8125 return rcStrict;
8126 }
8127
8128 /* VMCS pointer alignment. */
8129 if (!(GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK))
8130 { /* likely */ }
8131 else
8132 {
8133 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
8134 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAlign;
8135 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
8136 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8137 return VINF_SUCCESS;
8138 }
8139
8140 /* VMCS physical-address width limits. */
8141 if (!(GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth))
8142 { /* likely */ }
8143 else
8144 {
8145 Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
8146 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrWidth;
8147 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
8148 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8149 return VINF_SUCCESS;
8150 }
8151
8152 /* VMCS is not the VMXON region. */
8153 if (GCPhysVmcs != pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
8154 { /* likely */ }
8155 else
8156 {
8157 Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
8158 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrVmxon;
8159 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
8160 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8161 return VINF_SUCCESS;
8162 }
8163
8164 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
8165 restriction imposed by our implementation. */
8166 if (PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
8167 { /* likely */ }
8168 else
8169 {
8170 Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
8171 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAbnormal;
8172 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
8173 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8174 return VINF_SUCCESS;
8175 }
8176
8177 /* Read just the VMCS revision from the VMCS. */
8178 VMXVMCSREVID VmcsRevId;
8179 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
8180 if (RT_SUCCESS(rc))
8181 { /* likely */ }
8182 else
8183 {
8184 Log(("vmptrld: Failed to read revision identifier from VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
8185 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_RevPtrReadPhys;
8186 return rc;
8187 }
8188
8189 /*
8190 * Verify the VMCS revision specified by the guest matches what we reported to the guest.
8191 * Verify the VMCS is not a shadow VMCS, if the VMCS shadowing feature is supported.
8192 */
8193 if ( VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID
8194 && ( !VmcsRevId.n.fIsShadowVmcs
8195 || IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
8196 { /* likely */ }
8197 else
8198 {
8199 if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
8200 {
8201 Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32, GCPtrVmcs=%#RGv GCPhysVmcs=%#RGp -> VMFail()\n",
8202 VMX_V_VMCS_REVISION_ID, VmcsRevId.n.u31RevisionId, GCPtrVmcs, GCPhysVmcs));
8203 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_VmcsRevId;
8204 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
8205 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8206 return VINF_SUCCESS;
8207 }
8208
8209 Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
8210 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_ShadowVmcs;
8211 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
8212 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8213 return VINF_SUCCESS;
8214 }
8215
8216 /*
8217 * We cache only the current VMCS in CPUMCTX. Therefore, VMPTRLD should always flush
8218 * the cache of an existing, current VMCS back to guest memory before loading a new,
8219 * different current VMCS.
8220 */
8221 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
8222 {
8223 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
8224 {
8225 iemVmxWriteCurrentVmcsToGstMem(pVCpu);
8226 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
8227 }
8228
8229 /* Set the new VMCS as the current VMCS and read it from guest memory. */
8230 IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
8231 rc = iemVmxReadCurrentVmcsFromGstMem(pVCpu);
8232 if (RT_SUCCESS(rc))
8233 { /* likely */ }
8234 else
8235 {
8236 Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
8237 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrReadPhys;
8238 return rc;
8239 }
8240 }
8241
8242 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
8243 iemVmxVmSucceed(pVCpu);
8244 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8245 return VINF_SUCCESS;
8246}
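
/*
 * Illustrative sketch: the physical-address sanity checks that VMCLEAR, VMPTRLD and VMXON
 * each repeat above (4K alignment and the maximum physical-address width advertised to
 * the guest), folded into one predicate. The helper name iemExampleIsVmxPhysAddrOk is
 * hypothetical; X86_PAGE_4K_OFFSET_MASK and cVmxMaxPhysAddrWidth are the ones used above.
 */
#if 0 /* Illustration only, not referenced by the emulation. */
IEM_STATIC bool iemExampleIsVmxPhysAddrOk(PVMCPU pVCpu, RTGCPHYS GCPhys)
{
    /* Must be 4K page aligned... */
    if (GCPhys & X86_PAGE_4K_OFFSET_MASK)
        return false;
    /* ...and must not use any bit beyond the physical-address width we report to the guest. */
    if (GCPhys >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
        return false;
    return true;
}
#endif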
8247
8248
8249/**
8250 * INVVPID instruction execution worker.
8251 *
8252 * @returns Strict VBox status code.
8253 * @param pVCpu The cross context virtual CPU structure.
8254 * @param cbInstr The instruction length in bytes.
8255 * @param iEffSeg The segment of the INVVPID descriptor.
8256 * @param GCPtrInvvpidDesc The address of the INVVPID descriptor.
8257 * @param u64InvvpidType The invalidation type.
8258 * @param pExitInfo Pointer to the VM-exit information. Optional, can be
8259 * NULL.
8260 *
8261 * @remarks Common VMX instruction checks are already expected to have been done by
8262 * the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
8263 */
8264IEM_STATIC VBOXSTRICTRC iemVmxInvvpid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrInvvpidDesc,
8265 uint64_t u64InvvpidType, PCVMXVEXITINFO pExitInfo)
8266{
8267 /* Check if INVVPID instruction is supported, otherwise raise #UD. */
8268 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVpid)
8269 return iemRaiseUndefinedOpcode(pVCpu);
8270
8271 /* Nested-guest intercept. */
8272 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8273 {
8274 if (pExitInfo)
8275 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
8276 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_INVVPID, VMXINSTRID_NONE, cbInstr);
8277 }
8278
8279 /* CPL. */
8280 if (pVCpu->iem.s.uCpl != 0)
8281 {
8282 Log(("invvpid: CPL != 0 -> #GP(0)\n"));
8283 return iemRaiseGeneralProtectionFault0(pVCpu);
8284 }
8285
8286 /*
8287 * Validate INVVPID invalidation type.
8288 *
8289 * The instruction specifies exactly ONE of the supported invalidation types.
8290 *
8291 * Each of the types has a bit in IA32_VMX_EPT_VPID_CAP MSR specifying if it is
8292 * supported. In theory, it's possible for a CPU to not support flushing individual
8293 * addresses but all the other types or any other combination. We do not take any
8294 * shortcuts here by assuming the types we currently expose to the guest.
8295 */
8296 uint64_t const fCaps = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64EptVpidCaps;
8297 uint8_t const fTypeIndivAddr = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_INDIV_ADDR);
8298 uint8_t const fTypeSingleCtx = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX);
8299 uint8_t const fTypeAllCtx = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_ALL_CTX);
8300 uint8_t const fTypeSingleCtxRetainGlobals = RT_BF_GET(fCaps, VMX_BF_EPT_VPID_CAP_INVVPID_SINGLE_CTX_RETAIN_GLOBALS);
8301 if ( (fTypeIndivAddr && u64InvvpidType == VMXTLBFLUSHVPID_INDIV_ADDR)
8302 || (fTypeSingleCtx && u64InvvpidType == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
8303 || (fTypeAllCtx && u64InvvpidType == VMXTLBFLUSHVPID_ALL_CONTEXTS)
8304 || (fTypeSingleCtxRetainGlobals && u64InvvpidType == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS))
8305 { /* likely */ }
8306 else
8307 {
8308 Log(("invvpid: invalid/unsupported invvpid type %#x -> VMFail\n", u64InvvpidType));
8309 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_TypeInvalid;
8310 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
8311 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8312 return VINF_SUCCESS;
8313 }
8314
8315 /*
8316 * Fetch the invvpid descriptor from guest memory.
8317 */
8318 RTUINT128U uDesc;
8319 VBOXSTRICTRC rcStrict = iemMemFetchDataU128(pVCpu, &uDesc, iEffSeg, GCPtrInvvpidDesc);
8320 if (rcStrict == VINF_SUCCESS)
8321 {
8322 /*
8323 * Validate the descriptor.
8324 */
8325 if (uDesc.s.Lo > 0xffff)
8326 {
8327 Log(("invvpid: reserved bits set in invvpid descriptor %#RX64 -> VMFail\n", uDesc.s.Lo));
8328 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_DescRsvd;
8329 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
8330 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8331 return VINF_SUCCESS;
8332 }
8333
8334 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
8335 RTGCUINTPTR64 const GCPtrInvAddr = uDesc.s.Hi;
8336 uint16_t const uVpid = uDesc.s.Lo & UINT64_C(0xffff);
8337 uint64_t const uCr3 = pVCpu->cpum.GstCtx.cr3;
8338 switch (u64InvvpidType)
8339 {
8340 case VMXTLBFLUSHVPID_INDIV_ADDR:
8341 {
8342 if (uVpid != 0)
8343 {
8344 if (IEM_IS_CANONICAL(GCPtrInvAddr))
8345 {
8346 /* Invalidate mappings for the linear address tagged with VPID. */
8347 /** @todo PGM support for VPID? Currently just flush everything. */
8348 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
8349 iemVmxVmSucceed(pVCpu);
8350 }
8351 else
8352 {
8353 Log(("invvpid: invalidation address %#RGv is not canonical -> VMFail\n", GCPtrInvAddr));
8354 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type0InvalidAddr;
8355 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
8356 }
8357 }
8358 else
8359 {
8360 Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, u64InvvpidType));
8361 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type0InvalidVpid;
8362 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
8363 }
8364 break;
8365 }
8366
8367 case VMXTLBFLUSHVPID_SINGLE_CONTEXT:
8368 {
8369 if (uVpid != 0)
8370 {
8371 /* Invalidate all mappings with VPID. */
8372 /** @todo PGM support for VPID? Currently just flush everything. */
8373 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
8374 iemVmxVmSucceed(pVCpu);
8375 }
8376 else
8377 {
8378 Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, u64InvvpidType));
8379 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type1InvalidVpid;
8380 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
8381 }
8382 break;
8383 }
8384
8385 case VMXTLBFLUSHVPID_ALL_CONTEXTS:
8386 {
8387 /* Invalidate all mappings with non-zero VPIDs. */
8388 /** @todo PGM support for VPID? Currently just flush everything. */
8389 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
8390 iemVmxVmSucceed(pVCpu);
8391 break;
8392 }
8393
8394 case VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS:
8395 {
8396 if (uVpid != 0)
8397 {
8398 /* Invalidate all mappings with VPID except global translations. */
8399 /** @todo PGM support for VPID? Currently just flush everything. */
8400 PGMFlushTLB(pVCpu, uCr3, true /* fGlobal */);
8401 iemVmxVmSucceed(pVCpu);
8402 }
8403 else
8404 {
8405 Log(("invvpid: invalid VPID %#x for invalidation type %u -> VMFail\n", uVpid, u64InvvpidType));
8406 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Invvpid_Type3InvalidVpid;
8407 iemVmxVmFail(pVCpu, VMXINSTRERR_INVEPT_INVVPID_INVALID_OPERAND);
8408 }
8409 break;
8410 }
8411 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8412 }
8413 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8414 }
8415 return rcStrict;
8416}
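
/*
 * Illustrative sketch: the layout of the 128-bit INVVPID descriptor consumed above. The
 * VPID sits in the low bits of the first quadword (the remaining bits of that quadword
 * are reserved and rejected above), and the linear address, used only by the
 * individual-address type, occupies the second quadword. The helper name
 * iemExampleParseInvvpidDesc is hypothetical; RTUINT128U is the type fetched above.
 */
#if 0 /* Illustration only, not referenced by the emulation. */
static void iemExampleParseInvvpidDesc(RTUINT128U const *puDesc, uint16_t *puVpid, uint64_t *puGCPtrInvAddr)
{
    *puVpid         = (uint16_t)puDesc->s.Lo;   /* VPID: low bits of the first qword. */
    *puGCPtrInvAddr = puDesc->s.Hi;             /* Linear address: bits 127:64.       */
}
#endif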
8417
8418
8419/**
8420 * VMXON instruction execution worker.
8421 *
8422 * @returns Strict VBox status code.
8423 * @param pVCpu The cross context virtual CPU structure.
8424 * @param cbInstr The instruction length in bytes.
8425 * @param iEffSeg The effective segment register to use with @a
8426 * GCPtrVmxon.
8427 * @param GCPtrVmxon The linear address of the VMXON pointer.
8428 * @param pExitInfo Pointer to the VM-exit information. Optional, can be NULL.
8429 *
8430 * @remarks Common VMX instruction checks are already expected to have been done by
8431 * the caller, i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
8432 */
8433IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmxon,
8434 PCVMXVEXITINFO pExitInfo)
8435{
8436 if (!IEM_VMX_IS_ROOT_MODE(pVCpu))
8437 {
8438 /* CPL. */
8439 if (pVCpu->iem.s.uCpl == 0)
8440 { /* likely */ }
8441 else
8442 {
8443 Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8444 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cpl;
8445 return iemRaiseGeneralProtectionFault0(pVCpu);
8446 }
8447
8448 /* A20M (A20 Masked) mode. */
8449 if (PGMPhysIsA20Enabled(pVCpu))
8450 { /* likely */ }
8451 else
8452 {
8453 Log(("vmxon: A20M mode -> #GP(0)\n"));
8454 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_A20M;
8455 return iemRaiseGeneralProtectionFault0(pVCpu);
8456 }
8457
8458 /* CR0. */
8459 {
8460 /* CR0 MB1 bits. */
8461 uint64_t const uCr0Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed0;
8462 if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) == uCr0Fixed0)
8463 { /* likely */ }
8464 else
8465 {
8466 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
8467 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed0;
8468 return iemRaiseGeneralProtectionFault0(pVCpu);
8469 }
8470
8471 /* CR0 MBZ bits. */
8472 uint64_t const uCr0Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr0Fixed1;
8473 if (!(pVCpu->cpum.GstCtx.cr0 & ~uCr0Fixed1))
8474 { /* likely */ }
8475 else
8476 {
8477 Log(("vmxon: CR0 fixed1 bits set -> #GP(0)\n"));
8478 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed1;
8479 return iemRaiseGeneralProtectionFault0(pVCpu);
8480 }
8481 }
8482
8483 /* CR4. */
8484 {
8485 /* CR4 MB1 bits. */
8486 uint64_t const uCr4Fixed0 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed0;
8487 if ((pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0) == uCr4Fixed0)
8488 { /* likely */ }
8489 else
8490 {
8491 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
8492 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed0;
8493 return iemRaiseGeneralProtectionFault0(pVCpu);
8494 }
8495
8496 /* CR4 MBZ bits. */
8497 uint64_t const uCr4Fixed1 = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64Cr4Fixed1;
8498 if (!(pVCpu->cpum.GstCtx.cr4 & ~uCr4Fixed1))
8499 { /* likely */ }
8500 else
8501 {
8502 Log(("vmxon: CR4 fixed1 bits set -> #GP(0)\n"));
8503 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed1;
8504 return iemRaiseGeneralProtectionFault0(pVCpu);
8505 }
8506 }
8507
8508 /* Feature control MSR's LOCK and VMXON bits. */
8509 uint64_t const uMsrFeatCtl = pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs.u64FeatCtrl;
8510 if ((uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
8511 == (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
8512 { /* likely */ }
8513 else
8514 {
8515 Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
8516 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_MsrFeatCtl;
8517 return iemRaiseGeneralProtectionFault0(pVCpu);
8518 }
8519
8520 /* Get the VMXON pointer from the location specified by the source memory operand. */
8521 RTGCPHYS GCPhysVmxon;
8522 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
8523 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
8524 { /* likely */ }
8525 else
8526 {
8527 Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
8528 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrMap;
8529 return rcStrict;
8530 }
8531
8532 /* VMXON region pointer alignment. */
8533 if (!(GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK))
8534 { /* likely */ }
8535 else
8536 {
8537 Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
8538 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAlign;
8539 iemVmxVmFailInvalid(pVCpu);
8540 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8541 return VINF_SUCCESS;
8542 }
8543
8544 /* VMXON physical-address width limits. */
8545 if (!(GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth))
8546 { /* likely */ }
8547 else
8548 {
8549 Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
8550 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrWidth;
8551 iemVmxVmFailInvalid(pVCpu);
8552 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8553 return VINF_SUCCESS;
8554 }
8555
8556 /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
8557 restriction imposed by our implementation. */
8558 if (PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
8559 { /* likely */ }
8560 else
8561 {
8562 Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
8563 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAbnormal;
8564 iemVmxVmFailInvalid(pVCpu);
8565 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8566 return VINF_SUCCESS;
8567 }
8568
8569 /* Read the VMCS revision ID from the VMXON region. */
8570 VMXVMCSREVID VmcsRevId;
8571 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
8572 if (RT_SUCCESS(rc))
8573 { /* likely */ }
8574 else
8575 {
8576 Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
8577 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrReadPhys;
8578 return rc;
8579 }
8580
8581 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
8582 if (RT_LIKELY(VmcsRevId.u == VMX_V_VMCS_REVISION_ID))
8583 { /* likely */ }
8584 else
8585 {
8586 /* Revision ID mismatch. */
8587 if (!VmcsRevId.n.fIsShadowVmcs)
8588 {
8589 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
8590 VmcsRevId.n.u31RevisionId));
8591 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmcsRevId;
8592 iemVmxVmFailInvalid(pVCpu);
8593 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8594 return VINF_SUCCESS;
8595 }
8596
8597 /* Shadow VMCS disallowed. */
8598 Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
8599 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_ShadowVmcs;
8600 iemVmxVmFailInvalid(pVCpu);
8601 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8602 return VINF_SUCCESS;
8603 }
8604
8605 /*
8606 * Record that we're in VMX operation, block INIT, block and disable A20M.
8607 */
8608 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
8609 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
8610 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
8611
8612 /* Clear address-range monitoring. */
8613 EMMonitorWaitClear(pVCpu);
8614 /** @todo NSTVMX: Intel PT. */
8615
8616 iemVmxVmSucceed(pVCpu);
8617 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8618 return VINF_SUCCESS;
8619 }
8620 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8621 {
8622 /* Nested-guest intercept. */
8623 if (pExitInfo)
8624 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
8625 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMXON, VMXINSTRID_NONE, cbInstr);
8626 }
8627
8628 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
8629
8630 /* CPL. */
8631 if (pVCpu->iem.s.uCpl > 0)
8632 {
8633 Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8634 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxRootCpl;
8635 return iemRaiseGeneralProtectionFault0(pVCpu);
8636 }
8637
8638 /* VMXON when already in VMX root mode. */
8639 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
8640 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxAlreadyRoot;
8641 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8642 return VINF_SUCCESS;
8643}
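
/*
 * Illustrative sketch: the CR0/CR4 fixed-bits rule VMXON applies above, written out as a
 * single predicate. Bits that are 1 in the FIXED0 MSR must be 1 in the control register,
 * and bits that are 0 in the FIXED1 MSR must be 0. The helper name
 * iemExampleIsCrValidForVmx is hypothetical.
 */
#if 0 /* Illustration only, not referenced by the emulation. */
static bool iemExampleIsCrValidForVmx(uint64_t uCr, uint64_t uFixed0, uint64_t uFixed1)
{
    bool const fMb1Ok = (uCr & uFixed0) == uFixed0; /* All must-be-one bits are set. */
    bool const fMbzOk = !(uCr & ~uFixed1);          /* No must-be-zero bit is set.   */
    return fMb1Ok && fMbzOk;
}
#endif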
8644
8645
8646/**
8647 * Implements 'VMXOFF'.
8648 *
8649 * @remarks Common VMX instruction checks are already expected to have been done by
8650 * the caller, i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
8651 */
8652IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
8653{
8654 /* Nested-guest intercept. */
8655 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8656 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMXOFF, cbInstr);
8657
8658 /* CPL. */
8659 if (pVCpu->iem.s.uCpl == 0)
8660 { /* likely */ }
8661 else
8662 {
8663 Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8664 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxoff_Cpl;
8665 return iemRaiseGeneralProtectionFault0(pVCpu);
8666 }
8667
8668 /* Dual monitor treatment of SMIs and SMM. */
8669 uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
8670 if (!(fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID))
8671 { /* likely */ }
8672 else
8673 {
8674 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
8675 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8676 return VINF_SUCCESS;
8677 }
8678
8679 /* Record that we're no longer in VMX root operation, unblock INIT, unblock and enable A20M. */
8680 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
8681 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
8682
8683 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
8684 { /** @todo NSTVMX: Unblock SMI. */ }
8685
8686 EMMonitorWaitClear(pVCpu);
8687 /** @todo NSTVMX: Unblock and enable A20M. */
8688
8689 iemVmxVmSucceed(pVCpu);
8690 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8691 return VINF_SUCCESS;
8692}
8693
8694
8695/**
8696 * Implements 'VMXON'.
8697 */
8698IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
8699{
8700 return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
8701}
8702
8703
8704/**
8705 * Implements 'VMLAUNCH'.
8706 */
8707IEM_CIMPL_DEF_0(iemCImpl_vmlaunch)
8708{
8709 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMLAUNCH);
8710}
8711
8712
8713/**
8714 * Implements 'VMRESUME'.
8715 */
8716IEM_CIMPL_DEF_0(iemCImpl_vmresume)
8717{
8718 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMRESUME);
8719}
8720
8721
8722/**
8723 * Implements 'VMPTRLD'.
8724 */
8725IEM_CIMPL_DEF_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
8726{
8727 return iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
8728}
8729
8730
8731/**
8732 * Implements 'VMPTRST'.
8733 */
8734IEM_CIMPL_DEF_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
8735{
8736 return iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
8737}
8738
8739
8740/**
8741 * Implements 'VMCLEAR'.
8742 */
8743IEM_CIMPL_DEF_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
8744{
8745 return iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
8746}
8747
8748
8749/**
8750 * Implements 'VMWRITE' register.
8751 */
8752IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64VmcsField)
8753{
8754 return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, u64Val, u64VmcsField, NULL /* pExitInfo */);
8755}
8756
8757
8758/**
8759 * Implements 'VMWRITE' memory.
8760 */
8761IEM_CIMPL_DEF_3(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrVal, uint32_t, u64VmcsField)
8762{
8763 return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, GCPtrVal, u64VmcsField, NULL /* pExitInfo */);
8764}
8765
8766
8767/**
8768 * Implements 'VMREAD' register (64-bit).
8769 */
8770IEM_CIMPL_DEF_2(iemCImpl_vmread_reg64, uint64_t *, pu64Dst, uint64_t, u64VmcsField)
8771{
8772 return iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64VmcsField, NULL /* pExitInfo */);
8773}
8774
8775
8776/**
8777 * Implements 'VMREAD' register (32-bit).
8778 */
8779IEM_CIMPL_DEF_2(iemCImpl_vmread_reg32, uint32_t *, pu32Dst, uint32_t, u32VmcsField)
8780{
8781 return iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u32VmcsField, NULL /* pExitInfo */);
8782}
8783
8784
8785/**
8786 * Implements 'VMREAD' memory, 64-bit register.
8787 */
8788IEM_CIMPL_DEF_3(iemCImpl_vmread_mem_reg64, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u64VmcsField)
8789{
8790 return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64VmcsField, NULL /* pExitInfo */);
8791}
8792
8793
8794/**
8795 * Implements 'VMREAD' memory, 32-bit register.
8796 */
8797IEM_CIMPL_DEF_3(iemCImpl_vmread_mem_reg32, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u32VmcsField)
8798{
8799 return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u32VmcsField, NULL /* pExitInfo */);
8800}
8801
8802
8803/**
8804 * Implements 'INVVPID'.
8805 */
8806IEM_CIMPL_DEF_3(iemCImpl_invvpid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvvpidDesc, uint64_t, uInvvpidType)
8807{
8808 return iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, uInvvpidType, NULL /* pExitInfo */);
8809}
8810
8811
8812/**
8813 * Implements VMX's implementation of PAUSE.
8814 */
8815IEM_CIMPL_DEF_0(iemCImpl_vmx_pause)
8816{
8817 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8818 {
8819 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrPause(pVCpu, cbInstr);
8820 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
8821 return rcStrict;
8822 }
8823
8824 /*
8825 * Outside VMX non-root operation or if the PAUSE instruction does not cause
8826 * a VM-exit, the instruction operates normally.
8827 */
8828 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8829 return VINF_SUCCESS;
8830}
8831
8832#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
8833
8834
8835/**
8836 * Implements 'VMCALL'.
8837 */
8838IEM_CIMPL_DEF_0(iemCImpl_vmcall)
8839{
8840#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8841 /* Nested-guest intercept. */
8842 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8843 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMCALL, cbInstr);
8844#endif
8845
8846 /* Join forces with vmmcall. */
8847 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
8848}
8849