VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h@75638

Last change on this file since 75638 was 75638, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 Documented a hack that's no longer necessary and added a couple of flower boxes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 329.2 KB
1/* $Id: IEMAllCImplVmxInstr.cpp.h 75638 2018-11-21 10:49:32Z vboxsync $ */
2/** @file
3 * IEM - VT-x instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Defined Constants And Macros *
21*********************************************************************************************************************************/
22#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
23/**
24 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
25 * relative offsets.
26 */
27# ifdef IEM_WITH_CODE_TLB
28# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) do { } while (0)
29# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) do { } while (0)
30# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
31# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
32# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
33# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
34# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
35# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
36# error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
37# else /* !IEM_WITH_CODE_TLB */
38# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
39 do \
40 { \
41 Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
42 (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
43 } while (0)
44
45# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
46
47# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
48 do \
49 { \
50 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
51 uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
52 uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
53 (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
54 } while (0)
55
56# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
57 do \
58 { \
59 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
60 (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
61 } while (0)
62
63# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
64 do \
65 { \
66 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
67 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
68 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
69 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
70 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
71 (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
72 } while (0)
73
74# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
75 do \
76 { \
77 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
78 (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
79 } while (0)
80
81# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
82 do \
83 { \
84 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
85 (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
86 } while (0)
87
88# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
89 do \
90 { \
91 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
92 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
93 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
94 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
95 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
96 (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
97 } while (0)
98# endif /* !IEM_WITH_CODE_TLB */
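
/*
 * Editor's illustrative sketch, not part of the original source: the
 * IEM_DISP_GET_* macros above merely assemble little-endian displacements from
 * consecutive opcode bytes and, for the _SX_ variants, sign-extend the result.
 * A stand-alone equivalent (function names are hypothetical; assumes the
 * stdint.h types already pulled in by the IEM headers):
 */
DECLINLINE(uint16_t) exampleDispGetU16(uint8_t const *pbOpcode, unsigned offDisp)
{
    /* Low byte first, then high byte - same as RT_MAKE_U16(bTmpLo, bTmpHi). */
    return (uint16_t)(pbOpcode[offDisp] | ((uint16_t)pbOpcode[offDisp + 1] << 8));
}

DECLINLINE(uint64_t) exampleDispGetS32SxU64(uint8_t const *pbOpcode, unsigned offDisp)
{
    /* Assemble 32 bits little-endian - same as RT_MAKE_U32_FROM_U8() - then sign-extend to 64 bits. */
    uint32_t const u32Disp = (uint32_t)pbOpcode[offDisp]
                           | ((uint32_t)pbOpcode[offDisp + 1] << 8)
                           | ((uint32_t)pbOpcode[offDisp + 2] << 16)
                           | ((uint32_t)pbOpcode[offDisp + 3] << 24);
    return (uint64_t)(int64_t)(int32_t)u32Disp;
}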
99
100/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
101#define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)
102
103/** Whether a shadow VMCS is present for the given VCPU. */
104#define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
105
106/** Gets the VMXON region pointer. */
107#define IEM_VMX_GET_VMXON_PTR(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
108
109/** Gets the guest-physical address of the current VMCS for the given VCPU. */
110#define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
111
112/** Whether a current VMCS is present for the given VCPU. */
113#define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
114
115/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
116#define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
117 do \
118 { \
119 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
120 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
121 } while (0)
122
123/** Clears any current VMCS for the given VCPU. */
124#define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
125 do \
126 { \
127 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
128 } while (0)
129
130/** Checks that the CPU is in VMX operation for VMX instructions that require it.
131 * @note Any changes here, check if IEMOP_HLP_IN_VMX_OPERATION needs updating. */
132#define IEM_VMX_IN_VMX_OPERATION(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
133 do \
134 { \
135 if (IEM_VMX_IS_ROOT_MODE(a_pVCpu)) \
136 { /* likely */ } \
137 else \
138 { \
139 Log((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
140 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
141 return iemRaiseUndefinedOpcode(a_pVCpu); \
142 } \
143 } while (0)
144
145/** Marks a VM-entry failure with a diagnostic reason, logs and returns. */
146#define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_VmxDiag) \
147 do \
148 { \
149 Log(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_VmxDiag), \
150 HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
151 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
152 return VERR_VMX_VMENTRY_FAILED; \
153 } while (0)
154
155/** Marks a VM-exit failure with a diagnostic reason, logs and returns. */
156#define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
157 do \
158 { \
159 Log(("VM-exit failed! uExitReason=%u enmDiag=%u (%s) -> %s\n", (a_uExitReason), (a_VmxDiag), \
160 HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
161 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
162 return VERR_VMX_VMEXIT_FAILED; \
163 } while (0)
164
165
166/*********************************************************************************************************************************
167* Global Variables *
168*********************************************************************************************************************************/
169/** @todo NSTVMX: The following VM-exit intercepts are pending:
170 * VMX_EXIT_IO_SMI
171 * VMX_EXIT_SMI
172 * VMX_EXIT_INT_WINDOW
173 * VMX_EXIT_NMI_WINDOW
174 * VMX_EXIT_GETSEC
175 * VMX_EXIT_RSM
176 * VMX_EXIT_MTF
177 * VMX_EXIT_MONITOR (APIC access VM-exit caused by MONITOR pending)
178 * VMX_EXIT_ERR_MACHINE_CHECK
179 * VMX_EXIT_TPR_BELOW_THRESHOLD
180 * VMX_EXIT_APIC_ACCESS
181 * VMX_EXIT_VIRTUALIZED_EOI
182 * VMX_EXIT_EPT_VIOLATION
183 * VMX_EXIT_EPT_MISCONFIG
184 * VMX_EXIT_INVEPT
185 * VMX_EXIT_PREEMPT_TIMER
186 * VMX_EXIT_INVVPID
187 * VMX_EXIT_APIC_WRITE
188 * VMX_EXIT_RDRAND
189 * VMX_EXIT_VMFUNC
190 * VMX_EXIT_ENCLS
191 * VMX_EXIT_RDSEED
192 * VMX_EXIT_PML_FULL
193 * VMX_EXIT_XSAVES
194 * VMX_EXIT_XRSTORS
195 */
196/**
197 * Map of VMCS field encodings to their virtual-VMCS structure offsets.
198 *
199 * The first array dimension is VMCS field encoding of Width OR'ed with Type and the
200 * second dimension is the Index, see VMXVMCSFIELDENC.
201 */
202uint16_t const g_aoffVmcsMap[16][VMX_V_VMCS_MAX_INDEX + 1] =
203{
204 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
205 {
206 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u16Vpid),
207 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u16PostIntNotifyVector),
208 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u16EptpIndex),
209 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
210 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
211 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
212 },
213 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
214 {
215 /* 0-7 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
216 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
217 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
218 /* 24-25 */ UINT16_MAX, UINT16_MAX
219 },
220 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
221 {
222 /* 0 */ RT_UOFFSETOF(VMXVVMCS, GuestEs),
223 /* 1 */ RT_UOFFSETOF(VMXVVMCS, GuestCs),
224 /* 2 */ RT_UOFFSETOF(VMXVVMCS, GuestSs),
225 /* 3 */ RT_UOFFSETOF(VMXVVMCS, GuestDs),
226 /* 4 */ RT_UOFFSETOF(VMXVVMCS, GuestFs),
227 /* 5 */ RT_UOFFSETOF(VMXVVMCS, GuestGs),
228 /* 6 */ RT_UOFFSETOF(VMXVVMCS, GuestLdtr),
229 /* 7 */ RT_UOFFSETOF(VMXVVMCS, GuestTr),
230 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u16GuestIntStatus),
231 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u16PmlIndex),
232 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
233 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
234 },
235 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
236 {
237 /* 0 */ RT_UOFFSETOF(VMXVVMCS, HostEs),
238 /* 1 */ RT_UOFFSETOF(VMXVVMCS, HostCs),
239 /* 2 */ RT_UOFFSETOF(VMXVVMCS, HostSs),
240 /* 3 */ RT_UOFFSETOF(VMXVVMCS, HostDs),
241 /* 4 */ RT_UOFFSETOF(VMXVVMCS, HostFs),
242 /* 5 */ RT_UOFFSETOF(VMXVVMCS, HostGs),
243 /* 6 */ RT_UOFFSETOF(VMXVVMCS, HostTr),
244 /* 7-14 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
245 /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
246 /* 23-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
247 },
248 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
249 {
250 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64AddrIoBitmapA),
251 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64AddrIoBitmapB),
252 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64AddrMsrBitmap),
253 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64AddrExitMsrStore),
254 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64AddrExitMsrLoad),
255 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEntryMsrLoad),
256 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64ExecVmcsPtr),
257 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64AddrPml),
258 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64TscOffset),
259 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVirtApic),
260 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64AddrApicAccess),
261 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64AddrPostedIntDesc),
262 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u64VmFuncCtls),
263 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u64EptpPtr),
264 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap0),
265 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap1),
266 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap2),
267 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap3),
268 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEptpList),
269 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVmreadBitmap),
270 /* 20 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVmwriteBitmap),
271 /* 21 */ RT_UOFFSETOF(VMXVVMCS, u64AddrXcptVeInfo),
272 /* 22 */ RT_UOFFSETOF(VMXVVMCS, u64XssBitmap),
273 /* 23 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEnclsBitmap),
274 /* 24 */ UINT16_MAX,
275 /* 25 */ RT_UOFFSETOF(VMXVVMCS, u64TscMultiplier)
276 },
277 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
278 {
279 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64RoGuestPhysAddr),
280 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
281 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
282 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
283 /* 25 */ UINT16_MAX
284 },
285 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
286 {
287 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64VmcsLinkPtr),
288 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDebugCtlMsr),
289 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPatMsr),
290 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64GuestEferMsr),
291 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
292 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte0),
293 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte1),
294 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte2),
295 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte3),
296 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64GuestBndcfgsMsr),
297 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
298 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
299 },
300 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
301 {
302 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64HostPatMsr),
303 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64HostEferMsr),
304 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64HostPerfGlobalCtlMsr),
305 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
306 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
307 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
308 },
309 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
310 {
311 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32PinCtls),
312 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32ProcCtls),
313 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32XcptBitmap),
314 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32XcptPFMask),
315 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32XcptPFMatch),
316 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32Cr3TargetCount),
317 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32ExitCtls),
318 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32ExitMsrStoreCount),
319 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u32ExitMsrLoadCount),
320 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u32EntryCtls),
321 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u32EntryMsrLoadCount),
322 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u32EntryIntInfo),
323 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u32EntryXcptErrCode),
324 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u32EntryInstrLen),
325 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u32TprThreshold),
326 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u32ProcCtls2),
327 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u32PleGap),
328 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u32PleWindow),
329 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
330 },
331 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
332 {
333 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32RoVmInstrError),
334 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitReason),
335 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitIntInfo),
336 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitIntErrCode),
337 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
338 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
339 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitInstrLen),
340 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitInstrInfo),
341 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
342 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
343 /* 24-25 */ UINT16_MAX, UINT16_MAX
344 },
345 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
346 {
347 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32GuestEsLimit),
348 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32GuestCsLimit),
349 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSsLimit),
350 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32GuestDsLimit),
351 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32GuestEsLimit),
352 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32GuestFsLimit),
353 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGsLimit),
354 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32GuestLdtrLimit),
355 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u32GuestTrLimit),
356 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGdtrLimit),
357 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u32GuestIdtrLimit),
358 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u32GuestEsAttr),
359 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u32GuestCsAttr),
360 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSsAttr),
361 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u32GuestDsAttr),
362 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u32GuestFsAttr),
363 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGsAttr),
364 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u32GuestLdtrAttr),
365 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u32GuestTrAttr),
366 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u32GuestIntrState),
367 /* 20 */ RT_UOFFSETOF(VMXVVMCS, u32GuestActivityState),
368 /* 21 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSmBase),
369 /* 22 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSysenterCS),
370 /* 23 */ RT_UOFFSETOF(VMXVVMCS, u32PreemptTimer),
371 /* 24-25 */ UINT16_MAX, UINT16_MAX
372 },
373 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
374 {
375 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32HostSysenterCs),
376 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
377 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
378 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
379 /* 25 */ UINT16_MAX
380 },
381 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_CONTROL: */
382 {
383 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64Cr0Mask),
384 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64Cr4Mask),
385 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64Cr0ReadShadow),
386 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64Cr4ReadShadow),
387 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target0),
388 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target1),
389 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target2),
390 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target3),
391 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
392 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
393 /* 24-25 */ UINT16_MAX, UINT16_MAX
394 },
395 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
396 {
397 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64RoExitQual),
398 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRcx),
399 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRsi),
400 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRdi),
401 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRip),
402 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64RoGuestLinearAddr),
403 /* 6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
404 /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
405 /* 22-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
406 },
407 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
408 {
409 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr0),
410 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr3),
411 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr4),
412 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64GuestEsBase),
413 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCsBase),
414 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSsBase),
415 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDsBase),
416 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64GuestFsBase),
417 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64GuestGsBase),
418 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64GuestLdtrBase),
419 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64GuestTrBase),
420 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64GuestGdtrBase),
421 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u64GuestIdtrBase),
422 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDr7),
423 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRsp),
424 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRip),
425 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRFlags),
426 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPendingDbgXcpt),
427 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSysenterEsp),
428 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSysenterEip),
429 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
430 },
431 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_HOST_STATE: */
432 {
433 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr0),
434 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr3),
435 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr4),
436 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64HostFsBase),
437 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64HostGsBase),
438 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64HostTrBase),
439 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64HostGdtrBase),
440 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64HostIdtrBase),
441 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64HostSysenterEsp),
442 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64HostSysenterEip),
443 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64HostRsp),
444 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64HostRip),
445 /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
446 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
447 }
448};
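
/*
 * Editor's illustrative sketch, not part of the original source: how a VMCS
 * field encoding is broken down into the two indices used with g_aoffVmcsMap
 * above.  Bit layout assumed per the Intel SDM (bit 0 = access type, bits 9:1
 * = index, bits 11:10 = type, bits 14:13 = width); the row composition matches
 * the (uWidth << 2) | uType scheme used by the accessor functions below.  The
 * function name is hypothetical.
 */
DECLINLINE(uint16_t) exampleVmcsFieldEncToOffset(uint32_t uFieldEnc)
{
    unsigned const uIndex     = (uFieldEnc >>  1) & 0x1ff;  /* bits 9:1   - table column */
    unsigned const uType      = (uFieldEnc >> 10) & 0x3;    /* bits 11:10 - control/VM-exit info/guest/host */
    unsigned const uWidth     = (uFieldEnc >> 13) & 0x3;    /* bits 14:13 - 16-bit/64-bit/32-bit/natural */
    unsigned const uWidthType = (uWidth << 2) | uType;      /* table row, 0..15 */
    if (uIndex > VMX_V_VMCS_MAX_INDEX)                      /* guard the column before indexing */
        return UINT16_MAX;
    return g_aoffVmcsMap[uWidthType][uIndex];               /* UINT16_MAX marks an unmapped field */
}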
449
450
451/**
452 * Returns whether the given VMCS field is valid and supported by our emulation.
453 *
454 * @param pVCpu The cross context virtual CPU structure.
455 * @param u64FieldEnc The VMCS field encoding.
456 *
457 * @remarks This takes into account the CPU features exposed to the guest.
458 */
459IEM_STATIC bool iemVmxIsVmcsFieldValid(PVMCPU pVCpu, uint64_t u64FieldEnc)
460{
461 uint32_t const uFieldEncHi = RT_HI_U32(u64FieldEnc);
462 uint32_t const uFieldEncLo = RT_LO_U32(u64FieldEnc);
463 if (!uFieldEncHi)
464 { /* likely */ }
465 else
466 return false;
467
468 PCCPUMFEATURES pFeat = IEM_GET_GUEST_CPU_FEATURES(pVCpu);
469 switch (uFieldEncLo)
470 {
471 /*
472 * 16-bit fields.
473 */
474 /* Control fields. */
475 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
476 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
477 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
478
479 /* Guest-state fields. */
480 case VMX_VMCS16_GUEST_ES_SEL:
481 case VMX_VMCS16_GUEST_CS_SEL:
482 case VMX_VMCS16_GUEST_SS_SEL:
483 case VMX_VMCS16_GUEST_DS_SEL:
484 case VMX_VMCS16_GUEST_FS_SEL:
485 case VMX_VMCS16_GUEST_GS_SEL:
486 case VMX_VMCS16_GUEST_LDTR_SEL:
487 case VMX_VMCS16_GUEST_TR_SEL:
488 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
489 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
490
491 /* Host-state fields. */
492 case VMX_VMCS16_HOST_ES_SEL:
493 case VMX_VMCS16_HOST_CS_SEL:
494 case VMX_VMCS16_HOST_SS_SEL:
495 case VMX_VMCS16_HOST_DS_SEL:
496 case VMX_VMCS16_HOST_FS_SEL:
497 case VMX_VMCS16_HOST_GS_SEL:
498 case VMX_VMCS16_HOST_TR_SEL: return true;
499
500 /*
501 * 64-bit fields.
502 */
503 /* Control fields. */
504 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
505 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
506 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
507 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
508 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
509 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
510 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
511 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
512 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
513 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
514 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
515 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
516 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
517 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
518 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
519 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
520 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
521 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
522 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
523 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
524 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
525 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
526 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
527 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
528 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
529 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
530 case VMX_VMCS64_CTRL_EPTP_FULL:
531 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
532 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
533 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
534 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
535 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
536 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
537 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
538 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
539 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
540 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
541 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
542 {
543 uint64_t const uVmFuncMsr = CPUMGetGuestIa32VmxVmFunc(pVCpu);
544 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
545 }
546 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
547 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
548 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
549 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
550 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
551 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
552 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
553 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
554 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
555 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
556 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
557 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
558
559 /* Read-only data fields. */
560 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
561 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
562
563 /* Guest-state fields. */
564 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
565 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
566 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
567 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
568 case VMX_VMCS64_GUEST_PAT_FULL:
569 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
570 case VMX_VMCS64_GUEST_EFER_FULL:
571 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
572 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
573 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
574 case VMX_VMCS64_GUEST_PDPTE0_FULL:
575 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
576 case VMX_VMCS64_GUEST_PDPTE1_FULL:
577 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
578 case VMX_VMCS64_GUEST_PDPTE2_FULL:
579 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
580 case VMX_VMCS64_GUEST_PDPTE3_FULL:
581 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
582 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
583 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
584
585 /* Host-state fields. */
586 case VMX_VMCS64_HOST_PAT_FULL:
587 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
588 case VMX_VMCS64_HOST_EFER_FULL:
589 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
590 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
591 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
592
593 /*
594 * 32-bit fields.
595 */
596 /* Control fields. */
597 case VMX_VMCS32_CTRL_PIN_EXEC:
598 case VMX_VMCS32_CTRL_PROC_EXEC:
599 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
600 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
601 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
602 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
603 case VMX_VMCS32_CTRL_EXIT:
604 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
605 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
606 case VMX_VMCS32_CTRL_ENTRY:
607 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
608 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
609 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
610 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
611 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
612 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
613 case VMX_VMCS32_CTRL_PLE_GAP:
614 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
615
616 /* Read-only data fields. */
617 case VMX_VMCS32_RO_VM_INSTR_ERROR:
618 case VMX_VMCS32_RO_EXIT_REASON:
619 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
620 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
621 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
622 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
623 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
624 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
625
626 /* Guest-state fields. */
627 case VMX_VMCS32_GUEST_ES_LIMIT:
628 case VMX_VMCS32_GUEST_CS_LIMIT:
629 case VMX_VMCS32_GUEST_SS_LIMIT:
630 case VMX_VMCS32_GUEST_DS_LIMIT:
631 case VMX_VMCS32_GUEST_FS_LIMIT:
632 case VMX_VMCS32_GUEST_GS_LIMIT:
633 case VMX_VMCS32_GUEST_LDTR_LIMIT:
634 case VMX_VMCS32_GUEST_TR_LIMIT:
635 case VMX_VMCS32_GUEST_GDTR_LIMIT:
636 case VMX_VMCS32_GUEST_IDTR_LIMIT:
637 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
638 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
639 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
640 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
641 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
642 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
643 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
644 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
645 case VMX_VMCS32_GUEST_INT_STATE:
646 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
647 case VMX_VMCS32_GUEST_SMBASE:
648 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
649 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
650
651 /* Host-state fields. */
652 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
653
654 /*
655 * Natural-width fields.
656 */
657 /* Control fields. */
658 case VMX_VMCS_CTRL_CR0_MASK:
659 case VMX_VMCS_CTRL_CR4_MASK:
660 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
661 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
662 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
663 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
664 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
665 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
666
667 /* Read-only data fields. */
668 case VMX_VMCS_RO_EXIT_QUALIFICATION:
669 case VMX_VMCS_RO_IO_RCX:
670 case VMX_VMCS_RO_IO_RSX:
671 case VMX_VMCS_RO_IO_RDI:
672 case VMX_VMCS_RO_IO_RIP:
673 case VMX_VMCS_RO_GUEST_LINEAR_ADDR: return true;
674
675 /* Guest-state fields. */
676 case VMX_VMCS_GUEST_CR0:
677 case VMX_VMCS_GUEST_CR3:
678 case VMX_VMCS_GUEST_CR4:
679 case VMX_VMCS_GUEST_ES_BASE:
680 case VMX_VMCS_GUEST_CS_BASE:
681 case VMX_VMCS_GUEST_SS_BASE:
682 case VMX_VMCS_GUEST_DS_BASE:
683 case VMX_VMCS_GUEST_FS_BASE:
684 case VMX_VMCS_GUEST_GS_BASE:
685 case VMX_VMCS_GUEST_LDTR_BASE:
686 case VMX_VMCS_GUEST_TR_BASE:
687 case VMX_VMCS_GUEST_GDTR_BASE:
688 case VMX_VMCS_GUEST_IDTR_BASE:
689 case VMX_VMCS_GUEST_DR7:
690 case VMX_VMCS_GUEST_RSP:
691 case VMX_VMCS_GUEST_RIP:
692 case VMX_VMCS_GUEST_RFLAGS:
693 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
694 case VMX_VMCS_GUEST_SYSENTER_ESP:
695 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
696
697 /* Host-state fields. */
698 case VMX_VMCS_HOST_CR0:
699 case VMX_VMCS_HOST_CR3:
700 case VMX_VMCS_HOST_CR4:
701 case VMX_VMCS_HOST_FS_BASE:
702 case VMX_VMCS_HOST_GS_BASE:
703 case VMX_VMCS_HOST_TR_BASE:
704 case VMX_VMCS_HOST_GDTR_BASE:
705 case VMX_VMCS_HOST_IDTR_BASE:
706 case VMX_VMCS_HOST_SYSENTER_ESP:
707 case VMX_VMCS_HOST_SYSENTER_EIP:
708 case VMX_VMCS_HOST_RSP:
709 case VMX_VMCS_HOST_RIP: return true;
710 }
711
712 return false;
713}
714
715
716/**
717 * Gets a host selector from the VMCS.
718 *
719 * @param pVmcs Pointer to the virtual VMCS.
720 * @param iSegReg The index of the segment register (X86_SREG_XXX).
721 */
722DECLINLINE(RTSEL) iemVmxVmcsGetHostSelReg(PCVMXVVMCS pVmcs, uint8_t iSegReg)
723{
724 Assert(iSegReg < X86_SREG_COUNT);
725 RTSEL HostSel;
726 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
727 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
728 uint8_t const uWidthType = (uWidth << 2) | uType;
729 uint8_t const uIndex = (iSegReg << 1) + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
730 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
731 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
732 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
733 uint8_t const *pbField = pbVmcs + offField;
734 HostSel = *(uint16_t *)pbField;
735 return HostSel;
736}
737
738
739/**
740 * Sets a guest segment register in the VMCS.
741 *
742 * @param pVmcs Pointer to the virtual VMCS.
743 * @param iSegReg The index of the segment register (X86_SREG_XXX).
744 * @param pSelReg Pointer to the segment register.
745 */
746IEM_STATIC void iemVmxVmcsSetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCCPUMSELREG pSelReg)
747{
748 Assert(pSelReg);
749 Assert(iSegReg < X86_SREG_COUNT);
750
751 /* Selector. */
752 {
753 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
754 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
755 uint8_t const uWidthType = (uWidth << 2) | uType;
756 uint8_t const uIndex = (iSegReg << 1) + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
757 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
758 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
759 uint8_t *pbVmcs = (uint8_t *)pVmcs;
760 uint8_t *pbField = pbVmcs + offField;
761 *(uint16_t *)pbField = pSelReg->Sel;
762 }
763
764 /* Limit. */
765 {
766 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
767 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
768 uint8_t const uWidthType = (uWidth << 2) | uType;
769 uint8_t const uIndex = (iSegReg << 1) + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
770 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
771 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
772 uint8_t *pbVmcs = (uint8_t *)pVmcs;
773 uint8_t *pbField = pbVmcs + offField;
774 *(uint32_t *)pbField = pSelReg->u32Limit;
775 }
776
777 /* Base. */
778 {
779 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
780 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
781 uint8_t const uWidthType = (uWidth << 2) | uType;
782 uint8_t const uIndex = (iSegReg << 1) + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
783 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
784 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
785 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
786 uint8_t const *pbField = pbVmcs + offField;
787 *(uint64_t *)pbField = pSelReg->u64Base;
788 }
789
790 /* Attributes. */
791 {
792 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
793 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
794 | X86DESCATTR_UNUSABLE;
795 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
796 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
797 uint8_t const uWidthType = (uWidth << 2) | uType;
798 uint8_t const uIndex = (iSegReg << 1) + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
799 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
800 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
801 uint8_t *pbVmcs = (uint8_t *)pVmcs;
802 uint8_t *pbField = pbVmcs + offField;
803 *(uint32_t *)pbField = pSelReg->Attr.u & fValidAttrMask;
804 }
805}
806
807
808/**
809 * Gets a guest segment register from the VMCS.
810 *
811 * @returns VBox status code.
812 * @param pVmcs Pointer to the virtual VMCS.
813 * @param iSegReg The index of the segment register (X86_SREG_XXX).
814 * @param pSelReg Where to store the segment register (only updated when
815 * VINF_SUCCESS is returned).
816 *
817 * @remarks Warning! This does not validate the contents of the retrieved segment
818 * register.
819 */
820IEM_STATIC int iemVmxVmcsGetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCPUMSELREG pSelReg)
821{
822 Assert(pSelReg);
823 Assert(iSegReg < X86_SREG_COUNT);
824
825 /* Selector. */
826 uint16_t u16Sel;
827 {
828 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
829 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
830 uint8_t const uWidthType = (uWidth << 2) | uType;
831 uint8_t const uIndex = (iSegReg << 1) + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
832 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
833 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
834 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
835 uint8_t const *pbField = pbVmcs + offField;
836 u16Sel = *(uint16_t *)pbField;
837 }
838
839 /* Limit. */
840 uint32_t u32Limit;
841 {
842 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
843 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
844 uint8_t const uWidthType = (uWidth << 2) | uType;
845 uint8_t const uIndex = (iSegReg << 1) + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
846 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
847 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
848 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
849 uint8_t const *pbField = pbVmcs + offField;
850 u32Limit = *(uint32_t *)pbField;
851 }
852
853 /* Base. */
854 uint64_t u64Base;
855 {
856 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
857 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
858 uint8_t const uWidthType = (uWidth << 2) | uType;
859 uint8_t const uIndex = (iSegReg << 1) + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
860 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
861 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
862 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
863 uint8_t const *pbField = pbVmcs + offField;
864 u64Base = *(uint64_t *)pbField;
865 /** @todo NSTVMX: Should we zero out high bits here for 32-bit virtual CPUs? */
866 }
867
868 /* Attributes. */
869 uint32_t u32Attr;
870 {
871 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
872 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
873 uint8_t const uWidthType = (uWidth << 2) | uType;
874 uint8_t const uIndex = (iSegReg << 1) + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
875 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
876 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
877 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
878 uint8_t const *pbField = pbVmcs + offField;
879 u32Attr = *(uint32_t *)pbField;
880 }
881
882 pSelReg->Sel = u16Sel;
883 pSelReg->ValidSel = u16Sel;
884 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
885 pSelReg->u32Limit = u32Limit;
886 pSelReg->u64Base = u64Base;
887 pSelReg->Attr.u = u32Attr;
888 return VINF_SUCCESS;
889}
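
/*
 * Editor's illustrative usage sketch, not part of the original source: pulling
 * the guest CS selector out of a virtual VMCS with the helper above.  The
 * function name is hypothetical; pVmcs is assumed to point at a valid VMXVVMCS.
 */
DECLINLINE(uint16_t) exampleGetGuestCsSelFromVmcs(PCVMXVVMCS pVmcs)
{
    CPUMSELREG SelReg;
    int const rc = iemVmxVmcsGetGuestSegReg(pVmcs, X86_SREG_CS, &SelReg);
    return rc == VINF_SUCCESS ? SelReg.Sel : 0;  /* Selector value, or 0 if the lookup failed. */
}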
890
891
892/**
893 * Gets a CR3 target value from the VMCS.
894 *
895 * @returns The CR3-target value.
896 * @param pVmcs Pointer to the virtual VMCS.
897 * @param idxCr3Target The index of the CR3-target value to retrieve.
899 */
900DECLINLINE(uint64_t) iemVmxVmcsGetCr3TargetValue(PCVMXVVMCS pVmcs, uint8_t idxCr3Target)
901{
902 Assert(idxCr3Target < VMX_V_CR3_TARGET_COUNT);
903 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
904 uint8_t const uType = VMX_VMCS_ENC_TYPE_CONTROL;
905 uint8_t const uWidthType = (uWidth << 2) | uType;
906 uint8_t const uIndex = (idxCr3Target << 1) + RT_BF_GET(VMX_VMCS_CTRL_CR3_TARGET_VAL0, VMX_BF_VMCS_ENC_INDEX);
907 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
908 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
909 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
910 uint8_t const *pbField = pbVmcs + offField;
911 uint64_t const uCr3TargetValue = *(uint64_t *)pbField;
912
913 return uCr3TargetValue;
914}
915
916
917/**
918 * Converts an IEM exception event type to a VMX event type.
919 *
920 * @returns The VMX event type.
921 * @param uVector The interrupt / exception vector.
922 * @param fFlags The IEM event flag (see IEM_XCPT_FLAGS_XXX).
923 */
924DECLINLINE(uint8_t) iemVmxGetEventType(uint32_t uVector, uint32_t fFlags)
925{
926 /* Paranoia (callers may use these interchangeably). */
927 AssertCompile(VMX_EXIT_INT_INFO_TYPE_NMI == VMX_IDT_VECTORING_INFO_TYPE_NMI);
928 AssertCompile(VMX_EXIT_INT_INFO_TYPE_HW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT);
929 AssertCompile(VMX_EXIT_INT_INFO_TYPE_EXT_INT == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
930 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT);
931 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_INT == VMX_IDT_VECTORING_INFO_TYPE_SW_INT);
932 AssertCompile(VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
933 AssertCompile(VMX_EXIT_INT_INFO_TYPE_NMI == VMX_ENTRY_INT_INFO_TYPE_NMI);
934 AssertCompile(VMX_EXIT_INT_INFO_TYPE_HW_XCPT == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT);
935 AssertCompile(VMX_EXIT_INT_INFO_TYPE_EXT_INT == VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
936 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_XCPT == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT);
937 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_INT == VMX_ENTRY_INT_INFO_TYPE_SW_INT);
938 AssertCompile(VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT);
939
940 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
941 {
942 if (uVector == X86_XCPT_NMI)
943 return VMX_EXIT_INT_INFO_TYPE_NMI;
944 return VMX_EXIT_INT_INFO_TYPE_HW_XCPT;
945 }
946
947 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
948 {
949 if (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR))
950 return VMX_EXIT_INT_INFO_TYPE_SW_XCPT;
951 if (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
952 return VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT;
953 return VMX_EXIT_INT_INFO_TYPE_SW_INT;
954 }
955
956 Assert(fFlags & IEM_XCPT_FLAGS_T_EXT_INT);
957 return VMX_EXIT_INT_INFO_TYPE_EXT_INT;
958}
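
/*
 * Editor's illustrative sketch, not part of the original source: how the event
 * type returned above is typically combined with the vector and error-code
 * flag into a VM-exit interruption-information value.  Bit layout assumed per
 * the Intel SDM (bits 7:0 vector, bits 10:8 type, bit 11 error-code valid,
 * bit 31 valid); the function name is hypothetical.
 */
DECLINLINE(uint32_t) exampleMakeExitIntInfo(uint8_t uVector, uint8_t uType, bool fErrCodeValid)
{
    return (uint32_t)uVector                            /* bits 7:0   - vector */
         | ((uint32_t)(uType & 0x7) << 8)               /* bits 10:8  - interruption type */
         | ((uint32_t)(fErrCodeValid ? 1 : 0) << 11)    /* bit 11     - error code valid */
         | RT_BIT_32(31);                               /* bit 31     - valid */
}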
959
960
961/**
962 * Sets the VM-instruction error VMCS field.
963 *
964 * @param pVCpu The cross context virtual CPU structure.
965 * @param enmInsErr The VM-instruction error.
966 */
967DECL_FORCE_INLINE(void) iemVmxVmcsSetVmInstrErr(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
968{
969 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
970 pVmcs->u32RoVmInstrError = enmInsErr;
971}
972
973
974/**
975 * Sets the VM-exit qualification VMCS field.
976 *
977 * @param pVCpu The cross context virtual CPU structure.
978 * @param uExitQual The VM-exit qualification.
979 */
980DECL_FORCE_INLINE(void) iemVmxVmcsSetExitQual(PVMCPU pVCpu, uint64_t uExitQual)
981{
982 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
983 pVmcs->u64RoExitQual.u = uExitQual;
984}
985
986
987/**
988 * Sets the VM-exit interruption information field.
989 *
990 * @param pVCpu The cross context virtual CPU structure.
991 * @param uExitIntInfo The VM-exit interruption information.
992 */
993DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntInfo(PVMCPU pVCpu, uint32_t uExitIntInfo)
994{
995 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
996 pVmcs->u32RoExitIntInfo = uExitIntInfo;
997}
998
999
1000/**
1001 * Sets the VM-exit interruption error code.
1002 *
1003 * @param pVCpu The cross context virtual CPU structure.
1004 * @param uErrCode The error code.
1005 */
1006DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntErrCode(PVMCPU pVCpu, uint32_t uErrCode)
1007{
1008 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1009 pVmcs->u32RoExitIntErrCode = uErrCode;
1010}
1011
1012
1013/**
1014 * Sets the IDT-vectoring information field.
1015 *
1016 * @param pVCpu The cross context virtual CPU structure.
1017 * @param uIdtVectorInfo The IDT-vectoring information.
1018 */
1019DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringInfo(PVMCPU pVCpu, uint32_t uIdtVectorInfo)
1020{
1021 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1022 pVmcs->u32RoIdtVectoringInfo = uIdtVectorInfo;
1023}
1024
1025
1026/**
1027 * Sets the IDT-vectoring error code field.
1028 *
1029 * @param pVCpu The cross context virtual CPU structure.
1030 * @param uErrCode The error code.
1031 */
1032DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringErrCode(PVMCPU pVCpu, uint32_t uErrCode)
1033{
1034 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1035 pVmcs->u32RoIdtVectoringErrCode = uErrCode;
1036}
1037
1038
1039/**
1040 * Sets the VM-exit guest-linear address VMCS field.
1041 *
1042 * @param pVCpu The cross context virtual CPU structure.
1043 * @param uGuestLinearAddr The VM-exit guest-linear address.
1044 */
1045DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestLinearAddr(PVMCPU pVCpu, uint64_t uGuestLinearAddr)
1046{
1047 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1048 pVmcs->u64RoGuestLinearAddr.u = uGuestLinearAddr;
1049}
1050
1051
1052/**
1053 * Sets the VM-exit guest-physical address VMCS field.
1054 *
1055 * @param pVCpu The cross context virtual CPU structure.
1056 * @param uGuestPhysAddr The VM-exit guest-physical address.
1057 */
1058DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestPhysAddr(PVMCPU pVCpu, uint64_t uGuestPhysAddr)
1059{
1060 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1061 pVmcs->u64RoGuestPhysAddr.u = uGuestPhysAddr;
1062}
1063
1064
1065/**
1066 * Sets the VM-exit instruction length VMCS field.
1067 *
1068 * @param pVCpu The cross context virtual CPU structure.
1069 * @param cbInstr The VM-exit instruction length in bytes.
1070 *
1071 * @remarks Callers may clear this field to 0. Hence, this function does not check
1072 * the validity of the instruction length.
1073 */
1074DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrLen(PVMCPU pVCpu, uint32_t cbInstr)
1075{
1076 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1077 pVmcs->u32RoExitInstrLen = cbInstr;
1078}
1079
1080
1081/**
1082 * Sets the VM-exit instruction info. VMCS field.
1083 *
1084 * @param pVCpu The cross context virtual CPU structure.
1085 * @param uExitInstrInfo The VM-exit instruction information.
1086 */
1087DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitInstrInfo)
1088{
1089 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1090 pVmcs->u32RoExitInstrInfo = uExitInstrInfo;
1091}
1092
1093
1094/**
1095 * Implements VMSucceed for VMX instruction success.
1096 *
1097 * @param pVCpu The cross context virtual CPU structure.
1098 */
1099DECL_FORCE_INLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
1100{
1101 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1102}
1103
1104
1105/**
1106 * Implements VMFailInvalid for VMX instruction failure.
1107 *
1108 * @param pVCpu The cross context virtual CPU structure.
1109 */
1110DECL_FORCE_INLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
1111{
1112 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1113 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_CF;
1114}
1115
1116
1117/**
1118 * Implements VMFailValid for VMX instruction failure.
1119 *
1120 * @param pVCpu The cross context virtual CPU structure.
1121 * @param enmInsErr The VM instruction error.
1122 */
1123DECL_FORCE_INLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1124{
1125 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1126 {
1127 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1128 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_ZF;
1129 iemVmxVmcsSetVmInstrErr(pVCpu, enmInsErr);
1130 }
1131}
1132
1133
1134/**
1135 * Implements VMFail for VMX instruction failure.
1136 *
1137 * @param pVCpu The cross context virtual CPU structure.
1138 * @param enmInsErr The VM instruction error.
1139 */
1140DECL_FORCE_INLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1141{
1142 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1143 iemVmxVmFailValid(pVCpu, enmInsErr);
1144 else
1145 iemVmxVmFailInvalid(pVCpu);
1146}
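
/*
 * Editor's illustrative sketch, not part of the original source: the guest-side
 * view of the RFLAGS convention established by the helpers above.  After a VMX
 * instruction, CF set means VMfailInvalid (no current VMCS to record an error
 * in), ZF set means VMfailValid (the VM-instruction error field holds the
 * reason), and both clear means VMsucceed.  The function name and return codes
 * are hypothetical.
 */
DECLINLINE(int) exampleVmxStatusFromEflags(uint32_t fEfl)
{
    if (fEfl & X86_EFL_CF)
        return -1;  /* VMfailInvalid. */
    if (fEfl & X86_EFL_ZF)
        return -2;  /* VMfailValid - consult VMX_VMCS32_RO_VM_INSTR_ERROR. */
    return 0;       /* VMsucceed. */
}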
1147
1148
1149/**
1150 * Checks if the given auto-load/store MSR area count is valid for the
1151 * implementation.
1152 *
1153 * @returns @c true if it's within the valid limit, @c false otherwise.
1154 * @param pVCpu The cross context virtual CPU structure.
1155 * @param uMsrCount The MSR area count to check.
1156 */
1157DECL_FORCE_INLINE(bool) iemVmxIsAutoMsrCountValid(PVMCPU pVCpu, uint32_t uMsrCount)
1158{
1159 uint64_t const u64VmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
1160 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(u64VmxMiscMsr);
1161 Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
1162 if (uMsrCount <= cMaxSupportedMsrs)
1163 return true;
1164 return false;
1165}
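
/*
 * Editor's illustrative sketch, not part of the original source: where the
 * limit checked above comes from.  Per the Intel SDM, bits 27:25 of the
 * IA32_VMX_MISC MSR hold a value N and the recommended maximum number of MSRs
 * in each auto-load/store list is 512 * (N + 1); this is what VMX_MISC_MAX_MSRS
 * is assumed to compute.  The function name is hypothetical.
 */
DECLINLINE(uint32_t) exampleVmxMiscMaxMsrs(uint64_t uVmxMiscMsr)
{
    uint32_t const uN = (uint32_t)((uVmxMiscMsr >> 25) & 0x7);  /* bits 27:25 */
    return 512 * (uN + 1);
}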
1166
1167
1168/**
1169 * Flushes the current VMCS contents back to guest memory.
1170 *
1171 * @returns VBox status code.
1172 * @param pVCpu The cross context virtual CPU structure.
1173 */
1174DECL_FORCE_INLINE(int) iemVmxCommitCurrentVmcsToMemory(PVMCPU pVCpu)
1175{
1176 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1177 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
1178 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), sizeof(VMXVVMCS));
1179 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
1180 return rc;
1181}
1182
1183
1184/**
1185 * Implements VMSucceed for the VMREAD instruction and increments the guest RIP.
1186 *
1187 * @param pVCpu The cross context virtual CPU structure.
1188 */
1189DECL_FORCE_INLINE(void) iemVmxVmreadSuccess(PVMCPU pVCpu, uint8_t cbInstr)
1190{
1191 iemVmxVmSucceed(pVCpu);
1192 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1193}
1194
1195
1196/**
1197 * Gets the instruction diagnostic for segment base checks during VM-entry of a
1198 * nested-guest.
1199 *
1200 * @param iSegReg The segment index (X86_SREG_XXX).
1201 */
1202IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBase(unsigned iSegReg)
1203{
1204 switch (iSegReg)
1205 {
1206 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseCs;
1207 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseDs;
1208 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseEs;
1209 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseFs;
1210 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseGs;
1211 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseSs;
1212 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_1);
1213 }
1214}
1215
1216
1217/**
1218 * Gets the instruction diagnostic for segment base checks during VM-entry of a
1219 * nested-guest that is in Virtual-8086 mode.
1220 *
1221 * @param iSegReg The segment index (X86_SREG_XXX).
1222 */
1223IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBaseV86(unsigned iSegReg)
1224{
1225 switch (iSegReg)
1226 {
1227 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseV86Cs;
1228 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ds;
1229 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseV86Es;
1230 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseV86Fs;
1231 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseV86Gs;
1232 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ss;
1233 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
1234 }
1235}
1236
1237
1238/**
1239 * Gets the instruction diagnostic for segment limit checks during VM-entry of a
1240 * nested-guest that is in Virtual-8086 mode.
1241 *
1242 * @param iSegReg The segment index (X86_SREG_XXX).
1243 */
1244IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegLimitV86(unsigned iSegReg)
1245{
1246 switch (iSegReg)
1247 {
1248 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegLimitV86Cs;
1249 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ds;
1250 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegLimitV86Es;
1251 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegLimitV86Fs;
1252 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegLimitV86Gs;
1253 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ss;
1254 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_3);
1255 }
1256}
1257
1258
1259/**
1260 * Gets the instruction diagnostic for segment attribute checks during VM-entry of a
1261 * nested-guest that is in Virtual-8086 mode.
1262 *
1263 * @param iSegReg The segment index (X86_SREG_XXX).
1264 */
1265IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrV86(unsigned iSegReg)
1266{
1267 switch (iSegReg)
1268 {
1269 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrV86Cs;
1270 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ds;
1271 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrV86Es;
1272 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrV86Fs;
1273 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrV86Gs;
1274 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ss;
1275 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_4);
1276 }
1277}
1278
1279
1280/**
1281 * Gets the instruction diagnostic for segment attributes reserved bits failure
1282 * during VM-entry of a nested-guest.
1283 *
1284 * @param iSegReg The segment index (X86_SREG_XXX).
1285 */
1286IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrRsvd(unsigned iSegReg)
1287{
1288 switch (iSegReg)
1289 {
1290 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdCs;
1291 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdDs;
1292 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrRsvdEs;
1293 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdFs;
1294 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdGs;
1295 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdSs;
1296 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_5);
1297 }
1298}
1299
1300
1301/**
1302 * Gets the instruction diagnostic for segment attributes descriptor-type
1303 * (code/segment or system) failure during VM-entry of a nested-guest.
1304 *
1305 * @param iSegReg The segment index (X86_SREG_XXX).
1306 */
1307IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDescType(unsigned iSegReg)
1308{
1309 switch (iSegReg)
1310 {
1311 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeCs;
1312 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeDs;
1313 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeEs;
1314 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeFs;
1315 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeGs;
1316 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeSs;
1317 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_6);
1318 }
1319}
1320
1321
1322/**
1323 * Gets the instruction diagnostic for segment attribute present-bit failure
1324 * during VM-entry of a nested-guest.
1325 *
1326 * @param iSegReg The segment index (X86_SREG_XXX).
1327 */
1328IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrPresent(unsigned iSegReg)
1329{
1330 switch (iSegReg)
1331 {
1332 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrPresentCs;
1333 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrPresentDs;
1334 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrPresentEs;
1335 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrPresentFs;
1336 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrPresentGs;
1337 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrPresentSs;
1338 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_7);
1339 }
1340}
1341
1342
1343/**
1344 * Gets the instruction diagnostic for segment attribute granularity failure during
1345 * VM-entry of a nested-guest.
1346 *
1347 * @param iSegReg The segment index (X86_SREG_XXX).
1348 */
1349IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrGran(unsigned iSegReg)
1350{
1351 switch (iSegReg)
1352 {
1353 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrGranCs;
1354 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrGranDs;
1355 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrGranEs;
1356 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrGranFs;
1357 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrGranGs;
1358 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrGranSs;
1359 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_8);
1360 }
1361}
1362
1363/**
1364 * Gets the instruction diagnostic for segment attribute DPL/RPL failure during
1365 * VM-entry of a nested-guest.
1366 *
1367 * @param iSegReg The segment index (X86_SREG_XXX).
1368 */
1369IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDplRpl(unsigned iSegReg)
1370{
1371 switch (iSegReg)
1372 {
1373 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplCs;
1374 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplDs;
1375 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDplRplEs;
1376 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplFs;
1377 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplGs;
1378 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplSs;
1379 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_9);
1380 }
1381}
1382
1383
1384/**
1385 * Gets the instruction diagnostic for segment attribute type accessed failure
1386 * during VM-entry of a nested-guest.
1387 *
1388 * @param iSegReg The segment index (X86_SREG_XXX).
1389 */
1390IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrTypeAcc(unsigned iSegReg)
1391{
1392 switch (iSegReg)
1393 {
1394 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccCs;
1395 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccDs;
1396 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccEs;
1397 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccFs;
1398 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccGs;
1399 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccSs;
1400 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_10);
1401 }
1402}
1403
1404
1405/**
1406 * Gets the instruction diagnostic for guest CR3 referenced PDPTE reserved bits
1407 * failure during VM-entry of a nested-guest.
1408 *
1409 * @param iPdpte The PDPTE entry index.
1410 */
1411IEM_STATIC VMXVDIAG iemVmxGetDiagVmentryPdpteRsvd(unsigned iPdpte)
1412{
1413 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
1414 switch (iPdpte)
1415 {
1416 case 0: return kVmxVDiag_Vmentry_GuestPdpte0Rsvd;
1417 case 1: return kVmxVDiag_Vmentry_GuestPdpte1Rsvd;
1418 case 2: return kVmxVDiag_Vmentry_GuestPdpte2Rsvd;
1419 case 3: return kVmxVDiag_Vmentry_GuestPdpte3Rsvd;
1420 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_11);
1421 }
1422}
1423
1424
1425/**
1426 * Gets the instruction diagnostic for host CR3 referenced PDPTE reserved bits
1427 * failure during VM-exit of a nested-guest.
1428 *
1429 * @param iPdpte The PDPTE entry index.
1430 */
1431IEM_STATIC VMXVDIAG iemVmxGetDiagVmexitPdpteRsvd(unsigned iPdpte)
1432{
1433 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
1434 switch (iPdpte)
1435 {
1436 case 0: return kVmxVDiag_Vmexit_HostPdpte0Rsvd;
1437 case 1: return kVmxVDiag_Vmexit_HostPdpte1Rsvd;
1438 case 2: return kVmxVDiag_Vmexit_HostPdpte2Rsvd;
1439 case 3: return kVmxVDiag_Vmexit_HostPdpte3Rsvd;
1440 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_12);
1441 }
1442}
1443
1444
1445/**
1446 * Masks the given nested-guest CR0/CR4 value with the corresponding guest/host
1447 * mask and read shadow, yielding the value the nested-guest observes (CR0/CR4 read).
1448 *
1449 * @returns The masked CR0/CR4.
1450 * @param pVCpu The cross context virtual CPU structure.
1451 * @param iCrReg The control register (either CR0 or CR4).
1452 * @param uGuestCrX The current guest CR0 or guest CR4.
1453 */
1454IEM_STATIC uint64_t iemVmxMaskCr0CR4(PVMCPU pVCpu, uint8_t iCrReg, uint64_t uGuestCrX)
1455{
1456 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
1457 Assert(iCrReg == 0 || iCrReg == 4);
1458
1459 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1460 Assert(pVmcs);
1461
1462 /*
1463 * For each CR0 or CR4 bit owned by the host, the corresponding bit is loaded from the
1464 * CR0 read shadow or CR4 read shadow. For each CR0 or CR4 bit that is not owned by the
1465 * host, the corresponding bit from the guest CR0 or guest CR4 is loaded.
1466 *
1467 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
1468 */
1469 uint64_t fGstHostMask;
1470 uint64_t fReadShadow;
1471 if (iCrReg == 0)
1472 {
1473 fGstHostMask = pVmcs->u64Cr0Mask.u;
1474 fReadShadow = pVmcs->u64Cr0ReadShadow.u;
1475 }
1476 else
1477 {
1478 fGstHostMask = pVmcs->u64Cr4Mask.u;
1479 fReadShadow = pVmcs->u64Cr4ReadShadow.u;
1480 }
1481
1482 uint64_t const fMaskedCrX = (fReadShadow & fGstHostMask) | (uGuestCrX & ~fGstHostMask);
1483 return fMaskedCrX;
1484}
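
/*
 * Illustrative sketch of the masking above using plain arithmetic only; the bit values
 * below are made up for the example and the function is not referenced anywhere (hence #if 0).
 */
#if 0
static uint64_t iemVmxExampleMaskCr0(void)
{
    uint64_t const fGstHostMask = UINT64_C(0x80000001);   /* Assume the host owns CR0.PG and CR0.PE. */
    uint64_t const fReadShadow  = UINT64_C(0x80000001);   /* The read shadow reports PG=1, PE=1. */
    uint64_t const uGuestCr0    = UINT64_C(0x00000011);   /* The guest's real CR0 has ET=1, PE=1, PG=0. */
    /* Host-owned bits are taken from the read shadow, the remaining bits from the guest value. */
    return (fReadShadow & fGstHostMask) | (uGuestCr0 & ~fGstHostMask);   /* = 0x80000011 */
}
#endif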
1485
1486
1487/**
1488 * Saves the guest control registers, debug registers and some MSRs as part of
1489 * VM-exit.
1490 *
1491 * @param pVCpu The cross context virtual CPU structure.
1492 */
1493IEM_STATIC void iemVmxVmexitSaveGuestControlRegsMsrs(PVMCPU pVCpu)
1494{
1495 /*
1496 * Saves the guest control registers, debug registers and some MSRs.
1497 * See Intel spec. 27.3.1 "Saving Control Registers, Debug Registers and MSRs".
1498 */
1499 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1500
1501 /* Save control registers. */
1502 pVmcs->u64GuestCr0.u = pVCpu->cpum.GstCtx.cr0;
1503 pVmcs->u64GuestCr3.u = pVCpu->cpum.GstCtx.cr3;
1504 pVmcs->u64GuestCr4.u = pVCpu->cpum.GstCtx.cr4;
1505
1506 /* Save SYSENTER CS, ESP, EIP. */
1507 pVmcs->u32GuestSysenterCS = pVCpu->cpum.GstCtx.SysEnter.cs;
1508 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1509 {
1510 pVmcs->u64GuestSysenterEsp.u = pVCpu->cpum.GstCtx.SysEnter.esp;
1511 pVmcs->u64GuestSysenterEip.u = pVCpu->cpum.GstCtx.SysEnter.eip;
1512 }
1513 else
1514 {
1515 pVmcs->u64GuestSysenterEsp.s.Lo = pVCpu->cpum.GstCtx.SysEnter.esp;
1516 pVmcs->u64GuestSysenterEip.s.Lo = pVCpu->cpum.GstCtx.SysEnter.eip;
1517 }
1518
1519 /* Save debug registers (DR7 and IA32_DEBUGCTL MSR). */
1520 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_DEBUG)
1521 {
1522 pVmcs->u64GuestDr7.u = pVCpu->cpum.GstCtx.dr[7];
1523 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
1524 }
1525
1526 /* Save PAT MSR. */
1527 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PAT_MSR)
1528 pVmcs->u64GuestPatMsr.u = pVCpu->cpum.GstCtx.msrPAT;
1529
1530 /* Save EFER MSR. */
1531 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_EFER_MSR)
1532 pVmcs->u64GuestEferMsr.u = pVCpu->cpum.GstCtx.msrEFER;
1533
1534 /* We don't support clearing IA32_BNDCFGS MSR yet. */
1535 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR));
1536
1537 /* Nothing to do for SMBASE register - We don't support SMM yet. */
1538}
1539
1540
1541/**
1542 * Saves the guest force-flags in preparation of entering the nested-guest.
1543 *
1544 * @param pVCpu The cross context virtual CPU structure.
1545 */
1546IEM_STATIC void iemVmxVmentrySaveForceFlags(PVMCPU pVCpu)
1547{
1548 /* We shouldn't be called multiple times during VM-entry. */
1549 Assert(pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions == 0);
1550
1551 /* MTF should not be set outside VMX non-root mode. */
1552 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
1553
1554 /*
1555 * Preserve the required force-flags.
1556 *
1557 * We cache and clear force-flags that would affect the execution of the
1558 * nested-guest. Cached flags are then restored while returning to the guest
1559 * if necessary.
1560 *
1561 * - VMCPU_FF_INHIBIT_INTERRUPTS need not be cached as it only affects
1562 * interrupts until the completion of the current VMLAUNCH/VMRESUME
1563 * instruction. Interrupt inhibition for any nested-guest instruction
1564 * will be set later while loading the guest-interruptibility state.
1565 *
1566 * - VMCPU_FF_BLOCK_NMIS needs to be cached as VM-exits caused before
1567 * successful VM-entry needs to continue blocking NMIs if it was in effect
1568 * during VM-entry.
1569 *
1570 * - MTF need not be preserved as it's used only in VMX non-root mode and
1571 * is supplied on VM-entry through the VM-execution controls.
1572 *
1573 * The remaining FFs (e.g. timers, APIC updates) must stay in place so that
1574 * we will be able to generate interrupts that may cause VM-exits for
1575 * the nested-guest.
1576 */
1577 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
1578
1579 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_BLOCK_NMIS))
1580 VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_BLOCK_NMIS);
1581}
1582
1583
1584/**
1585 * Restores the guest force-flags in preparation of exiting the nested-guest.
1586 *
1587 * @param pVCpu The cross context virtual CPU structure.
1588 */
1589IEM_STATIC void iemVmxVmexitRestoreForceFlags(PVMCPU pVCpu)
1590{
1591 if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
1592 {
1593 VMCPU_FF_SET_MASK(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions);
1594 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0;
1595 }
1596}
1597
1598
1599/**
1600 * Performs a VMX transition, updating PGM, IEM and CPUM.
1601 *
1602 * @param pVCpu The cross context virtual CPU structure.
1603 */
1604IEM_STATIC int iemVmxWorldSwitch(PVMCPU pVCpu)
1605{
1606 /*
1607 * Inform PGM about paging mode changes.
1608 * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet,
1609 * see comment in iemMemPageTranslateAndCheckAccess().
1610 */
1611 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
1612# ifdef IN_RING3
1613 Assert(rc != VINF_PGM_CHANGE_MODE);
1614# endif
1615 AssertRCReturn(rc, rc);
1616
1617 /* Inform CPUM (recompiler), can later be removed. */
1618 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1619
1620 /*
1621 * Flush the TLB with new CR3. This is required in case the PGM mode change
1622 * above doesn't actually change anything.
1623 */
1624 if (rc == VINF_SUCCESS)
1625 {
1626 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true);
1627 AssertRCReturn(rc, rc);
1628 }
1629
1630 /* Re-initialize IEM cache/state after the drastic mode switch. */
1631 iemReInitExec(pVCpu);
1632 return rc;
1633}
1634
1635
1636/**
1637 * Saves guest segment registers, GDTR, IDTR, LDTR, TR as part of VM-exit.
1638 *
1639 * @param pVCpu The cross context virtual CPU structure.
1640 */
1641IEM_STATIC void iemVmxVmexitSaveGuestSegRegs(PVMCPU pVCpu)
1642{
1643 /*
1644 * Save guest segment registers, GDTR, IDTR, LDTR, TR.
1645 * See Intel spec 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
1646 */
1647 /* CS, SS, ES, DS, FS, GS. */
1648 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1649 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
1650 {
1651 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1652 if (!pSelReg->Attr.n.u1Unusable)
1653 iemVmxVmcsSetGuestSegReg(pVmcs, iSegReg, pSelReg);
1654 else
1655 {
1656 /*
1657 * For unusable segments the attributes are undefined except for CS and SS.
1658 * For the rest we don't bother preserving anything but the unusable bit.
1659 */
1660 switch (iSegReg)
1661 {
1662 case X86_SREG_CS:
1663 pVmcs->GuestCs = pSelReg->Sel;
1664 pVmcs->u64GuestCsBase.u = pSelReg->u64Base;
1665 pVmcs->u32GuestCsLimit = pSelReg->u32Limit;
1666 pVmcs->u32GuestCsAttr = pSelReg->Attr.u & ( X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
1667 | X86DESCATTR_UNUSABLE);
1668 break;
1669
1670 case X86_SREG_SS:
1671 pVmcs->GuestSs = pSelReg->Sel;
1672 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1673 pVmcs->u64GuestSsBase.u &= UINT32_C(0xffffffff);
1674 pVmcs->u32GuestSsAttr = pSelReg->Attr.u & (X86DESCATTR_DPL | X86DESCATTR_UNUSABLE);
1675 break;
1676
1677 case X86_SREG_DS:
1678 pVmcs->GuestDs = pSelReg->Sel;
1679 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1680 pVmcs->u64GuestDsBase.u &= UINT32_C(0xffffffff);
1681 pVmcs->u32GuestDsAttr = X86DESCATTR_UNUSABLE;
1682 break;
1683
1684 case X86_SREG_ES:
1685 pVmcs->GuestEs = pSelReg->Sel;
1686 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1687 pVmcs->u64GuestEsBase.u &= UINT32_C(0xffffffff);
1688 pVmcs->u32GuestEsAttr = X86DESCATTR_UNUSABLE;
1689 break;
1690
1691 case X86_SREG_FS:
1692 pVmcs->GuestFs = pSelReg->Sel;
1693 pVmcs->u64GuestFsBase.u = pSelReg->u64Base;
1694 pVmcs->u32GuestFsAttr = X86DESCATTR_UNUSABLE;
1695 break;
1696
1697 case X86_SREG_GS:
1698 pVmcs->GuestGs = pSelReg->Sel;
1699 pVmcs->u64GuestGsBase.u = pSelReg->u64Base;
1700 pVmcs->u32GuestGsAttr = X86DESCATTR_UNUSABLE;
1701 break;
1702 }
1703 }
1704 }
1705
1706 /* Segment attribute bits 31:17 and 11:8 MBZ. */
1707 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
1708 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_UNUSABLE;
1709 /* LDTR. */
1710 {
1711 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.ldtr;
1712 pVmcs->GuestLdtr = pSelReg->Sel;
1713 pVmcs->u64GuestLdtrBase.u = pSelReg->u64Base;
1714 Assert(X86_IS_CANONICAL(pSelReg->u64Base));
1715 pVmcs->u32GuestLdtrLimit = pSelReg->u32Limit;
1716 pVmcs->u32GuestLdtrAttr = pSelReg->Attr.u & fValidAttrMask;
1717 }
1718
1719 /* TR. */
1720 {
1721 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.tr;
1722 pVmcs->GuestTr = pSelReg->Sel;
1723 pVmcs->u64GuestTrBase.u = pSelReg->u64Base;
1724 pVmcs->u32GuestTrLimit = pSelReg->u32Limit;
1725 pVmcs->u32GuestTrAttr = pSelReg->Attr.u & fValidAttrMask;
1726 }
1727
1728 /* GDTR. */
1729 pVmcs->u64GuestGdtrBase.u = pVCpu->cpum.GstCtx.gdtr.pGdt;
1730 pVmcs->u32GuestGdtrLimit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
1731
1732 /* IDTR. */
1733 pVmcs->u64GuestIdtrBase.u = pVCpu->cpum.GstCtx.idtr.pIdt;
1734 pVmcs->u32GuestIdtrLimit = pVCpu->cpum.GstCtx.idtr.cbIdt;
1735}
1736
1737
1738/**
1739 * Saves guest non-register state as part of VM-exit.
1740 *
1741 * @param pVCpu The cross context virtual CPU structure.
1742 * @param uExitReason The VM-exit reason.
1743 */
1744IEM_STATIC void iemVmxVmexitSaveGuestNonRegState(PVMCPU pVCpu, uint32_t uExitReason)
1745{
1746 /*
1747 * Save guest non-register state.
1748 * See Intel spec. 27.3.4 "Saving Non-Register State".
1749 */
1750 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1751
1752 /* Activity-state: VM-exits occur before changing the activity state, nothing further to do */
1753
1754 /* Interruptibility-state. */
1755 pVmcs->u32GuestIntrState = 0;
1756 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
1757 { /** @todo NSTVMX: Virtual-NMI blocking. */ }
1758 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1759 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1760
1761 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1762 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
1763 {
1764 /** @todo NSTVMX: We can't distinguish between blocking-by-MovSS and blocking-by-STI
1765 * currently. */
1766 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1767 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1768 }
1769 /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */
1770
1771 /* Pending debug exceptions. */
1772 if ( uExitReason != VMX_EXIT_INIT_SIGNAL
1773 && uExitReason != VMX_EXIT_SMI
1774 && uExitReason != VMX_EXIT_ERR_MACHINE_CHECK
1775 && !HMVmxIsTrapLikeVmexit(uExitReason))
1776 {
1777 /** @todo NSTVMX: also must exclude VM-exits caused by debug exceptions when
1778 * block-by-MovSS is in effect. */
1779 pVmcs->u64GuestPendingDbgXcpt.u = 0;
1780 }
1781
1782 /* Save VMX-preemption timer value. */
1783 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER)
1784 {
1785 uint32_t uPreemptTimer;
1786 if (uExitReason == VMX_EXIT_PREEMPT_TIMER)
1787 uPreemptTimer = 0;
1788 else
1789 {
1790 /*
1791 * Assume the following:
1792 * PreemptTimerShift = 5
1793 * VmcsPreemptTimer = 2 (i.e. the saved value drops by 1 every 2 * RT_BIT(5) = 64 TSC ticks)
1794 * VmentryTick = 50000 (TSC at time of VM-entry)
1795 *
1796 * CurTick Delta PreemptTimerVal
1797 * ----------------------------------
1798 * 50032 32 2
1799 * 50100 100 1
1800 * 50128 128 0 -> VM-exit.
1801 *
1802 * A VMX-preemption timer VM-exit is raised once the recomputed value reaches 0.
1803 * The saved VMX-preemption timer value is calculated as follows (integer division):
1804 * PreemptTimerVal = VmcsPreemptTimer - (Delta / (VmcsPreemptTimer * RT_BIT(PreemptTimerShift)))
1805 * E.g.:
1806 * Delta = 32
1807 * Tmp = 32 / (2 * 32) = 0
1808 * NewPt = 2 - 0 = 2
1809 * Delta = 100
1810 * Tmp = 100 / (2 * 32) = 1
1811 * NewPt = 2 - 1 = 1
1812 * Delta = 128
1813 * Tmp = 128 / (2 * 32) = 2
1814 * NewPt = 2 - 2 = 0
1815 */
1816 uint64_t const uCurTick = TMCpuTickGetNoCheck(pVCpu);
1817 uint64_t const uVmentryTick = pVCpu->cpum.GstCtx.hwvirt.vmx.uVmentryTick;
1818 uint64_t const uDelta = uCurTick - uVmentryTick;
1819 uint32_t const uVmcsPreemptVal = pVmcs->u32PreemptTimer;
1820 uPreemptTimer = uVmcsPreemptVal - ASMDivU64ByU32RetU32(uDelta, uVmcsPreemptVal * RT_BIT(VMX_V_PREEMPT_TIMER_SHIFT));
1821 }
1822
1823 pVmcs->u32PreemptTimer = uPreemptTimer;
1824 }
1825
1826
1827 /* PDPTEs. */
1828 /* We don't support EPT yet. */
1829 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
1830 pVmcs->u64GuestPdpte0.u = 0;
1831 pVmcs->u64GuestPdpte1.u = 0;
1832 pVmcs->u64GuestPdpte2.u = 0;
1833 pVmcs->u64GuestPdpte3.u = 0;
1834}
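
/*
 * Illustrative sketch of the VMX-preemption timer recomputation above in isolation,
 * assuming VMX_V_PREEMPT_TIMER_SHIFT is 5; not referenced anywhere (hence #if 0).
 */
#if 0
static uint32_t iemVmxExamplePreemptTimer(uint64_t uCurTick, uint64_t uVmentryTick, uint32_t uVmcsPreemptVal)
{
    uint64_t const uDelta   = uCurTick - uVmentryTick;              /* TSC ticks elapsed since VM-entry. */
    uint32_t const uDivisor = uVmcsPreemptVal * (UINT32_C(1) << 5); /* Same divisor as used above. */
    return uVmcsPreemptVal - (uint32_t)(uDelta / uDivisor);         /* E.g. (50100, 50000, 2) returns 1. */
}
#endif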
1835
1836
1837/**
1838 * Saves the guest-state as part of VM-exit.
1839 *
1841 * @param pVCpu The cross context virtual CPU structure.
1842 * @param uExitReason The VM-exit reason.
1843 */
1844IEM_STATIC void iemVmxVmexitSaveGuestState(PVMCPU pVCpu, uint32_t uExitReason)
1845{
1846 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1847 Assert(pVmcs);
1848
1849 iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu);
1850 iemVmxVmexitSaveGuestSegRegs(pVCpu);
1851
1852 /** @todo r=ramshankar: The below hack is no longer necessary because we invoke the
1853 * VM-exit after updating RIP. I'm leaving it in-place temporarily in case
1854 * we need to fix missing exit information or callers still setting
1855 * instruction-length field when it is not necessary. */
1856#if 0
1857 /*
1858 * Save guest RIP, RSP and RFLAGS.
1859 * See Intel spec. 27.3.3 "Saving RIP, RSP and RFLAGS".
1860 *
1861 * For trap-like VM-exits we must advance the RIP by the length of the instruction.
1862 * Callers must pass the instruction length in the VM-exit instruction length
1863 * field though it is undefined for such VM-exits. After updating RIP here, we clear
1864 * the VM-exit instruction length field.
1865 *
1866 * See Intel spec. 27.1 "Architectural State Before A VM Exit"
1867 */
1868 if (HMVmxIsTrapLikeVmexit(uExitReason))
1869 {
1870 uint8_t const cbInstr = pVmcs->u32RoExitInstrLen;
1871 AssertMsg(cbInstr >= 1 && cbInstr <= 15, ("uReason=%u cbInstr=%u\n", uExitReason, cbInstr));
1872 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1873 iemVmxVmcsSetExitInstrLen(pVCpu, 0 /* cbInstr */);
1874 }
1875#endif
1876
1877 /* We don't support enclave mode yet. */
1878 pVmcs->u64GuestRip.u = pVCpu->cpum.GstCtx.rip;
1879 pVmcs->u64GuestRsp.u = pVCpu->cpum.GstCtx.rsp;
1880 pVmcs->u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u; /** @todo NSTVMX: Check RFLAGS.RF handling. */
1881
1882 iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason);
1883}
1884
1885
1886/**
1887 * Saves the guest MSRs into the VM-exit auto-store MSRs area as part of VM-exit.
1888 *
1889 * @returns VBox status code.
1890 * @param pVCpu The cross context virtual CPU structure.
1891 * @param uExitReason The VM-exit reason (for diagnostic purposes).
1892 */
1893IEM_STATIC int iemVmxVmexitSaveGuestAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
1894{
1895 /*
1896 * Save guest MSRs.
1897 * See Intel spec. 27.4 "Saving MSRs".
1898 */
1899 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1900 const char *const pszFailure = "VMX-abort";
1901
1902 /*
1903 * The VM-exit MSR-store area address need not be a valid guest-physical address if the
1904 * VM-exit MSR-store count is 0. If this is the case, bail early without reading it.
1905 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
1906 */
1907 uint32_t const cMsrs = pVmcs->u32ExitMsrStoreCount;
1908 if (!cMsrs)
1909 return VINF_SUCCESS;
1910
1911 /*
1912 * Verify the MSR auto-store count. Physical CPUs can behave unpredictably if the count
1913 * is exceeded including possibly raising #MC exceptions during VMX transition. Our
1914 * implementation causes a VMX-abort followed by a triple-fault.
1915 */
1916 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
1917 if (fIsMsrCountValid)
1918 { /* likely */ }
1919 else
1920 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount);
1921
1922 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
1923 Assert(pMsr);
1924 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
1925 {
1926 if ( !pMsr->u32Reserved
1927 && pMsr->u32Msr != MSR_IA32_SMBASE
1928 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
1929 {
1930 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pMsr->u32Msr, &pMsr->u64Value);
1931 if (rcStrict == VINF_SUCCESS)
1932 continue;
1933
1934 /*
1935 * If we're in ring-0, we cannot handle returns to ring-3 at this point and still continue the VM-exit.
1936 * If the guest hypervisor stores MSRs that require ring-3 handling, we cause a VMX-abort,
1937 * recording the MSR index in the auxiliary info. field and indicating it further with our
1938 * own, specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0
1939 * if possible, or come up with a better, generic solution.
1940 */
1941 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
1942 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_READ
1943 ? kVmxVDiag_Vmexit_MsrStoreRing3
1944 : kVmxVDiag_Vmexit_MsrStore;
1945 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
1946 }
1947 else
1948 {
1949 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
1950 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreRsvd);
1951 }
1952 }
1953
1954 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrStore.u;
1955 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysAutoMsrArea,
1956 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea), VMX_V_AUTOMSR_AREA_SIZE);
1957 if (RT_SUCCESS(rc))
1958 { /* likely */ }
1959 else
1960 {
1961 AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
1962 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrWritePhys);
1963 }
1964
1965 NOREF(uExitReason);
1966 NOREF(pszFailure);
1967 return VINF_SUCCESS;
1968}
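
/*
 * Illustrative sketch of a single VM-exit MSR-store area entry as the code above expects
 * it; the MSR index is only an example and the function is not referenced anywhere.
 */
#if 0
static void iemVmxExampleInitMsrStoreEntry(PVMXAUTOMSR pMsr)
{
    pMsr->u32Msr      = 0xc0000100;  /* Example: IA32_FS_BASE; must not be SMBASE or an x2APIC MSR. */
    pMsr->u32Reserved = 0;           /* Non-zero reserved bits make the VM-exit fail with a VMX-abort. */
    pMsr->u64Value    = 0;           /* Overwritten with the guest MSR value during the VM-exit. */
}
#endif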
1969
1970
1971/**
1972 * Performs a VMX abort (due to a fatal error during VM-exit).
1973 *
1974 * @returns Strict VBox status code.
1975 * @param pVCpu The cross context virtual CPU structure.
1976 * @param enmAbort The VMX abort reason.
1977 */
1978IEM_STATIC VBOXSTRICTRC iemVmxAbort(PVMCPU pVCpu, VMXABORT enmAbort)
1979{
1980 /*
1981 * Perform the VMX abort.
1982 * See Intel spec. 27.7 "VMX Aborts".
1983 */
1984 LogFunc(("enmAbort=%u (%s) -> RESET\n", enmAbort, HMVmxGetAbortDesc(enmAbort)));
1985
1986 /* We don't support SMX yet. */
1987 pVCpu->cpum.GstCtx.hwvirt.vmx.enmAbort = enmAbort;
1988 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1989 {
1990 RTGCPHYS const GCPhysVmcs = IEM_VMX_GET_CURRENT_VMCS(pVCpu);
1991 uint32_t const offVmxAbort = RT_UOFFSETOF(VMXVVMCS, u32VmxAbortId);
1992 PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + offVmxAbort, &enmAbort, sizeof(enmAbort));
1993 }
1994
1995 return VINF_EM_TRIPLE_FAULT;
1996}
1997
1998
1999/**
2000 * Loads host control registers, debug registers and MSRs as part of VM-exit.
2001 *
2002 * @param pVCpu The cross context virtual CPU structure.
2003 */
2004IEM_STATIC void iemVmxVmexitLoadHostControlRegsMsrs(PVMCPU pVCpu)
2005{
2006 /*
2007 * Load host control registers, debug registers and MSRs.
2008 * See Intel spec. 27.5.1 "Loading Host Control Registers, Debug Registers, MSRs".
2009 */
2010 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2011 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2012
2013 /* CR0. */
2014 {
2015 /* Bits 63:32, 28:19, 17, 15:6, ET, CD, NW and CR0 MB1 bits are not modified. */
2016 uint64_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
2017 uint64_t const fCr0IgnMask = UINT64_C(0xffffffff1ff8ffc0) | X86_CR0_ET | X86_CR0_CD | X86_CR0_NW | uCr0Fixed0;
2018 uint64_t const uHostCr0 = pVmcs->u64HostCr0.u;
2019 uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
2020 uint64_t const uValidCr0 = (uHostCr0 & ~fCr0IgnMask) | (uGuestCr0 & fCr0IgnMask);
2021 CPUMSetGuestCR0(pVCpu, uValidCr0);
2022 }
2023
2024 /* CR4. */
2025 {
2026 /* CR4 MB1 bits are not modified. */
2027 uint64_t const fCr4IgnMask = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
2028 uint64_t const uHostCr4 = pVmcs->u64HostCr4.u;
2029 uint64_t const uGuestCr4 = pVCpu->cpum.GstCtx.cr4;
2030 uint64_t uValidCr4 = (uHostCr4 & ~fCr4IgnMask) | (uGuestCr4 & fCr4IgnMask);
2031 if (fHostInLongMode)
2032 uValidCr4 |= X86_CR4_PAE;
2033 else
2034 uValidCr4 &= ~X86_CR4_PCIDE;
2035 CPUMSetGuestCR4(pVCpu, uValidCr4);
2036 }
2037
2038 /* CR3 (host value validated while checking host-state during VM-entry). */
2039 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64HostCr3.u;
2040
2041 /* DR7. */
2042 pVCpu->cpum.GstCtx.dr[7] = X86_DR7_INIT_VAL;
2043
2044 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
2045
2046 /* Load SYSENTER CS, ESP, EIP (host values validated while checking host-state during VM-entry). */
2047 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64HostSysenterEip.u;
2048 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64HostSysenterEsp.u;
2049 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32HostSysenterCs;
2050
2051 /* FS, GS bases are loaded later while we load host segment registers. */
2052
2053 /* EFER MSR (host value validated while checking host-state during VM-entry). */
2054 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
2055 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64HostEferMsr.u;
2056 else if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2057 {
2058 if (fHostInLongMode)
2059 pVCpu->cpum.GstCtx.msrEFER |= (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
2060 else
2061 pVCpu->cpum.GstCtx.msrEFER &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
2062 }
2063
2064 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
2065
2066 /* PAT MSR (host value is validated while checking host-state during VM-entry). */
2067 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
2068 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64HostPatMsr.u;
2069
2070 /* We don't support IA32_BNDCFGS MSR yet. */
2071}
2072
2073
2074/**
2075 * Loads host segment registers, GDTR, IDTR, LDTR and TR as part of VM-exit.
2076 *
2077 * @param pVCpu The cross context virtual CPU structure.
2078 */
2079IEM_STATIC void iemVmxVmexitLoadHostSegRegs(PVMCPU pVCpu)
2080{
2081 /*
2082 * Load host segment registers, GDTR, IDTR, LDTR and TR.
2083 * See Intel spec. 27.5.2 "Loading Host Segment and Descriptor-Table Registers".
2084 *
2085 * Warning! Be careful to not touch fields that are reserved by VT-x,
2086 * e.g. segment limit high bits stored in segment attributes (in bits 11:8).
2087 */
2088 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2089 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2090
2091 /* CS, SS, ES, DS, FS, GS. */
2092 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
2093 {
2094 RTSEL const HostSel = iemVmxVmcsGetHostSelReg(pVmcs, iSegReg);
2095 bool const fUnusable = RT_BOOL(HostSel == 0);
2096
2097 /* Selector. */
2098 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel = HostSel;
2099 pVCpu->cpum.GstCtx.aSRegs[iSegReg].ValidSel = HostSel;
2100 pVCpu->cpum.GstCtx.aSRegs[iSegReg].fFlags = CPUMSELREG_FLAGS_VALID;
2101
2102 /* Limit. */
2103 pVCpu->cpum.GstCtx.aSRegs[iSegReg].u32Limit = 0xffffffff;
2104
2105 /* Base and Attributes. */
2106 switch (iSegReg)
2107 {
2108 case X86_SREG_CS:
2109 {
2110 pVCpu->cpum.GstCtx.cs.u64Base = 0;
2111 pVCpu->cpum.GstCtx.cs.Attr.n.u4Type = X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED;
2112 pVCpu->cpum.GstCtx.cs.Attr.n.u1DescType = 1;
2113 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = 0;
2114 pVCpu->cpum.GstCtx.cs.Attr.n.u1Present = 1;
2115 pVCpu->cpum.GstCtx.cs.Attr.n.u1Long = fHostInLongMode;
2116 pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig = !fHostInLongMode;
2117 pVCpu->cpum.GstCtx.cs.Attr.n.u1Granularity = 1;
2118 Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Unusable);
2119 Assert(!fUnusable);
2120 break;
2121 }
2122
2123 case X86_SREG_SS:
2124 case X86_SREG_ES:
2125 case X86_SREG_DS:
2126 {
2127 pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base = 0;
2128 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2129 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DescType = 1;
2130 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u2Dpl = 0;
2131 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Present = 1;
2132 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DefBig = 1;
2133 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Granularity = 1;
2134 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable = fUnusable;
2135 break;
2136 }
2137
2138 case X86_SREG_FS:
2139 {
2140 Assert(X86_IS_CANONICAL(pVmcs->u64HostFsBase.u));
2141 pVCpu->cpum.GstCtx.fs.u64Base = !fUnusable ? pVmcs->u64HostFsBase.u : 0;
2142 pVCpu->cpum.GstCtx.fs.Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2143 pVCpu->cpum.GstCtx.fs.Attr.n.u1DescType = 1;
2144 pVCpu->cpum.GstCtx.fs.Attr.n.u2Dpl = 0;
2145 pVCpu->cpum.GstCtx.fs.Attr.n.u1Present = 1;
2146 pVCpu->cpum.GstCtx.fs.Attr.n.u1DefBig = 1;
2147 pVCpu->cpum.GstCtx.fs.Attr.n.u1Granularity = 1;
2148 pVCpu->cpum.GstCtx.fs.Attr.n.u1Unusable = fUnusable;
2149 break;
2150 }
2151
2152 case X86_SREG_GS:
2153 {
2154 Assert(X86_IS_CANONICAL(pVmcs->u64HostGsBase.u));
2155 pVCpu->cpum.GstCtx.gs.u64Base = !fUnusable ? pVmcs->u64HostGsBase.u : 0;
2156 pVCpu->cpum.GstCtx.gs.Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2157 pVCpu->cpum.GstCtx.gs.Attr.n.u1DescType = 1;
2158 pVCpu->cpum.GstCtx.gs.Attr.n.u2Dpl = 0;
2159 pVCpu->cpum.GstCtx.gs.Attr.n.u1Present = 1;
2160 pVCpu->cpum.GstCtx.gs.Attr.n.u1DefBig = 1;
2161 pVCpu->cpum.GstCtx.gs.Attr.n.u1Granularity = 1;
2162 pVCpu->cpum.GstCtx.gs.Attr.n.u1Unusable = fUnusable;
2163 break;
2164 }
2165 }
2166 }
2167
2168 /* TR. */
2169 Assert(X86_IS_CANONICAL(pVmcs->u64HostTrBase.u));
2170 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1Unusable);
2171 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->HostTr;
2172 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->HostTr;
2173 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2174 pVCpu->cpum.GstCtx.tr.u32Limit = X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN;
2175 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64HostTrBase.u;
2176 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2177 pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType = 0;
2178 pVCpu->cpum.GstCtx.tr.Attr.n.u2Dpl = 0;
2179 pVCpu->cpum.GstCtx.tr.Attr.n.u1Present = 1;
2180 pVCpu->cpum.GstCtx.tr.Attr.n.u1DefBig = 0;
2181 pVCpu->cpum.GstCtx.tr.Attr.n.u1Granularity = 0;
2182
2183 /* LDTR. */
2184 pVCpu->cpum.GstCtx.ldtr.Sel = 0;
2185 pVCpu->cpum.GstCtx.ldtr.ValidSel = 0;
2186 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2187 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
2188 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
2189 pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Unusable = 1;
2190
2191 /* GDTR. */
2192 Assert(X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u));
2193 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64HostGdtrBase.u;
2194 pVCpu->cpum.GstCtx.gdtr.cbGdt = 0xffff;
2195
2196 /* IDTR.*/
2197 Assert(X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u));
2198 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64HostIdtrBase.u;
2199 pVCpu->cpum.GstCtx.idtr.cbIdt = 0xffff;
2200}
2201
2202
2203/**
2204 * Checks host PDPTEs as part of VM-exit.
2205 *
2206 * @param pVCpu The cross context virtual CPU structure.
2207 * @param uExitReason The VM-exit reason (for logging purposes).
2208 */
2209IEM_STATIC int iemVmxVmexitCheckHostPdptes(PVMCPU pVCpu, uint32_t uExitReason)
2210{
2211 /*
2212 * Check host PDPTEs.
2213 * See Intel spec. 27.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
2214 */
2215 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2216 const char *const pszFailure = "VMX-abort";
2217 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2218
2219 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
2220 && !fHostInLongMode)
2221 {
2222 uint64_t const uHostCr3 = pVCpu->cpum.GstCtx.cr3 & X86_CR3_PAE_PAGE_MASK;
2223 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
2224 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uHostCr3, sizeof(aPdptes));
2225 if (RT_SUCCESS(rc))
2226 {
2227 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
2228 {
2229 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
2230 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
2231 { /* likely */ }
2232 else
2233 {
2234 VMXVDIAG const enmDiag = iemVmxGetDiagVmexitPdpteRsvd(iPdpte);
2235 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2236 }
2237 }
2238 }
2239 else
2240 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_HostPdpteCr3ReadPhys);
2241 }
2242
2243 NOREF(pszFailure);
2244 NOREF(uExitReason);
2245 return VINF_SUCCESS;
2246}
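
/*
 * Illustrative example of the PDPTE check above: a present, 4K-aligned PDPTE with no
 * reserved (MBZ) bits set passes; the physical address is made up for the example.
 */
#if 0
static bool iemVmxExampleIsHostPdpteOk(void)
{
    X86PDPE Pdpte;
    Pdpte.u = UINT64_C(0x0000000012345000) | X86_PDPE_P;  /* Present + page-aligned address, no MBZ bits. */
    return !(Pdpte.u & X86_PDPE_P)
        || !(Pdpte.u & X86_PDPE_PAE_MBZ_MASK);            /* Same predicate as the loop above. */
}
#endif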
2247
2248
2249/**
2250 * Loads the host MSRs from the VM-exit auto-load MSRs area as part of VM-exit.
2251 *
2252 * @returns VBox status code.
2253 * @param pVCpu The cross context virtual CPU structure.
2254 * @param uExitReason The VM-exit reason (for logging purposes).
2255 */
2256IEM_STATIC int iemVmxVmexitLoadHostAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
2257{
2258 /*
2259 * Load host MSRs.
2260 * See Intel spec. 27.6 "Loading MSRs".
2261 */
2262 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2263 const char *const pszFailure = "VMX-abort";
2264
2265 /*
2266 * The VM-exit MSR-load area address need not be a valid guest-physical address if the
2267 * VM-exit MSR load count is 0. If this is the case, bail early without reading it.
2268 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
2269 */
2270 uint32_t const cMsrs = pVmcs->u32ExitMsrLoadCount;
2271 if (!cMsrs)
2272 return VINF_SUCCESS;
2273
2274 /*
2275 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count
2276 * is exceeded including possibly raising #MC exceptions during VMX transition. Our
2277 * implementation causes a VMX-abort followed by a triple-fault.
2278 */
2279 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
2280 if (fIsMsrCountValid)
2281 { /* likely */ }
2282 else
2283 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadCount);
2284
2285 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrLoad.u;
2286 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
2287 GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
2288 if (RT_SUCCESS(rc))
2289 {
2290 PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
2291 Assert(pMsr);
2292 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
2293 {
2294 if ( !pMsr->u32Reserved
2295 && pMsr->u32Msr != MSR_K8_FS_BASE
2296 && pMsr->u32Msr != MSR_K8_GS_BASE
2297 && pMsr->u32Msr != MSR_K6_EFER
2298 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
2299 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
2300 {
2301 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
2302 if (rcStrict == VINF_SUCCESS)
2303 continue;
2304
2305 /*
2306 * If we're in ring-0, we cannot handle returns to ring-3 at this point and still continue the VM-exit.
2307 * If the guest hypervisor loads MSRs that require ring-3 handling, we cause a VMX-abort,
2308 * recording the MSR index in the auxiliary info. field and indicating it further with our
2309 * own, specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0
2310 * if possible, or come up with a better, generic solution.
2311 */
2312 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2313 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
2314 ? kVmxVDiag_Vmexit_MsrLoadRing3
2315 : kVmxVDiag_Vmexit_MsrLoad;
2316 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2317 }
2318 else
2319 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadRsvd);
2320 }
2321 }
2322 else
2323 {
2324 AssertMsgFailed(("VM-exit: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
2325 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadPtrReadPhys);
2326 }
2327
2328 NOREF(uExitReason);
2329 NOREF(pszFailure);
2330 return VINF_SUCCESS;
2331}
2332
2333
2334/**
2335 * Loads the host state as part of VM-exit.
2336 *
2337 * @returns Strict VBox status code.
2338 * @param pVCpu The cross context virtual CPU structure.
2339 * @param uExitReason The VM-exit reason (for logging purposes).
2340 */
2341IEM_STATIC VBOXSTRICTRC iemVmxVmexitLoadHostState(PVMCPU pVCpu, uint32_t uExitReason)
2342{
2343 /*
2344 * Load host state.
2345 * See Intel spec. 27.5 "Loading Host State".
2346 */
2347 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2348 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2349
2350 /* We cannot return from a long-mode guest to a host that is not in long mode. */
2351 if ( CPUMIsGuestInLongMode(pVCpu)
2352 && !fHostInLongMode)
2353 {
2354 Log(("VM-exit from long-mode guest to host not in long-mode -> VMX-Abort\n"));
2355 return iemVmxAbort(pVCpu, VMXABORT_HOST_NOT_IN_LONG_MODE);
2356 }
2357
2358 iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);
2359 iemVmxVmexitLoadHostSegRegs(pVCpu);
2360
2361 /*
2362 * Load host RIP, RSP and RFLAGS.
2363 * See Intel spec. 27.5.3 "Loading Host RIP, RSP and RFLAGS"
2364 */
2365 pVCpu->cpum.GstCtx.rip = pVmcs->u64HostRip.u;
2366 pVCpu->cpum.GstCtx.rsp = pVmcs->u64HostRsp.u;
2367 pVCpu->cpum.GstCtx.rflags.u = X86_EFL_1;
2368
2369 /* Update non-register state. */
2370 iemVmxVmexitRestoreForceFlags(pVCpu);
2371
2372 /* Clear address range monitoring. */
2373 EMMonitorWaitClear(pVCpu);
2374
2375 /* Perform the VMX transition (PGM updates). */
2376 VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
2377 if (rcStrict == VINF_SUCCESS)
2378 {
2379 /* Check host PDPTEs (only when we've fully switched page tables). */
2380 /** @todo r=ramshankar: I don't know if PGM does this for us already or not... */
2381 int rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);
2382 if (RT_FAILURE(rc))
2383 {
2384 Log(("VM-exit failed while restoring host PDPTEs -> VMX-Abort\n"));
2385 return iemVmxAbort(pVCpu, VMXBOART_HOST_PDPTE);
2386 }
2387 }
2388 else if (RT_SUCCESS(rcStrict))
2389 {
2390 Log3(("VM-exit: iemVmxWorldSwitch returns %Rrc (uExitReason=%u) -> Setting passup status\n", VBOXSTRICTRC_VAL(rcStrict),
2391 uExitReason));
2392 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2393 }
2394 else
2395 {
2396 Log3(("VM-exit: iemVmxWorldSwitch failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
2397 return VBOXSTRICTRC_VAL(rcStrict);
2398 }
2399
2400 Assert(rcStrict == VINF_SUCCESS);
2401
2402 /* Load MSRs from the VM-exit auto-load MSR area. */
2403 int rc = iemVmxVmexitLoadHostAutoMsrs(pVCpu, uExitReason);
2404 if (RT_FAILURE(rc))
2405 {
2406 Log(("VM-exit failed while loading host MSRs -> VMX-Abort\n"));
2407 return iemVmxAbort(pVCpu, VMXABORT_LOAD_HOST_MSR);
2408 }
2409
2410 return rcStrict;
2411}
2412
2413
2414/**
2415 * Gets VM-exit instruction information along with any displacement for an
2416 * instruction VM-exit.
2417 *
2418 * @returns The VM-exit instruction information.
2419 * @param pVCpu The cross context virtual CPU structure.
2420 * @param uExitReason The VM-exit reason.
2421 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_XXX).
2422 * @param pGCPtrDisp Where to store the displacement field. Optional, can be
2423 * NULL.
2424 */
2425IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, PRTGCPTR pGCPtrDisp)
2426{
2427 RTGCPTR GCPtrDisp;
2428 VMXEXITINSTRINFO ExitInstrInfo;
2429 ExitInstrInfo.u = 0;
2430
2431 /*
2432 * Get and parse the ModR/M byte from our decoded opcodes.
2433 */
2434 uint8_t bRm;
2435 uint8_t const offModRm = pVCpu->iem.s.offModRm;
2436 IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
2437 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
2438 {
2439 /*
2440 * ModR/M indicates register addressing.
2441 *
2442 * The primary/secondary register operands are reported in the iReg1 or iReg2
2443 * fields depending on whether it is a read/write form.
2444 */
2445 uint8_t idxReg1;
2446 uint8_t idxReg2;
2447 if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
2448 {
2449 idxReg1 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
2450 idxReg2 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
2451 }
2452 else
2453 {
2454 idxReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
2455 idxReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
2456 }
2457 ExitInstrInfo.All.u2Scaling = 0;
2458 ExitInstrInfo.All.iReg1 = idxReg1;
2459 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
2460 ExitInstrInfo.All.fIsRegOperand = 1;
2461 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
2462 ExitInstrInfo.All.iSegReg = 0;
2463 ExitInstrInfo.All.iIdxReg = 0;
2464 ExitInstrInfo.All.fIdxRegInvalid = 1;
2465 ExitInstrInfo.All.iBaseReg = 0;
2466 ExitInstrInfo.All.fBaseRegInvalid = 1;
2467 ExitInstrInfo.All.iReg2 = idxReg2;
2468
2469 /* Displacement not applicable for register addressing. */
2470 GCPtrDisp = 0;
2471 }
2472 else
2473 {
2474 /*
2475 * ModR/M indicates memory addressing.
2476 */
2477 uint8_t uScale = 0;
2478 bool fBaseRegValid = false;
2479 bool fIdxRegValid = false;
2480 uint8_t iBaseReg = 0;
2481 uint8_t iIdxReg = 0;
2482 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
2483 {
2484 /*
2485 * Parse the ModR/M, displacement for 16-bit addressing mode.
2486 * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
2487 */
2488 uint16_t u16Disp = 0;
2489 uint8_t const offDisp = offModRm + sizeof(bRm);
2490 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
2491 {
2492 /* Displacement without any registers. */
2493 IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
2494 }
2495 else
2496 {
2497 /* Register (index and base). */
2498 switch (bRm & X86_MODRM_RM_MASK)
2499 {
2500 case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
2501 case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
2502 case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
2503 case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
2504 case 4: fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
2505 case 5: fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
2506 case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
2507 case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
2508 }
2509
2510 /* Register + displacement. */
2511 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2512 {
2513 case 0: break;
2514 case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
2515 case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp); break;
2516 default:
2517 {
2518 /* Register addressing, handled at the beginning. */
2519 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
2520 break;
2521 }
2522 }
2523 }
2524
2525 Assert(!uScale); /* There's no scaling/SIB byte for 16-bit addressing. */
2526 GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
2527 }
2528 else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
2529 {
2530 /*
2531 * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
2532 * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
2533 */
2534 uint32_t u32Disp = 0;
2535 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
2536 {
2537 /* Displacement without any registers. */
2538 uint8_t const offDisp = offModRm + sizeof(bRm);
2539 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
2540 }
2541 else
2542 {
2543 /* Register (and perhaps scale, index and base). */
2544 uint8_t offDisp = offModRm + sizeof(bRm);
2545 iBaseReg = (bRm & X86_MODRM_RM_MASK);
2546 if (iBaseReg == 4)
2547 {
2548 /* An SIB byte follows the ModR/M byte, parse it. */
2549 uint8_t bSib;
2550 uint8_t const offSib = offModRm + sizeof(bRm);
2551 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
2552
2553 /* A displacement may follow SIB, update its offset. */
2554 offDisp += sizeof(bSib);
2555
2556 /* Get the scale. */
2557 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
2558
2559 /* Get the index register. */
2560 iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
2561 fIdxRegValid = RT_BOOL(iIdxReg != 4);
2562
2563 /* Get the base register. */
2564 iBaseReg = bSib & X86_SIB_BASE_MASK;
2565 fBaseRegValid = true;
2566 if (iBaseReg == 5)
2567 {
2568 if ((bRm & X86_MODRM_MOD_MASK) == 0)
2569 {
2570 /* Mod is 0 implies a 32-bit displacement with no base. */
2571 fBaseRegValid = false;
2572 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
2573 }
2574 else
2575 {
2576 /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
2577 iBaseReg = X86_GREG_xBP;
2578 }
2579 }
2580 }
2581
2582 /* Register + displacement. */
2583 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2584 {
2585 case 0: /* Handled above */ break;
2586 case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
2587 case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp); break;
2588 default:
2589 {
2590 /* Register addressing, handled at the beginning. */
2591 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
2592 break;
2593 }
2594 }
2595 }
2596
2597 GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
2598 }
2599 else
2600 {
2601 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
2602
2603 /*
2604 * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
2605 * See Intel instruction spec. 2.2 "IA-32e Mode".
2606 */
2607 uint64_t u64Disp = 0;
2608 bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
2609 if (fRipRelativeAddr)
2610 {
2611 /*
2612 * RIP-relative addressing mode.
2613 *
2614 * The displacement is 32-bit signed implying an offset range of +/-2G.
2615 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
2616 */
2617 uint8_t const offDisp = offModRm + sizeof(bRm);
2618 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
2619 }
2620 else
2621 {
2622 uint8_t offDisp = offModRm + sizeof(bRm);
2623
2624 /*
2625 * Register (and perhaps scale, index and base).
2626 *
2627 * REX.B extends the most-significant bit of the base register. However, REX.B
2628 * is ignored while determining whether an SIB follows the opcode. Hence, we
2629 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
2630 *
2631 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
2632 */
2633 iBaseReg = (bRm & X86_MODRM_RM_MASK);
2634 if (iBaseReg == 4)
2635 {
2636 /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
2637 uint8_t bSib;
2638 uint8_t const offSib = offModRm + sizeof(bRm);
2639 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
2640
2641 /* Displacement may follow SIB, update its offset. */
2642 offDisp += sizeof(bSib);
2643
2644 /* Get the scale. */
2645 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
2646
2647 /* Get the index. */
2648 iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
2649 fIdxRegValid = RT_BOOL(iIdxReg != 4); /* R12 -can- be used as an index register. */
2650
2651 /* Get the base. */
2652 iBaseReg = (bSib & X86_SIB_BASE_MASK);
2653 fBaseRegValid = true;
2654 if (iBaseReg == 5)
2655 {
2656 if ((bRm & X86_MODRM_MOD_MASK) == 0)
2657 {
2658 /* Mod is 0 implies a signed 32-bit displacement with no base. */
2659 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
2660 }
2661 else
2662 {
2663 /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
2664 iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
2665 }
2666 }
2667 }
2668 iBaseReg |= pVCpu->iem.s.uRexB;
2669
2670 /* Register + displacement. */
2671 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
2672 {
2673 case 0: /* Handled above */ break;
2674 case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp); break;
2675 case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
2676 default:
2677 {
2678 /* Register addressing, handled at the beginning. */
2679 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
2680 break;
2681 }
2682 }
2683 }
2684
2685 GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
2686 }
2687
2688 /*
2689 * The primary or secondary register operand is reported in iReg2 depending
2690 * on whether the primary operand is in read/write form.
2691 */
2692 uint8_t idxReg2;
2693 if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
2694 {
2695 idxReg2 = bRm & X86_MODRM_RM_MASK;
2696 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
2697 idxReg2 |= pVCpu->iem.s.uRexB;
2698 }
2699 else
2700 {
2701 idxReg2 = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
2702 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
2703 idxReg2 |= pVCpu->iem.s.uRexReg;
2704 }
2705 ExitInstrInfo.All.u2Scaling = uScale;
2706 ExitInstrInfo.All.iReg1 = 0; /* Not applicable for memory addressing. */
2707 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
2708 ExitInstrInfo.All.fIsRegOperand = 0;
2709 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
2710 ExitInstrInfo.All.iSegReg = pVCpu->iem.s.iEffSeg;
2711 ExitInstrInfo.All.iIdxReg = iIdxReg;
2712 ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
2713 ExitInstrInfo.All.iBaseReg = iBaseReg;
2714 ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
2715 ExitInstrInfo.All.iReg2 = idxReg2;
2716 }
2717
2718 /*
2719 * Handle exceptions to the norm for certain instructions.
2720 * (e.g. some instructions convey an instruction identity in place of iReg2).
2721 */
2722 switch (uExitReason)
2723 {
2724 case VMX_EXIT_GDTR_IDTR_ACCESS:
2725 {
2726 Assert(VMXINSTRID_IS_VALID(uInstrId));
2727 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
2728 ExitInstrInfo.GdtIdt.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
2729 ExitInstrInfo.GdtIdt.u2Undef0 = 0;
2730 break;
2731 }
2732
2733 case VMX_EXIT_LDTR_TR_ACCESS:
2734 {
2735 Assert(VMXINSTRID_IS_VALID(uInstrId));
2736 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
2737 ExitInstrInfo.LdtTr.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
2738 ExitInstrInfo.LdtTr.u2Undef0 = 0;
2739 break;
2740 }
2741
2742 case VMX_EXIT_RDRAND:
2743 case VMX_EXIT_RDSEED:
2744 {
2745 Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
2746 break;
2747 }
2748 }
2749
2750 /* Update displacement and return the constructed VM-exit instruction information field. */
2751 if (pGCPtrDisp)
2752 *pGCPtrDisp = GCPtrDisp;
2753
2754 return ExitInstrInfo.u;
2755}
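
/*
 * Worked example for the memory-operand path above: for a 32-bit "vmread dword [ecx+edx*4+0x10], eax"
 * the decoded bytes are ModR/M=0x44 (mod=01, reg=000, rm=100 -> SIB and disp8 follow) and SIB=0x91
 * (scale=2 i.e. *4, index=EDX, base=ECX), followed by disp8=0x10. The code above then reports
 * u2Scaling=2, iIdxReg=EDX, iBaseReg=ECX and fIsRegOperand=0 in the VM-exit instruction information,
 * and returns the sign-extended displacement (0x10) via pGCPtrDisp for use as the VM-exit qualification.
 */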
2756
2757
2758/**
2759 * VMX VM-exit handler.
2760 *
2761 * @returns Strict VBox status code.
2762 * @retval VINF_VMX_VMEXIT when the VM-exit is successful.
2763 * @retval VINF_EM_TRIPLE_FAULT when VM-exit is unsuccessful and leads to a
2764 * triple-fault.
2765 *
2766 * @param pVCpu The cross context virtual CPU structure.
2767 * @param uExitReason The VM-exit reason.
2768 *
2769 * @remarks Make sure VM-exit qualification is updated before calling this
2770 * function!
2771 */
2772IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason)
2773{
2774 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2775 Assert(pVmcs);
2776
2777 pVmcs->u32RoExitReason = uExitReason;
2778
2779 /** @todo NSTVMX: IEMGetCurrentXcpt will be VM-exit interruption info. */
2780 /** @todo NSTVMX: The source event should be recorded in IDT-vectoring info
2781 * during injection. */
2782
2783 /*
2784 * Save the guest state back into the VMCS.
2785 * We only need to save the state when the VM-entry was successful.
2786 */
2787 bool const fVmentryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
2788 if (!fVmentryFailed)
2789 {
2790 iemVmxVmexitSaveGuestState(pVCpu, uExitReason);
2791 int rc = iemVmxVmexitSaveGuestAutoMsrs(pVCpu, uExitReason);
2792 if (RT_SUCCESS(rc))
2793 { /* likely */ }
2794 else
2795 return iemVmxAbort(pVCpu, VMXABORT_SAVE_GUEST_MSRS);
2796 }
2797
2798 /*
2799 * The high bits of the VM-exit reason are only relevant when the VM-exit occurs in
2800 * enclave mode/SMM which we don't support yet. If we ever add support for it, we can
2801 * pass just the lower bits, till then an assert should suffice.
2802 */
2803 Assert(!RT_HI_U16(uExitReason));
2804
2805 VBOXSTRICTRC rcStrict = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
2806 if (RT_FAILURE(rcStrict))
2807 LogFunc(("Loading host-state failed. uExitReason=%u rc=%Rrc\n", uExitReason, VBOXSTRICTRC_VAL(rcStrict)));
2808
2809 /* We're no longer in nested-guest execution mode. */
2810 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
2811
2812 Assert(rcStrict == VINF_SUCCESS);
2813 return VINF_VMX_VMEXIT;
2814}
2815
2816
2817/**
2818 * VMX VM-exit handler for VM-exits due to instruction execution.
2819 *
2820 * This is intended for instructions where the caller provides all the relevant
2821 * VM-exit information.
2822 *
2823 * @returns Strict VBox status code.
2824 * @param pVCpu The cross context virtual CPU structure.
2825 * @param pExitInfo Pointer to the VM-exit instruction information struct.
2826 */
2827DECLINLINE(VBOXSTRICTRC) iemVmxVmexitInstrWithInfo(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
2828{
2829 /*
2830 * For instructions where any of the following fields are not applicable:
2831 * - VM-exit instruction info. is undefined.
2832 * - VM-exit qualification must be cleared.
2833 * - VM-exit guest-linear address is undefined.
2834 * - VM-exit guest-physical address is undefined.
2835 *
2836 * The VM-exit instruction length is mandatory for all VM-exits that are caused by
2837 * instruction execution. For VM-exits that are not due to instruction execution this
2838 * field is undefined.
2839 *
2840 * In our implementation in IEM, all undefined fields are generally cleared. However,
2841 * if the caller supplies information (from say the physical CPU directly) it is
2842 * then possible that the undefined fields are not cleared.
2843 *
2844 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2845 * See Intel spec. 27.2.4 "Information for VM Exits Due to Instruction Execution".
2846 */
2847 Assert(pExitInfo);
2848 AssertMsg(pExitInfo->uReason <= VMX_EXIT_MAX, ("uReason=%u\n", pExitInfo->uReason));
2849 AssertMsg(pExitInfo->cbInstr >= 1 && pExitInfo->cbInstr <= 15,
2850 ("uReason=%u cbInstr=%u\n", pExitInfo->uReason, pExitInfo->cbInstr));
2851
2852 /* Update all the relevant fields from the VM-exit instruction information struct. */
2853 iemVmxVmcsSetExitInstrInfo(pVCpu, pExitInfo->InstrInfo.u);
2854 iemVmxVmcsSetExitQual(pVCpu, pExitInfo->u64Qual);
2855 iemVmxVmcsSetExitGuestLinearAddr(pVCpu, pExitInfo->u64GuestLinearAddr);
2856 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, pExitInfo->u64GuestPhysAddr);
2857 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
2858
2859 /* Perform the VM-exit. */
2860 return iemVmxVmexit(pVCpu, pExitInfo->uReason);
2861}
2862
2863
2864/**
2865 * VMX VM-exit handler for VM-exits due to instruction execution.
2866 *
2867 * This is intended for instructions that only provide the VM-exit instruction
2868 * length.
2869 *
2870 * @param pVCpu The cross context virtual CPU structure.
2871 * @param uExitReason The VM-exit reason.
2872 * @param cbInstr The instruction length in bytes.
2873 */
2874IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstr(PVMCPU pVCpu, uint32_t uExitReason, uint8_t cbInstr)
2875{
2876 VMXVEXITINFO ExitInfo;
2877 RT_ZERO(ExitInfo);
2878 ExitInfo.uReason = uExitReason;
2879 ExitInfo.cbInstr = cbInstr;
2880
2881#ifdef VBOX_STRICT
2882 /* To prevent us from shooting ourselves in the foot. Maybe remove later. */
2883 switch (uExitReason)
2884 {
2885 case VMX_EXIT_INVEPT:
2886 case VMX_EXIT_INVPCID:
2887 case VMX_EXIT_LDTR_TR_ACCESS:
2888 case VMX_EXIT_GDTR_IDTR_ACCESS:
2889 case VMX_EXIT_VMCLEAR:
2890 case VMX_EXIT_VMPTRLD:
2891 case VMX_EXIT_VMPTRST:
2892 case VMX_EXIT_VMREAD:
2893 case VMX_EXIT_VMWRITE:
2894 case VMX_EXIT_VMXON:
2895 case VMX_EXIT_XRSTORS:
2896 case VMX_EXIT_XSAVES:
2897 case VMX_EXIT_RDRAND:
2898 case VMX_EXIT_RDSEED:
2899 case VMX_EXIT_IO_INSTR:
2900 AssertMsgFailedReturn(("Use iemVmxVmexitInstrNeedsInfo for uExitReason=%u\n", uExitReason), VERR_IEM_IPE_5);
2901 break;
2902 }
2903#endif
2904
2905 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2906}
2907
2908
2909/**
2910 * VMX VM-exit handler for VM-exits due to instruction execution.
2911 *
2912 * This is intended for instructions that have a ModR/M byte and update the VM-exit
2913 * instruction information and VM-exit qualification fields.
2914 *
2915 * @param pVCpu The cross context virtual CPU structure.
2916 * @param uExitReason The VM-exit reason.
2917 * @param uInstrId The instruction identity (VMXINSTRID_XXX).
2918 * @param cbInstr The instruction length in bytes.
2919 *
2920 * @remarks Do not use this for INS/OUTS instructions.
2921 */
2922IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrNeedsInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr)
2923{
2924 VMXVEXITINFO ExitInfo;
2925 RT_ZERO(ExitInfo);
2926 ExitInfo.uReason = uExitReason;
2927 ExitInfo.cbInstr = cbInstr;
2928
2929 /*
2930 * Update the VM-exit qualification field with displacement bytes.
2931 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2932 */
2933 switch (uExitReason)
2934 {
2935 case VMX_EXIT_INVEPT:
2936 case VMX_EXIT_INVPCID:
2937 case VMX_EXIT_LDTR_TR_ACCESS:
2938 case VMX_EXIT_GDTR_IDTR_ACCESS:
2939 case VMX_EXIT_VMCLEAR:
2940 case VMX_EXIT_VMPTRLD:
2941 case VMX_EXIT_VMPTRST:
2942 case VMX_EXIT_VMREAD:
2943 case VMX_EXIT_VMWRITE:
2944 case VMX_EXIT_VMXON:
2945 case VMX_EXIT_XRSTORS:
2946 case VMX_EXIT_XSAVES:
2947 case VMX_EXIT_RDRAND:
2948 case VMX_EXIT_RDSEED:
2949 {
2950 /* Construct the VM-exit instruction information. */
2951 RTGCPTR GCPtrDisp;
2952 uint32_t const uInstrInfo = iemVmxGetExitInstrInfo(pVCpu, uExitReason, uInstrId, &GCPtrDisp);
2953
2954 /* Update the VM-exit instruction information. */
2955 ExitInfo.InstrInfo.u = uInstrInfo;
2956
2957 /* Update the VM-exit qualification. */
2958 ExitInfo.u64Qual = GCPtrDisp;
2959 break;
2960 }
2961
2962 default:
2963 AssertMsgFailedReturn(("Use instruction-specific handler\n"), VERR_IEM_IPE_5);
2964 break;
2965 }
2966
2967 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2968}
2969
2970
2971/**
2972 * Checks whether an I/O instruction for the given port is intercepted (causes a
2973 * VM-exit) or not.
2974 *
2975 * @returns @c true if the instruction is intercepted, @c false otherwise.
2976 * @param pVCpu The cross context virtual CPU structure.
2977 * @param u16Port The I/O port being accessed by the instruction.
2978 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
2979 */
2980IEM_STATIC bool iemVmxIsIoInterceptSet(PVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
2981{
2982 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2983 Assert(pVmcs);
2984
2985 /*
2986 * Check whether the I/O instruction must cause a VM-exit or not.
2987 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2988 */
2989 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_UNCOND_IO_EXIT)
2990 return true;
2991
2992 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
2993 {
2994 uint8_t const *pbIoBitmapA = (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvIoBitmap);
2995 uint8_t const *pbIoBitmapB = (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvIoBitmap) + VMX_V_IO_BITMAP_A_SIZE;
2996 Assert(pbIoBitmapA);
2997 Assert(pbIoBitmapB);
2998 return HMVmxGetIoBitmapPermission(pbIoBitmapA, pbIoBitmapB, u16Port, cbAccess);
2999 }
3000
3001 return false;
3002}
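/* Illustrative note: HMVmxGetIoBitmapPermission is expected to perform the usual VMX I/O-bitmap
   lookup, with bitmap A covering ports 0x0000-0x7fff and bitmap B covering ports 0x8000-0xffff,
   one bit per port. For example, a 2-byte access to port 0x60 consults the bits for ports 0x60
   and 0x61 in bitmap A (byte 0x0c, bits 0 and 1); if either bit is set, the access is reported
   as intercepted by the function above. */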
3003
3004
3005/**
3006 * VMX VM-exit handler for VM-exits due to INVLPG.
3007 *
3008 * @param pVCpu The cross context virtual CPU structure.
3009 * @param GCPtrPage The guest-linear address of the page being invalidated.
3010 * @param cbInstr The instruction length in bytes.
3011 */
3012IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrInvlpg(PVMCPU pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr)
3013{
3014 VMXVEXITINFO ExitInfo;
3015 RT_ZERO(ExitInfo);
3016 ExitInfo.uReason = VMX_EXIT_INVLPG;
3017 ExitInfo.cbInstr = cbInstr;
3018 ExitInfo.u64Qual = GCPtrPage;
3019 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(ExitInfo.u64Qual));
3020
3021 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3022}
3023
3024
3025/**
3026 * VMX VM-exit handler for VM-exits due to LMSW.
3027 *
3028 * @returns Strict VBox status code.
3029 * @param pVCpu The cross context virtual CPU structure.
3030 * @param uGuestCr0 The current guest CR0.
3031 * @param pu16NewMsw The machine-status word specified in LMSW's source
3032 * operand. This will be updated depending on the VMX
3033 * guest/host CR0 mask if LMSW is not intercepted.
3034 * @param GCPtrEffDst The guest-linear address of the source operand in case
3035 * of a memory operand. For a register operand, pass
3036 * NIL_RTGCPTR.
3037 * @param cbInstr The instruction length in bytes.
3038 */
3039IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrLmsw(PVMCPU pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw, RTGCPTR GCPtrEffDst,
3040 uint8_t cbInstr)
3041{
3042 /*
3043 * LMSW VM-exits are subject to the CR0 guest/host mask and the CR0 read shadow.
3044 *
3045 * See Intel spec. 24.6.6 "Guest/Host Masks and Read Shadows for CR0 and CR4".
3046 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3047 */
3048 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3049 Assert(pVmcs);
3050 Assert(pu16NewMsw);
3051
3052 bool fIntercept = false;
3053 uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
3054 uint32_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u;
3055
3056 /*
3057 * LMSW can never clear CR0.PE but it may set it. Hence, we handle the
3058 * CR0.PE case first, before the rest of the bits in the MSW.
3059 *
3060 * If CR0.PE is owned by the host and CR0.PE differs between the
3061 * MSW (source operand) and the read-shadow, we must cause a VM-exit.
3062 */
3063 if ( (fGstHostMask & X86_CR0_PE)
3064 && (*pu16NewMsw & X86_CR0_PE)
3065 && !(fReadShadow & X86_CR0_PE))
3066 fIntercept = true;
3067
3068 /*
3069 * If CR0.MP, CR0.EM or CR0.TS is owned by the host, and the corresponding
3070 * bits differ between the MSW (source operand) and the read-shadow, we must
3071 * cause a VM-exit.
3072 */
3073 uint32_t fGstHostLmswMask = fGstHostMask & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3074 if ((fReadShadow & fGstHostLmswMask) != (*pu16NewMsw & fGstHostLmswMask))
3075 fIntercept = true;
3076
3077 if (fIntercept)
3078 {
3079 Log2(("lmsw: Guest intercept -> VM-exit\n"));
3080
3081 VMXVEXITINFO ExitInfo;
3082 RT_ZERO(ExitInfo);
3083 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3084 ExitInfo.cbInstr = cbInstr;
3085
3086 bool const fMemOperand = RT_BOOL(GCPtrEffDst != NIL_RTGCPTR);
3087 if (fMemOperand)
3088 {
3089 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(GCPtrEffDst));
3090 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
3091 }
3092
3093 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
3094 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_LMSW)
3095 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_OP, fMemOperand)
3096 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_DATA, *pu16NewMsw);
3097
3098 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3099 }
3100
3101 /*
3102 * If LMSW did not cause a VM-exit, any CR0 bits in the range 0:3 that are set in the
3103 * CR0 guest/host mask must be left unmodified.
3104 *
3105 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3106 */
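    /* Worked example (illustrative): if the CR0 guest/host mask owns CR0.TS, the read shadow
       has TS=0 and the MSW also has TS=0, no VM-exit occurs above even though the guest's
       real CR0.TS may be 1; the merge below then keeps the guest's TS value and only the
       host-unowned MSW bits take effect. */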
3107 fGstHostLmswMask = fGstHostMask & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3108 *pu16NewMsw = (uGuestCr0 & fGstHostLmswMask) | (*pu16NewMsw & ~fGstHostLmswMask);
3109
3110 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3111}
3112
3113
3114/**
3115 * VMX VM-exit handler for VM-exits due to CLTS.
3116 *
3117 * @returns Strict VBox status code.
3118 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the CLTS instruction did not cause a
3119 * VM-exit but must not modify the guest CR0.TS bit.
3120 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the CLTS instruction did not cause a
3121 * VM-exit and modification to the guest CR0.TS bit is allowed (subject to
3122 * CR0 fixed bits in VMX operation).
3123 * @param pVCpu The cross context virtual CPU structure.
3124 * @param cbInstr The instruction length in bytes.
3125 */
3126IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrClts(PVMCPU pVCpu, uint8_t cbInstr)
3127{
3128 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3129 Assert(pVmcs);
3130
3131 uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
3132 uint32_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u;
3133
3134 /*
3135 * If CR0.TS is owned by the host:
3136 * - If CR0.TS is set in the read-shadow, we must cause a VM-exit.
3137 * - If CR0.TS is cleared in the read-shadow, no VM-exit is caused and the
3138 * CLTS instruction completes without clearing CR0.TS.
3139 *
3140 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3141 */
3142 if (fGstHostMask & X86_CR0_TS)
3143 {
3144 if (fReadShadow & X86_CR0_TS)
3145 {
3146 Log2(("clts: Guest intercept -> VM-exit\n"));
3147
3148 VMXVEXITINFO ExitInfo;
3149 RT_ZERO(ExitInfo);
3150 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3151 ExitInfo.cbInstr = cbInstr;
3152
3153 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
3154 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_CLTS);
3155 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3156 }
3157
3158 return VINF_VMX_MODIFIES_BEHAVIOR;
3159 }
3160
3161 /*
3162 * If CR0.TS is not owned by the host, the CLTS instruction operates normally
3163 * and may modify CR0.TS (subject to CR0 fixed bits in VMX operation).
3164 */
3165 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3166}
3167
3168
3169/**
3170 * VMX VM-exit handler for VM-exits due to 'Mov CR0,GReg' and 'Mov CR4,GReg'
3171 * (CR0/CR4 write).
3172 *
3173 * @returns Strict VBox status code.
3174 * @param pVCpu The cross context virtual CPU structure.
3175 * @param iCrReg The control register being written by the instruction
3176 * (either CR0 or CR4).
3177 * @param puNewCrX Pointer to the new CR0/CR4 value. Will be updated
3178 * if no VM-exit is caused.
3179 * @param iGReg The general register from which the CR0/CR4 value is
3180 * being loaded.
3181 * @param cbInstr The instruction length in bytes.
3182 */
3183IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr0Cr4(PVMCPU pVCpu, uint8_t iCrReg, uint64_t *puNewCrX, uint8_t iGReg,
3184 uint8_t cbInstr)
3185{
3186 Assert(puNewCrX);
3187 Assert(iCrReg == 0 || iCrReg == 4);
3188
3189 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3190 Assert(pVmcs);
3191
3192 uint64_t uGuestCrX;
3193 uint64_t fGstHostMask;
3194 uint64_t fReadShadow;
3195 if (iCrReg == 0)
3196 {
3197 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
3198 uGuestCrX = pVCpu->cpum.GstCtx.cr0;
3199 fGstHostMask = pVmcs->u64Cr0Mask.u;
3200 fReadShadow = pVmcs->u64Cr0ReadShadow.u;
3201 }
3202 else
3203 {
3204 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
3205 uGuestCrX = pVCpu->cpum.GstCtx.cr4;
3206 fGstHostMask = pVmcs->u64Cr4Mask.u;
3207 fReadShadow = pVmcs->u64Cr4ReadShadow.u;
3208 }
3209
3210 /*
3211 * For any CR0/CR4 bit owned by the host (in the CR0/CR4 guest/host mask), if the
3212 * corresponding bits differ between the source operand and the read-shadow,
3213 * we must cause a VM-exit.
3214 *
3215 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3216 */
3217 if ((fReadShadow & fGstHostMask) != (*puNewCrX & fGstHostMask))
3218 {
3219 Log2(("mov_Cr_Rd: (CR%u) Guest intercept -> VM-exit\n", iCrReg));
3220
3221 VMXVEXITINFO ExitInfo;
3222 RT_ZERO(ExitInfo);
3223 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3224 ExitInfo.cbInstr = cbInstr;
3225
3226 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, iCrReg)
3227 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3228 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3229 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3230 }
3231
3232 /*
3233 * If the Mov-to-CR0/CR4 did not cause a VM-exit, any bits owned by the host
3234 * must not be modified by the instruction.
3235 *
3236 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3237 */
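    /* Illustrative example: if the CR4 guest/host mask owns CR4.VMXE, the read shadow has
       VMXE=0 and the guest writes a value with VMXE=0, no VM-exit occurs above; the merge
       below nonetheless preserves the guest's real CR4.VMXE rather than taking it from the
       source operand. */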
3238 *puNewCrX = (uGuestCrX & fGstHostMask) | (*puNewCrX & ~fGstHostMask);
3239
3240 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3241}
3242
3243
3244/**
3245 * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR3' (CR3 read).
3246 *
3247 * @returns VBox strict status code.
3248 * @param pVCpu The cross context virtual CPU structure.
3249 * @param iGReg The general register to which the CR3 value is being stored.
3250 * @param cbInstr The instruction length in bytes.
3251 */
3252IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3253{
3254 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3255 Assert(pVmcs);
3256 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
3257
3258 /*
3259 * If the CR3-store exiting control is set, we must cause a VM-exit.
3260 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3261 */
3262 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT)
3263 {
3264 Log2(("mov_Rd_Cr: (CR3) Guest intercept -> VM-exit\n"));
3265
3266 VMXVEXITINFO ExitInfo;
3267 RT_ZERO(ExitInfo);
3268 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3269 ExitInfo.cbInstr = cbInstr;
3270
3271 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
3272 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ)
3273 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3274 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3275 }
3276
3277 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3278}
3279
3280
3281/**
3282 * VMX VM-exit handler for VM-exits due to 'Mov CR3,GReg' (CR3 write).
3283 *
3284 * @returns VBox strict status code.
3285 * @param pVCpu The cross context virtual CPU structure.
3286 * @param uNewCr3 The new CR3 value.
3287 * @param iGReg The general register from which the CR3 value is being
3288 * loaded.
3289 * @param cbInstr The instruction length in bytes.
3290 */
3291IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr3(PVMCPU pVCpu, uint64_t uNewCr3, uint8_t iGReg, uint8_t cbInstr)
3292{
3293 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3294 Assert(pVmcs);
3295
3296 /*
3297 * If the CR3-load exiting control is set and the new CR3 value does not
3298 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
3299 *
3300 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3301 */
3302 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR3_LOAD_EXIT)
3303 {
3304 uint32_t uCr3TargetCount = pVmcs->u32Cr3TargetCount;
3305 Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);
3306
3307 /* We must cause the VM-exit unless the new CR3 matches one of the CR3-target values; a CR3-target count of 0 therefore always intercepts. */
3308 bool fIntercept = true;
3309 for (uint32_t idxCr3Target = 0; idxCr3Target < uCr3TargetCount && fIntercept; idxCr3Target++)
3310 fIntercept = uNewCr3 != iemVmxVmcsGetCr3TargetValue(pVmcs, idxCr3Target);
3311
3312 if (fIntercept)
3313 {
3314 Log2(("mov_Cr_Rd: (CR3) Guest intercept -> VM-exit\n"));
3315
3316 VMXVEXITINFO ExitInfo;
3317 RT_ZERO(ExitInfo);
3318 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3319 ExitInfo.cbInstr = cbInstr;
3320 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
3321 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3322 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3323 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3324 }
3325 }
3326
3327 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3328}
3329
3330
3331/**
3332 * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR8' (CR8 read).
3333 *
3334 * @returns VBox strict status code.
3335 * @param pVCpu The cross context virtual CPU structure.
3336 * @param iGReg The general register to which the CR8 value is being stored.
3337 * @param cbInstr The instruction length in bytes.
3338 */
3339IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr8(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3340{
3341 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3342 Assert(pVmcs);
3343
3344 /*
3345 * If the CR8-store exiting control is set, we must cause a VM-exit.
3346 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3347 */
3348 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR8_STORE_EXIT)
3349 {
3350 Log2(("mov_Rd_Cr: (CR8) Guest intercept -> VM-exit\n"));
3351
3352 VMXVEXITINFO ExitInfo;
3353 RT_ZERO(ExitInfo);
3354 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3355 ExitInfo.cbInstr = cbInstr;
3356
3357 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
3358 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ)
3359 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3360 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3361 }
3362
3363 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3364}
3365
3366
3367/**
3368 * VMX VM-exit handler for VM-exits due to 'Mov CR8,GReg' (CR8 write).
3369 *
3370 * @returns VBox strict status code.
3371 * @param pVCpu The cross context virtual CPU structure.
3372 * @param iGReg The general register from which the CR8 value is being
3373 * loaded.
3374 * @param cbInstr The instruction length in bytes.
3375 */
3376IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr8(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3377{
3378 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3379 Assert(pVmcs);
3380
3381 /*
3382 * If the CR8-load exiting control is set, we must cause a VM-exit.
3383 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3384 */
3385 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR8_LOAD_EXIT)
3386 {
3387 Log2(("mov_Cr_Rd: (CR8) Guest intercept -> VM-exit\n"));
3388
3389 VMXVEXITINFO ExitInfo;
3390 RT_ZERO(ExitInfo);
3391 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3392 ExitInfo.cbInstr = cbInstr;
3393
3394 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
3395 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3396 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3397 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3398 }
3399
3400 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3401}
3402
3403
3404/**
3405 * VMX VM-exit handler for VM-exits due to 'Mov DRx,GReg' (DRx write) and 'Mov
3406 * GReg,DRx' (DRx read).
3407 *
3408 * @returns VBox strict status code.
3409 * @param pVCpu The cross context virtual CPU structure.
3410 * @param uInstrId The instruction identity (VMXINSTRID_MOV_TO_DRX or
3411 * VMXINSTRID_MOV_FROM_DRX).
3412 * @param iDrReg The debug register being accessed.
3413 * @param iGReg The general register to/from which the DRx value is being
3414 * stored/loaded.
3415 * @param cbInstr The instruction length in bytes.
3416 */
3417IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovDrX(PVMCPU pVCpu, VMXINSTRID uInstrId, uint8_t iDrReg, uint8_t iGReg,
3418 uint8_t cbInstr)
3419{
3420 Assert(iDrReg <= 7);
3421 Assert(uInstrId == VMXINSTRID_MOV_TO_DRX || uInstrId == VMXINSTRID_MOV_FROM_DRX);
3422
3423 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3424 Assert(pVmcs);
3425
3426 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT)
3427 {
3428 uint32_t const uDirection = uInstrId == VMXINSTRID_MOV_TO_DRX ? VMX_EXIT_QUAL_DRX_DIRECTION_WRITE
3429 : VMX_EXIT_QUAL_DRX_DIRECTION_READ;
3430 VMXVEXITINFO ExitInfo;
3431 RT_ZERO(ExitInfo);
3432 ExitInfo.uReason = VMX_EXIT_MOV_DRX;
3433 ExitInfo.cbInstr = cbInstr;
3434 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_REGISTER, iDrReg)
3435 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_DIRECTION, uDirection)
3436 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_GENREG, iGReg);
3437 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3438 }
3439
3440 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3441}
3442
3443
3444/**
3445 * VMX VM-exit handler for VM-exits due to I/O instructions (IN and OUT).
3446 *
3447 * @returns VBox strict status code.
3448 * @param pVCpu The cross context virtual CPU structure.
3449 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_IO_IN or
3450 * VMXINSTRID_IO_OUT).
3451 * @param u16Port The I/O port being accessed.
3452 * @param fImm Whether the I/O port was encoded using an immediate operand
3453 * or the implicit DX register.
3454 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3455 * @param cbInstr The instruction length in bytes.
3456 */
3457IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrIo(PVMCPU pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, bool fImm, uint8_t cbAccess,
3458 uint8_t cbInstr)
3459{
3460 Assert(uInstrId == VMXINSTRID_IO_IN || uInstrId == VMXINSTRID_IO_OUT);
3461 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3462
3463 bool const fIntercept = iemVmxIsIoInterceptSet(pVCpu, u16Port, cbAccess);
3464 if (fIntercept)
3465 {
3466 uint32_t const uDirection = uInstrId == VMXINSTRID_IO_IN ? VMX_EXIT_QUAL_IO_DIRECTION_IN
3467 : VMX_EXIT_QUAL_IO_DIRECTION_OUT;
3468 VMXVEXITINFO ExitInfo;
3469 RT_ZERO(ExitInfo);
3470 ExitInfo.uReason = VMX_EXIT_IO_INSTR;
3471 ExitInfo.cbInstr = cbInstr;
3472 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_WIDTH, cbAccess - 1)
3473 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_DIRECTION, uDirection)
3474 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_ENCODING, fImm)
3475 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_PORT, u16Port);
3476 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3477 }
3478
3479 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3480}
3481
3482
3483/**
3484 * VMX VM-exit handler for VM-exits due to string I/O instructions (INS and OUTS).
3485 *
3486 * @returns VBox strict status code.
3487 * @param pVCpu The cross context virtual CPU structure.
3488 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_IO_INS or
3489 * VMXINSTRID_IO_OUTS).
3490 * @param u16Port The I/O port being accessed.
3491 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3492 * @param fRep Whether the instruction has a REP prefix or not.
3493 * @param ExitInstrInfo The VM-exit instruction info. field.
3494 * @param cbInstr The instruction length in bytes.
3495 */
3496IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrStrIo(PVMCPU pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, uint8_t cbAccess, bool fRep,
3497 VMXEXITINSTRINFO ExitInstrInfo, uint8_t cbInstr)
3498{
3499 Assert(uInstrId == VMXINSTRID_IO_INS || uInstrId == VMXINSTRID_IO_OUTS);
3500 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3501 Assert(ExitInstrInfo.StrIo.iSegReg < X86_SREG_COUNT);
3502 Assert(ExitInstrInfo.StrIo.u3AddrSize == 0 || ExitInstrInfo.StrIo.u3AddrSize == 1 || ExitInstrInfo.StrIo.u3AddrSize == 2);
3503 Assert(uInstrId != VMXINSTRID_IO_INS || ExitInstrInfo.StrIo.iSegReg == X86_SREG_ES);
3504
3505 bool const fIntercept = iemVmxIsIoInterceptSet(pVCpu, u16Port, cbAccess);
3506 if (fIntercept)
3507 {
3508 /*
3509 * Figure out the guest-linear address and the direction bit (INS/OUTS).
3510 */
3511 /** @todo r=ramshankar: Is there something in IEM that already does this? */
3512 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
3513 uint8_t const iSegReg = ExitInstrInfo.StrIo.iSegReg;
3514 uint8_t const uAddrSize = ExitInstrInfo.StrIo.u3AddrSize;
3515 uint64_t const uAddrSizeMask = s_auAddrSizeMasks[uAddrSize];
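    /* Illustrative: u3AddrSize uses the VM-exit instruction-information encoding
       (0 = 16-bit, 1 = 32-bit, 2 = 64-bit), so e.g. a 16-bit OUTS masks RSI down to SI
       and the linear address below becomes DS.base + SI (or the overridden segment base + SI). */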
3516
3517 uint32_t uDirection;
3518 uint64_t uGuestLinearAddr;
3519 if (uInstrId == VMXINSTRID_IO_INS)
3520 {
3521 uDirection = VMX_EXIT_QUAL_IO_DIRECTION_IN;
3522 uGuestLinearAddr = pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base + (pVCpu->cpum.GstCtx.rdi & uAddrSizeMask);
3523 }
3524 else
3525 {
3526 uDirection = VMX_EXIT_QUAL_IO_DIRECTION_OUT;
3527 uGuestLinearAddr = pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base + (pVCpu->cpum.GstCtx.rsi & uAddrSizeMask);
3528 }
3529
3530 /*
3531 * If the segment is unusable, the guest-linear address is undefined.
3532 * We shall clear it for consistency.
3533 *
3534 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
3535 */
3536 if (pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable)
3537 uGuestLinearAddr = 0;
3538
3539 VMXVEXITINFO ExitInfo;
3540 RT_ZERO(ExitInfo);
3541 ExitInfo.uReason = VMX_EXIT_IO_INSTR;
3542 ExitInfo.cbInstr = cbInstr;
3543 ExitInfo.InstrInfo = ExitInstrInfo;
3544 ExitInfo.u64GuestLinearAddr = uGuestLinearAddr;
3545 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_WIDTH, cbAccess - 1)
3546 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_DIRECTION, uDirection)
3547 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_IS_STRING, 1)
3548 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_IS_REP, fRep)
3549 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_ENCODING, VMX_EXIT_QUAL_IO_ENCODING_DX)
3550 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_PORT, u16Port);
3551 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3552 }
3553
3554 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3555}
3556
3557
3558/**
3559 * VMX VM-exit handler for VM-exits due to MWAIT.
3560 *
3561 * @returns VBox strict status code.
3562 * @param pVCpu The cross context virtual CPU structure.
3563 * @param fMonitorHwArmed Whether the address-range monitor hardware is armed.
3564 * @param cbInstr The instruction length in bytes.
3565 */
3566IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMwait(PVMCPU pVCpu, bool fMonitorHwArmed, uint8_t cbInstr)
3567{
3568 VMXVEXITINFO ExitInfo;
3569 RT_ZERO(ExitInfo);
3570 ExitInfo.uReason = VMX_EXIT_MWAIT;
3571 ExitInfo.cbInstr = cbInstr;
3572 ExitInfo.u64Qual = fMonitorHwArmed;
3573 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3574}
3575
3576
3577/**
3578 * VMX VM-exit handler for VM-exits due to PAUSE.
3579 *
3580 * @returns VBox strict status code.
3581 * @param pVCpu The cross context virtual CPU structure.
3582 * @param cbInstr The instruction length in bytes.
3583 */
3584IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrPause(PVMCPU pVCpu, uint8_t cbInstr)
3585{
3586 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3587 Assert(pVmcs);
3588
3589 /*
3590 * The PAUSE VM-exit is controlled by the "PAUSE exiting" control and the
3591 * "PAUSE-loop exiting" control.
3592 *
3593 * The PLE-Gap is the maximum number of TSC ticks allowed between two successive
3594 * executions of the PAUSE instruction for them to be considered part of the same
3595 * pause loop. The PLE-Window is the maximum number of TSC ticks the guest may spend
3596 * in such a pause loop before we must cause a VM-exit.
3597 *
3598 * See Intel spec. 24.6.13 "Controls for PAUSE-Loop Exiting".
3599 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3600 */
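    /* Worked example (illustrative): with a PLE-Gap of 128 and a PLE-Window of 4096 TSC ticks,
       a CPL-0 guest that executes PAUSE every ~50 ticks keeps extending the same pause loop
       (50 <= 128); once the first PAUSE of that loop lies more than 4096 ticks in the past,
       the next PAUSE causes the VM-exit. A gap larger than 128 ticks instead restarts the
       window below. */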
3601 bool fIntercept = false;
3602 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_PAUSE_EXIT)
3603 fIntercept = true;
3604 else if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
3605 && pVCpu->iem.s.uCpl == 0)
3606 {
3607 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
3608
3609 /*
3610 * A previous-PAUSE-tick value of 0 is used to identify the first execution
3611 * of a PAUSE instruction after VM-entry at CPL 0. We must consider this to
3612 * be the first execution of PAUSE in a loop according to the Intel spec.
3613 *
3614 * For all subsequent recordings of the previous-PAUSE-tick, we ensure it
3615 * can never be zero by OR'ing in 1, which rules out the TSC wrap-around
3616 * case at 0.
3617 */
3618 uint64_t *puFirstPauseLoopTick = &pVCpu->cpum.GstCtx.hwvirt.vmx.uFirstPauseLoopTick;
3619 uint64_t *puPrevPauseTick = &pVCpu->cpum.GstCtx.hwvirt.vmx.uPrevPauseTick;
3620 uint64_t const uTick = TMCpuTickGet(pVCpu);
3621 uint32_t const uPleGap = pVmcs->u32PleGap;
3622 uint32_t const uPleWindow = pVmcs->u32PleWindow;
3623 if ( *puPrevPauseTick == 0
3624 || uTick - *puPrevPauseTick > uPleGap)
3625 *puFirstPauseLoopTick = uTick;
3626 else if (uTick - *puFirstPauseLoopTick > uPleWindow)
3627 fIntercept = true;
3628
3629 *puPrevPauseTick = uTick | 1;
3630 }
3631
3632 if (fIntercept)
3633 {
3634 VMXVEXITINFO ExitInfo;
3635 RT_ZERO(ExitInfo);
3636 ExitInfo.uReason = VMX_EXIT_PAUSE;
3637 ExitInfo.cbInstr = cbInstr;
3638 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3639 }
3640
3641 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3642}
3643
3644
3645/**
3646 * VMX VM-exit handler for VM-exits due to task switches.
3647 *
3648 * @returns VBox strict status code.
3649 * @param pVCpu The cross context virtual CPU structure.
3650 * @param enmTaskSwitch The cause of the task switch.
3651 * @param SelNewTss The selector of the new TSS.
3652 * @param cbInstr The instruction length in bytes.
3653 */
3654IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr)
3655{
3656 /*
3657 * Task-switch VM-exits are unconditional and provide the VM-exit qualification.
3658 *
3659 * If the task switch is caused by executing the CALL, IRET or JMP instruction, or
3660 * by the delivery of an exception generated by one of these instructions leading
3661 * to a task switch through a task gate in the IDT, we need to provide the VM-exit
3662 * instruction length. Any other means of invoking a task-switch VM-exit leaves
3663 * the VM-exit instruction length field undefined.
3664 *
3665 * See Intel spec. 25.2 "Other Causes Of VM Exits".
3666 * See Intel spec. 27.2.4 "Information for VM Exits Due to Instruction Execution".
3667 */
3668 Assert(cbInstr <= 15);
3669
3670 uint8_t uType;
3671 switch (enmTaskSwitch)
3672 {
3673 case IEMTASKSWITCH_CALL: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_CALL; break;
3674 case IEMTASKSWITCH_IRET: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IRET; break;
3675 case IEMTASKSWITCH_JUMP: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_JMP; break;
3676 case IEMTASKSWITCH_INT_XCPT: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT; break;
3677 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3678 }
3679
3680 uint64_t const uExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_NEW_TSS, SelNewTss)
3681 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_SOURCE, uType);
3682 iemVmxVmcsSetExitQual(pVCpu, uExitQual);
3683 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
3684 return iemVmxVmexit(pVCpu, VMX_EXIT_TASK_SWITCH);
3685}
3686
3687
3688/**
3689 * VMX VM-exit handler for VM-exits due to expiry of the preemption timer.
3690 *
3691 * @returns VBox strict status code.
3692 * @param pVCpu The cross context virtual CPU structure.
3693 */
3694IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPU pVCpu)
3695{
3696 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3697 Assert(pVmcs);
3698 Assert(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER);
3699 NOREF(pVmcs);
3700
3701 iemVmxVmcsSetExitQual(pVCpu, 0);
3702 return iemVmxVmexit(pVCpu, VMX_EXIT_PREEMPT_TIMER);
3703}
3704
3705
3706/**
3707 * VMX VM-exit handler for VM-exits due to external interrupts.
3708 *
3709 * @returns VBox strict status code.
3710 * @param pVCpu The cross context virtual CPU structure.
3711 * @param uVector The external interrupt vector.
3712 * @param fIntPending Whether the external interrupt is pending or
3713 * acknowledged in the interrupt controller.
3714 */
3715IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
3716{
3717 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3718 Assert(pVmcs);
3719
3720 /* The VM-exit is subject to the "External interrupt exiting" control being set. */
3721 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT)
3722 {
3723 if (fIntPending)
3724 {
3725 /*
3726 * If the interrupt is pending and we don't need to acknowledge the
3727 * interrupt on VM-exit, cause the VM-exit immediately.
3728 *
3729 * See Intel spec 25.2 "Other Causes Of VM Exits".
3730 */
3731 if (!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT))
3732 {
3733 iemVmxVmcsSetExitIntInfo(pVCpu, 0);
3734 iemVmxVmcsSetExitIntErrCode(pVCpu, 0);
3735 iemVmxVmcsSetExitQual(pVCpu, 0);
3736 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT);
3737 }
3738
3739 /*
3740 * If the interrupt is pending and we -do- need to acknowledge the interrupt
3741 * on VM-exit, postpone the VM-exit until the interrupt has been acknowledged
3742 * with the interrupt controller and consumed.
3743 */
3744 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3745 }
3746
3747 /*
3748 * If the interrupt is no longer pending (i.e. it has been acknowledged) and the
3749 * "External interrupt exiting" and "Acknowledge interrupt on VM-exit" controls are
3750 * both set, we cause the VM-exit now. We need to record the external interrupt that
3751 * just occurred in the VM-exit interruption information field.
3752 *
3753 * See Intel spec. 27.2.2 "Information for VM Exits Due to Vectored Events".
3754 */
3755 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
3756 {
3757 uint8_t const fNmiUnblocking = 0; /** @todo NSTVMX: Implement NMI-unblocking due to IRET. */
3758 uint32_t const uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, uVector)
3759 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_EXT_INT)
3760 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, fNmiUnblocking)
3761 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
3762 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
3763 iemVmxVmcsSetExitIntErrCode(pVCpu, 0);
3764 iemVmxVmcsSetExitQual(pVCpu, 0);
3765 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT);
3766 }
3767 }
3768
3769 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3770}
3771
3772
3773/**
3774 * VMX VM-exit handler for VM-exits due to startup-IPIs (SIPI).
3775 *
3776 * @returns VBox strict status code.
3777 * @param pVCpu The cross context virtual CPU structure.
3778 * @param uVector The SIPI vector.
3779 */
3780IEM_STATIC VBOXSTRICTRC iemVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)
3781{
3782 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3783 Assert(pVmcs);
3784
3785 iemVmxVmcsSetExitQual(pVCpu, uVector);
3786 return iemVmxVmexit(pVCpu, VMX_EXIT_SIPI);
3787}
3788
3789
3790/**
3791 * VMX VM-exit handler for VM-exits due to init-IPIs (INIT).
3792 *
3793 * @returns VBox strict status code.
3794 * @param pVCpu The cross context virtual CPU structure.
3795 */
3796IEM_STATIC VBOXSTRICTRC iemVmxVmexitInitIpi(PVMCPU pVCpu)
3797{
3798 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3799 Assert(pVmcs);
3800
3801 iemVmxVmcsSetExitQual(pVCpu, 0);
3802 return iemVmxVmexit(pVCpu, VMX_EXIT_INIT_SIGNAL);
3803}
3804
3805
3806/**
3807 * VMX VM-exit handler for interrupt-window VM-exits.
3808 *
3809 * @returns VBox strict status code.
3810 * @param pVCpu The cross context virtual CPU structure.
3811 */
3812IEM_STATIC VBOXSTRICTRC iemVmxVmexitIntWindow(PVMCPU pVCpu)
3813{
3814 iemVmxVmcsSetExitQual(pVCpu, 0);
3815 return iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW);
3816}
3817
3818
3819/**
3820 * VMX VM-exit handler for VM-exits due to delivery of an event.
3821 *
3822 * @returns VBox strict status code.
3823 * @param pVCpu The cross context virtual CPU structure.
3824 * @param uVector The interrupt / exception vector.
3825 * @param fFlags The flags (see IEM_XCPT_FLAGS_XXX).
3826 * @param uErrCode The error code associated with the event.
3827 * @param uCr2 The CR2 value in case of a \#PF exception.
3828 * @param cbInstr The instruction length in bytes.
3829 */
3830IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2,
3831 uint8_t cbInstr)
3832{
3833 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3834 Assert(pVmcs);
3835
3836 /*
3837 * If the event is being injected as part of VM-entry, it isn't subject to event
3838 * intercepts in the nested-guest. However, secondary exceptions that occur during
3839 * injection of any event -are- subject to event interception.
3840 *
3841 * See Intel spec. 26.5.1.2 "VM Exits During Event Injection".
3842 */
3843 if (!pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents)
3844 {
3845 /* Update the IDT-vectoring event in the VMCS as the source of the upcoming event. */
3846 uint8_t const uIdtVectoringType = iemVmxGetEventType(uVector, fFlags);
3847 uint8_t const fErrCodeValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
3848 uint32_t const uIdtVectoringInfo = RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_VECTOR, uVector)
3849 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_TYPE, uIdtVectoringType)
3850 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_ERR_CODE_VALID, fErrCodeValid)
3851 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_VALID, 1);
3852 iemVmxVmcsSetIdtVectoringInfo(pVCpu, uIdtVectoringInfo);
3853 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, uErrCode);
3854
3855 pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents = true;
3856 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3857 }
3858
3859 /*
3860 * We are injecting an external interrupt, check if we need to cause a VM-exit now.
3861 * If not, the caller will continue delivery of the external interrupt as it would
3862 * normally.
3863 */
3864 if (fFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3865 {
3866 Assert(!VMX_IDT_VECTORING_INFO_IS_VALID(pVmcs->u32RoIdtVectoringInfo));
3867 return iemVmxVmexitExtInt(pVCpu, uVector, false /* fIntPending */);
3868 }
3869
3870 /*
3871 * Evaluate intercepts for hardware exceptions including #BP, #DB, #OF
3872 * generated by INT3, INT1 (ICEBP) and INTO respectively.
3873 */
3874 Assert(fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_SOFT_INT));
3875 bool fIntercept = false;
3876 bool fIsHwXcpt = false;
3877 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3878 || (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR)))
3879 {
3880 fIsHwXcpt = true;
3881 /* NMIs have a dedicated VM-execution control for causing VM-exits. */
3882 if (uVector == X86_XCPT_NMI)
3883 fIntercept = RT_BOOL(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT);
3884 else
3885 {
3886 /* Page-faults are subject to masking using their error code. */
3887 uint32_t fXcptBitmap = pVmcs->u32XcptBitmap;
3888 if (uVector == X86_XCPT_PF)
3889 {
3890 uint32_t const fXcptPFMask = pVmcs->u32XcptPFMask;
3891 uint32_t const fXcptPFMatch = pVmcs->u32XcptPFMatch;
3892 if ((uErrCode & fXcptPFMask) != fXcptPFMatch)
3893 fXcptBitmap ^= RT_BIT(X86_XCPT_PF);
3894 }
3895
3896 /* Consult the exception bitmap for all hardware exceptions (except NMI). */
3897 if (fXcptBitmap & RT_BIT(uVector))
3898 fIntercept = true;
3899 }
3900 }
3901 /* else: Software interrupts cannot be intercepted and therefore do not cause a VM-exit. */
3902
3903 /*
3904 * Now that we've determined whether the software interrupt or hardware exception
3905 * causes a VM-exit, we need to construct the relevant VM-exit information and
3906 * cause the VM-exit.
3907 */
3908 if (fIntercept)
3909 {
3910 Assert(!(fFlags & IEM_XCPT_FLAGS_T_EXT_INT));
3911
3912 /* Construct the rest of the event related information fields and cause the VM-exit. */
3913 uint64_t uExitQual = 0;
3914 if (fIsHwXcpt)
3915 {
3916 if (uVector == X86_XCPT_PF)
3917 uExitQual = uCr2;
3918 else if (uVector == X86_XCPT_DB)
3919 {
3920 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR6);
3921 uExitQual = pVCpu->cpum.GstCtx.dr[6] & VMX_VMCS_EXIT_QUAL_VALID_MASK;
3922 }
3923 }
3924
3925 uint8_t const fNmiUnblocking = 0; /** @todo NSTVMX: Implement NMI-unblocking due to IRET. */
3926 uint8_t const fErrCodeValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
3927 uint8_t const uIntInfoType = iemVmxGetEventType(uVector, fFlags);
3928 uint32_t const uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, uVector)
3929 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, uIntInfoType)
3930 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_ERR_CODE_VALID, fErrCodeValid)
3931 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, fNmiUnblocking)
3932 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
3933 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
3934 iemVmxVmcsSetExitIntErrCode(pVCpu, uErrCode);
3935 iemVmxVmcsSetExitQual(pVCpu, uExitQual);
3936
3937 /*
3938 * For VM exits due to software exceptions (those generated by INT3 or INTO) or privileged
3939 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
3940 * length.
3941 */
3942 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3943 && (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR)))
3944 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
3945 else
3946 iemVmxVmcsSetExitInstrLen(pVCpu, 0);
3947
3948 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI);
3949 }
3950
3951 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3952}
3953
3954
3955/**
3956 * VMX VM-exit handler for VM-exits due to a triple fault.
3957 *
3958 * @returns VBox strict status code.
3959 * @param pVCpu The cross context virtual CPU structure.
3960 */
3961IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu)
3962{
3963 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3964 Assert(pVmcs);
3965 iemVmxVmcsSetExitQual(pVCpu, 0);
3966 return iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT);
3967}
3968
3969
3970/**
3971 * VMX VM-exit handler for APIC-accesses.
3972 *
3973 * @param pVCpu The cross context virtual CPU structure.
3974 * @param offAccess The offset of the register being accessed.
3975 * @param fAccess The type of access (must contain IEM_ACCESS_TYPE_READ or
3976 * IEM_ACCESS_TYPE_WRITE or IEM_ACCESS_INSTRUCTION).
3977 */
3978IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess)
3979{
3980 Assert((fAccess & IEM_ACCESS_TYPE_READ) || (fAccess & IEM_ACCESS_TYPE_WRITE) || (fAccess & IEM_ACCESS_INSTRUCTION));
3981
3982 VMXAPICACCESS enmAccess;
3983 bool const fInEventDelivery = IEMGetCurrentXcpt(pVCpu, NULL, NULL, NULL, NULL);
3984 if (fInEventDelivery)
3985 enmAccess = VMXAPICACCESS_LINEAR_EVENT_DELIVERY;
3986 else if (fAccess & IEM_ACCESS_INSTRUCTION)
3987 enmAccess = VMXAPICACCESS_LINEAR_INSTR_FETCH;
3988 else if (fAccess & IEM_ACCESS_TYPE_WRITE)
3989 enmAccess = VMXAPICACCESS_LINEAR_WRITE;
3990 else
3991 enmAccess = VMXAPICACCESS_LINEAR_READ;
3992
3993 uint64_t const uExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_OFFSET, offAccess)
3994 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_TYPE, enmAccess);
3995 iemVmxVmcsSetExitQual(pVCpu, uExitQual);
3996 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_ACCESS);
3997}
3998
3999
4000/**
4001 * VMX VM-exit handler for APIC-write VM-exits.
4002 *
4003 * @param pVCpu The cross context virtual CPU structure.
4004 * @param offApic The write to the virtual-APIC page offset that caused this
4005 * VM-exit.
4006 */
4007IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicWrite(PVMCPU pVCpu, uint16_t offApic)
4008{
4009 Assert(offApic < XAPIC_OFF_END + 4);
4010
4011 /* Write only bits 11:0 of the APIC offset into the VM-exit qualification field. */
4012 offApic &= UINT16_C(0xfff);
4013 iemVmxVmcsSetExitQual(pVCpu, offApic);
4014 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_WRITE);
4015}
4016
4017
4018/**
4019 * VMX VM-exit handler for virtualized-EOIs.
4020 *
4021 * @param pVCpu The cross context virtual CPU structure.
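 * @param uVector The vector whose EOI was virtualized.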
4022 */
4023IEM_STATIC VBOXSTRICTRC iemVmxVmexitVirtEoi(PVMCPU pVCpu, uint8_t uVector)
4024{
4025 iemVmxVmcsSetExitQual(pVCpu, uVector);
4026 return iemVmxVmexit(pVCpu, VMX_EXIT_VIRTUALIZED_EOI);
4027}
4028
4029
4030/**
4031 * Sets virtual-APIC write emulation as pending.
4032 *
4033 * @param pVCpu The cross context virtual CPU structure.
4034 * @param offApic The offset in the virtual-APIC page that was written.
4035 */
4036DECLINLINE(void) iemVmxVirtApicSetPendingWrite(PVMCPU pVCpu, uint16_t offApic)
4037{
4038 Assert(offApic < XAPIC_OFF_END + 4);
4039
4040 /*
4041 * Record the currently updated APIC offset, as we need this later for figuring
4042 * out whether to perform TPR, EOI or self-IPI virtualization as well
4043 * as for supplying the exit qualification when causing an APIC-write VM-exit.
4044 */
4045 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
4046
4047 /*
4048 * Signal that we need to perform virtual-APIC write emulation (TPR/PPR/EOI/Self-IPI
4049 * virtualization or APIC-write emulation).
4050 */
4051 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4052 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
4053}
4054
4055
4056/**
4057 * Clears any pending virtual-APIC write emulation.
4058 *
4059 * @returns The virtual-APIC offset that was written before clearing it.
4060 * @param pVCpu The cross context virtual CPU structure.
4061 */
4062DECLINLINE(uint16_t) iemVmxVirtApicClearPendingWrite(PVMCPU pVCpu)
4063{
4064 uint16_t const offVirtApicWrite = pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite;
4065 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = 0;
4066 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
4067 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
4068 return offVirtApicWrite;
4069}
4070
4071
4072/**
4073 * Reads a 32-bit register from the virtual-APIC page at the given offset.
4074 *
4075 * @returns The register from the virtual-APIC page.
4076 * @param pVCpu The cross context virtual CPU structure.
4077 * @param offReg The offset of the register being read.
4078 */
4079DECLINLINE(uint32_t) iemVmxVirtApicReadRaw32(PVMCPU pVCpu, uint16_t offReg)
4080{
4081 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
4082 uint8_t const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4083 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4084 uint32_t const uReg = *(const uint32_t *)(pbVirtApic + offReg);
4085 return uReg;
4086}
4087
4088
4089/**
4090 * Reads a 64-bit register from the virtual-APIC page at the given offset.
4091 *
4092 * @returns The register from the virtual-APIC page.
4093 * @param pVCpu The cross context virtual CPU structure.
4094 * @param offReg The offset of the register being read.
4095 */
4096DECLINLINE(uint64_t) iemVmxVirtApicReadRaw64(PVMCPU pVCpu, uint16_t offReg)
4097{
4098 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
4099 uint8_t const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4100 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4101 uint64_t const uReg = *(const uint64_t *)(pbVirtApic + offReg);
4102 return uReg;
4103}
4104
4105
4106/**
4107 * Writes a 32-bit register to the virtual-APIC page at the given offset.
4108 *
4109 * @param pVCpu The cross context virtual CPU structure.
4110 * @param offReg The offset of the register being written.
4111 * @param uReg The register value to write.
4112 */
4113DECLINLINE(void) iemVmxVirtApicWriteRaw32(PVMCPU pVCpu, uint16_t offReg, uint32_t uReg)
4114{
4115 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
4116 uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4117 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4118 *(uint32_t *)(pbVirtApic + offReg) = uReg;
4119}
4120
4121
4122/**
4123 * Writes a 64-bit register to the virtual-APIC page at the given offset.
4124 *
4125 * @param pVCpu The cross context virtual CPU structure.
4126 * @param offReg The offset of the register being written.
4127 * @param uReg The register value to write.
4128 */
4129DECLINLINE(void) iemVmxVirtApicWriteRaw64(PVMCPU pVCpu, uint16_t offReg, uint64_t uReg)
4130{
4131 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
4132 uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4133 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4134 *(uint64_t *)(pbVirtApic + offReg) = uReg;
4135}
4136
4137
4138/**
4139 * Sets the vector in a virtual-APIC 256-bit sparse register.
4140 *
4141 * @param pVCpu The cross context virtual CPU structure.
4142 * @param offReg The offset of the 256-bit sparse register.
4143 * @param uVector The vector to set.
4144 *
4145 * @remarks This is based on our APIC device code.
4146 */
4147DECLINLINE(void) iemVmxVirtApicSetVector(PVMCPU pVCpu, uint16_t offReg, uint8_t uVector)
4148{
4149 Assert(offReg == XAPIC_OFF_ISR0 || offReg == XAPIC_OFF_TMR0 || offReg == XAPIC_OFF_IRR0);
4150 uint8_t *pbBitmap = ((uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage)) + offReg;
4151 uint16_t const offVector = (uVector & UINT32_C(0xe0)) >> 1;
4152 uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
4153 ASMAtomicBitSet(pbBitmap + offVector, idxVectorBit);
4154}
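/* Worked example (illustrative): for uVector=0x41 with offReg=XAPIC_OFF_IRR0, offVector is
   (0x41 & 0xe0) >> 1 = 0x20 and idxVectorBit is 0x41 & 0x1f = 1, i.e. bit 1 of the 32-bit
   IRR register at XAPIC_OFF_IRR0 + 0x20 (IRR2, covering vectors 0x40..0x5f). */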
4155
4156
4157/**
4158 * Clears the vector in a virtual-APIC 256-bit sparse register.
4159 *
4160 * @param pVCpu The cross context virtual CPU structure.
4161 * @param offReg The offset of the 256-bit sparse register.
4162 * @param uVector The vector to clear.
4163 *
4164 * @remarks This is based on our APIC device code.
4165 */
4166DECLINLINE(void) iemVmxVirtApicClearVector(PVMCPU pVCpu, uint16_t offReg, uint8_t uVector)
4167{
4168 Assert(offReg == XAPIC_OFF_ISR0 || offReg == XAPIC_OFF_TMR0 || offReg == XAPIC_OFF_IRR0);
4169 uint8_t *pbBitmap = ((uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage)) + offReg;
4170 uint16_t const offVector = (uVector & UINT32_C(0xe0)) >> 1;
4171 uint16_t const idxVectorBit = uVector & UINT32_C(0x1f);
4172 ASMAtomicBitClear(pbBitmap + offVector, idxVectorBit);
4173}
4174
4175
4176/**
4177 * Checks if a memory access to the APIC-access page must cause an APIC-access
4178 * VM-exit.
4179 *
4180 * @param pVCpu The cross context virtual CPU structure.
4181 * @param offAccess The offset of the register being accessed.
4182 * @param cbAccess The size of the access in bytes.
4183 * @param fAccess The type of access (must be IEM_ACCESS_TYPE_READ or
4184 * IEM_ACCESS_TYPE_WRITE).
4185 *
4186 * @remarks This must not be used for MSR-based APIC-access page accesses!
4187 * @sa iemVmxVirtApicAccessMsrWrite, iemVmxVirtApicAccessMsrRead.
4188 */
4189IEM_STATIC bool iemVmxVirtApicIsMemAccessIntercepted(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, uint32_t fAccess)
4190{
4191 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4192 Assert(pVmcs);
4193 Assert(fAccess == IEM_ACCESS_TYPE_READ || fAccess == IEM_ACCESS_TYPE_WRITE);
4194
4195 /*
4196 * We must cause a VM-exit if any of the following are true:
4197 * - TPR shadowing isn't active.
4198 * - The access size exceeds 32-bits.
4199 * - The access is not contained within the low 4 bytes of a 16-byte aligned offset.
4200 *
4201 * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
4202 * See Intel spec. 29.4.3.1 "Determining Whether a Write Access is Virtualized".
4203 */
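    /* Illustrative: a 4-byte access at offset 0x80 (VTPR) passes the check below since
       (0x80 + 4 - 1) & 0xc is 0, whereas a 4-byte access at offset 0x84 yields 4 and
       thus causes a VM-exit. */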
4204 if ( !(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4205 || cbAccess > sizeof(uint32_t)
4206 || ((offAccess + cbAccess - 1) & 0xc)
4207 || offAccess >= XAPIC_OFF_END + 4)
4208 return true;
4209
4210 /*
4211 * If the access is part of an operation where we have already
4212 * virtualized a virtual-APIC write, we must cause a VM-exit.
4213 */
4214 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
4215 return true;
4216
4217 /*
4218 * Check write accesses to the APIC-access page that cause VM-exits.
4219 */
4220 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4221 {
4222 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4223 {
4224 /*
4225 * With APIC-register virtualization, a write access to any of the
4226 * following registers is virtualized. Accessing any other register
4227 * causes a VM-exit.
4228 */
4229 uint16_t const offAlignedAccess = offAccess & 0xfffc;
4230 switch (offAlignedAccess)
4231 {
4232 case XAPIC_OFF_ID:
4233 case XAPIC_OFF_TPR:
4234 case XAPIC_OFF_EOI:
4235 case XAPIC_OFF_LDR:
4236 case XAPIC_OFF_DFR:
4237 case XAPIC_OFF_SVR:
4238 case XAPIC_OFF_ESR:
4239 case XAPIC_OFF_ICR_LO:
4240 case XAPIC_OFF_ICR_HI:
4241 case XAPIC_OFF_LVT_TIMER:
4242 case XAPIC_OFF_LVT_THERMAL:
4243 case XAPIC_OFF_LVT_PERF:
4244 case XAPIC_OFF_LVT_LINT0:
4245 case XAPIC_OFF_LVT_LINT1:
4246 case XAPIC_OFF_LVT_ERROR:
4247 case XAPIC_OFF_TIMER_ICR:
4248 case XAPIC_OFF_TIMER_DCR:
4249 break;
4250 default:
4251 return true;
4252 }
4253 }
4254 else if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4255 {
4256 /*
4257 * With virtual-interrupt delivery, a write access to any of the
4258 * following registers is virtualized. Accessing any other register
4259 * causes a VM-exit.
4260 *
4261 * Note! The specification does not allow writing to offsets in-between
4262 * these registers (e.g. TPR + 1 byte) unlike read accesses.
4263 */
4264 switch (offAccess)
4265 {
4266 case XAPIC_OFF_TPR:
4267 case XAPIC_OFF_EOI:
4268 case XAPIC_OFF_ICR_LO:
4269 break;
4270 default:
4271 return true;
4272 }
4273 }
4274 else
4275 {
4276 /*
4277 * Without APIC-register virtualization or virtual-interrupt delivery,
4278 * only TPR accesses are virtualized.
4279 */
4280 if (offAccess == XAPIC_OFF_TPR)
4281 { /* likely */ }
4282 else
4283 return true;
4284 }
4285 }
4286 else
4287 {
4288 /*
4289 * Check read accesses to the APIC-access page that cause VM-exits.
4290 */
4291 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4292 {
4293 /*
4294 * With APIC-register virtualization, a read access to any of the
4295 * following registers is virtualized. Accessing any other register
4296 * causes a VM-exit.
4297 */
4298 uint16_t const offAlignedAccess = offAccess & 0xfffc;
4299 switch (offAlignedAccess)
4300 {
4301 /** @todo r=ramshankar: What about XAPIC_OFF_LVT_CMCI? */
4302 case XAPIC_OFF_ID:
4303 case XAPIC_OFF_VERSION:
4304 case XAPIC_OFF_TPR:
4305 case XAPIC_OFF_EOI:
4306 case XAPIC_OFF_LDR:
4307 case XAPIC_OFF_DFR:
4308 case XAPIC_OFF_SVR:
4309 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
4310 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
4311 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
4312 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
4313 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
4314 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
4315 case XAPIC_OFF_ESR:
4316 case XAPIC_OFF_ICR_LO:
4317 case XAPIC_OFF_ICR_HI:
4318 case XAPIC_OFF_LVT_TIMER:
4319 case XAPIC_OFF_LVT_THERMAL:
4320 case XAPIC_OFF_LVT_PERF:
4321 case XAPIC_OFF_LVT_LINT0:
4322 case XAPIC_OFF_LVT_LINT1:
4323 case XAPIC_OFF_LVT_ERROR:
4324 case XAPIC_OFF_TIMER_ICR:
4325 case XAPIC_OFF_TIMER_DCR:
4326 break;
4327 default:
4328 return true;
4329 }
4330 }
4331 else
4332 {
4333 /* Without APIC-register virtualization, only TPR accesses are virtualized. */
4334 if (offAccess == XAPIC_OFF_TPR)
4335 { /* likely */ }
4336 else
4337 return true;
4338 }
4339 }
4340
4341 /* The APIC-access is virtualized, does not cause a VM-exit. */
4342 return false;
4343}
4344
4345
4346/**
4347 * Virtualizes a memory-based APIC-access where the address is not used to access
4348 * memory.
4349 *
4350 * This is for instructions like MONITOR, CLFLUSH, CLFLUSHOPT, ENTER which may cause
4351 * page-faults but do not use the address to access memory.
4352 *
4353 * @param pVCpu The cross context virtual CPU structure.
4354 * @param pGCPhysAccess Pointer to the guest-physical address used.
4355 */
4356IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPU pVCpu, PRTGCPHYS pGCPhysAccess)
4357{
4358 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4359 Assert(pVmcs);
4360 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
4361 Assert(pGCPhysAccess);
4362
4363 RTGCPHYS const GCPhysAccess = *pGCPhysAccess & ~(RTGCPHYS)PAGE_OFFSET_MASK;
4364 RTGCPHYS const GCPhysApic = pVmcs->u64AddrApicAccess.u;
4365 Assert(!(GCPhysApic & PAGE_OFFSET_MASK));
4366
4367 if (GCPhysAccess == GCPhysApic)
4368 {
4369 uint16_t const offAccess = *pGCPhysAccess & PAGE_OFFSET_MASK;
4370 uint32_t const fAccess = IEM_ACCESS_TYPE_READ;
4371 uint16_t const cbAccess = 1;
4372 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess);
4373 if (fIntercept)
4374 return iemVmxVmexitApicAccess(pVCpu, offAccess, fAccess);
4375
4376 *pGCPhysAccess = GCPhysApic | offAccess;
4377 return VINF_VMX_MODIFIES_BEHAVIOR;
4378 }
4379
4380 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4381}
4382
4383
4384/**
4385 * Virtualizes a memory-based APIC-access.
4386 *
4387 * @returns VBox strict status code.
4388 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the access was virtualized.
4389 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
4390 *
4391 * @param pVCpu The cross context virtual CPU structure.
4392 * @param offAccess The offset of the register being accessed (within the
4393 * APIC-access page).
4394 * @param cbAccess The size of the access in bytes.
4395 * @param pvData Pointer to the data being written or where to store the data
4396 * being read.
4397 * @param fAccess The type of access (must contain IEM_ACCESS_TYPE_READ or
4398 * IEM_ACCESS_TYPE_WRITE or IEM_ACCESS_INSTRUCTION).
4399 */
4400IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData,
4401 uint32_t fAccess)
4402{
4403 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4404 Assert(pVmcs);
4405 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
4406 Assert(pvData);
4407 Assert( (fAccess & IEM_ACCESS_TYPE_READ)
4408 || (fAccess & IEM_ACCESS_TYPE_WRITE)
4409 || (fAccess & IEM_ACCESS_INSTRUCTION));
4410
4411 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess);
4412 if (fIntercept)
4413 return iemVmxVmexitApicAccess(pVCpu, offAccess, fAccess);
4414
4415 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4416 {
4417 /*
4418 * A write access to the APIC-access page that is virtualized (rather than
4419 * causing a VM-exit) writes data to the virtual-APIC page.
4420 */
4421 uint32_t const u32Data = *(uint32_t *)pvData;
4422 iemVmxVirtApicWriteRaw32(pVCpu, offAccess, u32Data);
4423
4424 /*
4425 * Record the currently updated APIC offset, as we need this later for figuring
4426 * out whether to perform TPR, EOI or self-IPI virtualization, as well
4427 * as for supplying the exit qualification when causing an APIC-write VM-exit.
4428 *
4429 * After completion of the current operation, we need to perform TPR virtualization,
4430 * EOI virtualization or APIC-write VM-exit depending on which register was written.
4431 *
4432 * The current operation may be a REP-prefixed string instruction, execution of any
4433 * other instruction, or delivery of an event through the IDT.
4434 *
4435 * Thus things like clearing bytes 3:1 of the VTPR or clearing VEOI are not
4436 * performed now but only after the current operation has completed.
4437 *
4438 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4439 */
4440 iemVmxVirtApicSetPendingWrite(pVCpu, offAccess);
4441 }
4442 else
4443 {
4444 /*
4445 * A read access from the APIC-access page that is virtualized (rather than
4446 * causing a VM-exit) returns data from the virtual-APIC page.
4447 *
4448 * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
4449 */
4450 Assert(cbAccess <= 4);
4451 Assert(offAccess < XAPIC_OFF_END + 4);
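    /* Mask the 32-bit value read from the virtual-APIC page down to the requested
       access size in bytes (the zero-size entry of the table is never used). */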
4452 static uint32_t const s_auAccessSizeMasks[] = { 0, 0xff, 0xffff, 0xffffff, 0xffffffff };
4453
4454 uint32_t u32Data = iemVmxVirtApicReadRaw32(pVCpu, offAccess);
4455 u32Data &= s_auAccessSizeMasks[cbAccess];
4456 *(uint32_t *)pvData = u32Data;
4457 }
4458
4459 return VINF_VMX_MODIFIES_BEHAVIOR;
4460}
4461
4462
4463/**
4464 * Virtualizes an MSR-based APIC read access.
4465 *
4466 * @returns VBox strict status code.
4467 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR read was virtualized.
4468 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR read access must be
4469 * handled by the x2APIC device.
4470 * @retval VERR_OUT_OF_RANGE if the MSR read was supposed to be virtualized but was
4471 * not within the range of valid MSRs, caller must raise \#GP(0).
4472 * @param pVCpu The cross context virtual CPU structure.
4473 * @param idMsr The x2APIC MSR being read.
4474 * @param pu64Value Where to store the read x2APIC MSR value (only valid when
4475 * VINF_VMX_MODIFIES_BEHAVIOR is returned).
4476 */
4477IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value)
4478{
4479 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4480 Assert(pVmcs);
4481 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
4482 Assert(pu64Value);
4483
4484 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4485 {
4486 /*
4487 * Intel has different ideas in the x2APIC spec. vs the VT-x spec. as to
4488 * what the end of the valid x2APIC MSR range is. Hence the use of different
4489 * macros here.
4490 *
4491 * See Intel spec. 10.12.1.2 "x2APIC Register Address Space".
4492 * See Intel spec. 29.5 "Virtualizing MSR-based APIC Accesses".
4493 */
4494 if ( idMsr >= VMX_V_VIRT_APIC_MSR_START
4495 && idMsr <= VMX_V_VIRT_APIC_MSR_END)
4496 {
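    /* An x2APIC MSR maps to the virtual-APIC page at offset (idMsr & 0xff) * 16, mirroring
       the xAPIC MMIO layout; e.g. IA32_X2APIC_TPR (0x808) maps to offset 0x80 (the TPR). */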
4497 uint16_t const offReg = (idMsr & 0xff) << 4;
4498 uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
4499 *pu64Value = u64Value;
4500 return VINF_VMX_MODIFIES_BEHAVIOR;
4501 }
4502 return VERR_OUT_OF_RANGE;
4503 }
4504
4505 if (idMsr == MSR_IA32_X2APIC_TPR)
4506 {
4507 uint16_t const offReg = (idMsr & 0xff) << 4;
4508 uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
4509 *pu64Value = u64Value;
4510 return VINF_VMX_MODIFIES_BEHAVIOR;
4511 }
4512
4513 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4514}
4515
4516
4517/**
4518 * Virtualizes an MSR-based APIC write access.
4519 *
4520 * @returns VBox strict status code.
4521 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR write was virtualized.
4522 * @retval VERR_OUT_OF_RANGE if the MSR write was supposed to be virtualized but was
4523 * not within the range of valid MSRs, caller must raise \#GP(0).
4524 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR must be written normally.
4525 *
4526 * @param pVCpu The cross context virtual CPU structure.
4527 * @param idMsr The x2APIC MSR being written.
4528 * @param u64Value The value of the x2APIC MSR being written.
4529 */
4530IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value)
4531{
4532 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4533 Assert(pVmcs);
4534
4535 /*
4536 * Check if the access is to be virtualized.
4537 * See Intel spec. 29.5 "Virtualizing MSR-based APIC Accesses".
4538 */
4539 if ( idMsr == MSR_IA32_X2APIC_TPR
4540 || ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4541 && ( idMsr == MSR_IA32_X2APIC_EOI
4542 || idMsr == MSR_IA32_X2APIC_SELF_IPI)))
4543 {
4544 /* Validate the MSR write depending on the register. */
4545 switch (idMsr)
4546 {
4547 case MSR_IA32_X2APIC_TPR:
4548 case MSR_IA32_X2APIC_SELF_IPI:
4549 {
4550 if (u64Value & UINT64_C(0xffffffffffffff00))
4551 return VERR_OUT_OF_RANGE;
4552 break;
4553 }
4554 case MSR_IA32_X2APIC_EOI:
4555 {
4556 if (u64Value != 0)
4557 return VERR_OUT_OF_RANGE;
4558 break;
4559 }
4560 }
4561
4562 /* Write the MSR to the virtual-APIC page. */
4563 uint16_t const offReg = (idMsr & 0xff) << 4;
4564 iemVmxVirtApicWriteRaw64(pVCpu, offReg, u64Value);
4565
4566 /*
4567 * Record the currently updated APIC offset, as we need this later for figuring
4568 * out whether to perform TPR, EOI or self-IPI virtualization, as well
4569 * as for supplying the exit qualification when causing an APIC-write VM-exit.
4570 */
4571 iemVmxVirtApicSetPendingWrite(pVCpu, offReg);
4572
4573 return VINF_VMX_MODIFIES_BEHAVIOR;
4574 }
4575
4576 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4577}
4578
4579
4580/**
4581 * Finds the most significant set bit in a virtual-APIC 256-bit sparse register.
4582 *
4583 * @returns VBox status code.
4584 * @retval VINF_SUCCESS when the highest set bit is found.
4585 * @retval VERR_NOT_FOUND when no bit is set.
4586 *
4587 * @param pVCpu The cross context virtual CPU structure.
4588 * @param offReg The offset of the APIC 256-bit sparse register.
4589 * @param pidxHighestBit Where to store the highest bit (most significant bit)
4590 * set in the register. Only valid when VINF_SUCCESS is
4591 * returned.
4592 *
4593 * @remarks The format of the 256-bit sparse register here mirrors that found in
4594 * real APIC hardware.
4595 */
4596static int iemVmxVirtApicGetHighestSetBitInReg(PVMCPU pVCpu, uint16_t offReg, uint8_t *pidxHighestBit)
4597{
4598 Assert(offReg < XAPIC_OFF_END + 4);
4599 Assert(pidxHighestBit);
4600
4601 /*
4602 * There are 8 contiguous fragments (of 16-bytes each) in the sparse register.
4603 * However, in each fragment only the first 4 bytes are used.
4604 */
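    /* E.g. for the ISR the eight fragments live at offsets 0x100, 0x110, ..., 0x170; bit n of the
       256-bit register is bit (n % 32) of the 32-bit value at the start of fragment (n / 32). */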
4605 uint8_t const cFrags = 8;
4606 for (int8_t iFrag = cFrags - 1; iFrag >= 0; iFrag--)
4607 {
4608 uint16_t const offFrag = iFrag * 16;
4609 uint32_t const u32Frag = iemVmxVirtApicReadRaw32(pVCpu, offReg + offFrag);
4610 if (!u32Frag)
4611 continue;
4612
4613 unsigned idxHighestBit = ASMBitLastSetU32(u32Frag);
4614 Assert(idxHighestBit > 0);
4615 --idxHighestBit;
4616 Assert(idxHighestBit <= UINT8_MAX);
4617 *pidxHighestBit = idxHighestBit;
4618 return VINF_SUCCESS;
4619 }
4620 return VERR_NOT_FOUND;
4621}
4622
4623
4624/**
4625 * Evaluates pending virtual interrupts.
4626 *
4627 * @param pVCpu The cross context virtual CPU structure.
4628 */
4629IEM_STATIC void iemVmxEvalPendingVirtIntrs(PVMCPU pVCpu)
4630{
4631 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4632 Assert(pVmcs);
4633 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4634
4635 if (!(pVmcs->u32ProcCtls & VMX_PROC_CTLS_INT_WINDOW_EXIT))
4636 {
4637 uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
4638 uint8_t const uPpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_PPR);
4639
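        /* A virtual interrupt is signalled when the priority class (bits 7:4 of the vector) of the
           highest requested virtual interrupt (RVI) exceeds that of the virtual PPR. */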
4640 if ((uRvi >> 4) > (uPpr >> 4))
4641 {
4642 Log2(("eval_virt_intrs: uRvi=%#x uPpr=%#x - Signaling pending interrupt\n", uRvi, uPpr));
4643 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
4644 }
4645 else
4646 Log2(("eval_virt_intrs: uRvi=%#x uPpr=%#x - Nothing to do\n", uRvi, uPpr));
4647 }
4648}
4649
4650
4651/**
4652 * Performs PPR virtualization.
4653 *
4655 * @param pVCpu The cross context virtual CPU structure.
4656 */
4657IEM_STATIC void iemVmxPprVirtualization(PVMCPU pVCpu)
4658{
4659 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4660 Assert(pVmcs);
4661 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4662 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4663
4664 /*
4665 * PPR virtualization is caused in response to a VM-entry, TPR-virtualization,
4666 * or EOI-virtualization.
4667 *
4668 * See Intel spec. 29.1.3 "PPR Virtualization".
4669 */
4670 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4671 uint32_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
4672
4673 uint32_t uPpr;
4674 if (((uTpr >> 4) & 0xf) >= ((uSvi >> 4) & 0xf))
4675 uPpr = uTpr & 0xff;
4676 else
4677 uPpr = uSvi & 0xf0;
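    /* E.g. uTpr=0x45, uSvi=0x62: priority class 4 < 6, so the new VPPR is 0x60. */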
4678
4679 Log2(("ppr_virt: uTpr=%#x uSvi=%#x uPpr=%#x\n", uTpr, uSvi, uPpr));
4680 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_PPR, uPpr);
4681}
4682
4683
4684/**
4685 * Performs VMX TPR virtualization.
4686 *
4687 * @returns VBox strict status code.
4688 * @param pVCpu The cross context virtual CPU structure.
4689 */
4690IEM_STATIC VBOXSTRICTRC iemVmxTprVirtualization(PVMCPU pVCpu)
4691{
4692 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4693 Assert(pVmcs);
4694 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4695
4696 /*
4697 * We should have already performed the virtual-APIC write to the TPR offset
4698 * in the virtual-APIC page. We now perform TPR virtualization.
4699 *
4700 * See Intel spec. 29.1.2 "TPR Virtualization".
4701 */
4702 if (!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
4703 {
4704 uint32_t const uTprThreshold = pVmcs->u32TprThreshold;
4705 uint32_t const uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4706
4707 /*
4708 * If the VTPR falls below the TPR threshold, we must cause a VM-exit.
4709 * See Intel spec. 29.1.2 "TPR Virtualization".
4710 */
4711 if (((uTpr >> 4) & 0xf) < uTprThreshold)
4712 {
4713 Log2(("tpr_virt: uTpr=%u uTprThreshold=%u -> VM-exit\n", uTpr, uTprThreshold));
4714 iemVmxVmcsSetExitQual(pVCpu, 0);
4715 return iemVmxVmexit(pVCpu, VMX_EXIT_TPR_BELOW_THRESHOLD);
4716 }
4717 }
4718 else
4719 {
4720 iemVmxPprVirtualization(pVCpu);
4721 iemVmxEvalPendingVirtIntrs(pVCpu);
4722 }
4723
4724 return VINF_SUCCESS;
4725}
4726
4727
4728/**
4729 * Checks whether an EOI write for the given interrupt vector causes a VM-exit or
4730 * not.
4731 *
4732 * @returns @c true if the EOI write is intercepted, @c false otherwise.
4733 * @param pVCpu The cross context virtual CPU structure.
4734 * @param uVector The interrupt that was acknowledged using an EOI.
4735 */
4736IEM_STATIC bool iemVmxIsEoiInterceptSet(PVMCPU pVCpu, uint8_t uVector)
4737{
4738 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4739 Assert(pVmcs);
4740 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4741
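    /* Each of the four 64-bit EOI-exit bitmap fields covers 64 vectors; bit (uVector % 64) of the
       corresponding field decides whether this EOI is intercepted. */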
4742 if (uVector < 64)
4743 return RT_BOOL(pVmcs->u64EoiExitBitmap0.u & RT_BIT_64(uVector));
4744 if (uVector < 128)
4745 return RT_BOOL(pVmcs->u64EoiExitBitmap1.u & RT_BIT_64(uVector - 64));
4746 if (uVector < 192)
4747 return RT_BOOL(pVmcs->u64EoiExitBitmap2.u & RT_BIT_64(uVector - 128));
4748 return RT_BOOL(pVmcs->u64EoiExitBitmap3.u & RT_BIT_64(uVector - 192));
4749}
4750
4751
4752/**
4753 * Performs EOI virtualization.
4754 *
4755 * @returns VBox strict status code.
4756 * @param pVCpu The cross context virtual CPU structure.
4757 */
4758IEM_STATIC VBOXSTRICTRC iemVmxEoiVirtualization(PVMCPU pVCpu)
4759{
4760 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4761 Assert(pVmcs);
4762 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4763
4764 /*
4765 * Clear the interrupt guest-interrupt as no longer in-service (ISR)
4766 * and get the next guest-interrupt that's in-service (if any).
4767 *
4768 * See Intel spec. 29.1.4 "EOI Virtualization".
4769 */
4770 uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
4771 uint8_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
4772 Log2(("eoi_virt: uRvi=%#x uSvi=%#x\n", uRvi, uSvi));
4773
4774 uint8_t uVector = uSvi;
4775 iemVmxVirtApicClearVector(pVCpu, XAPIC_OFF_ISR0, uVector);
4776
4777 uVector = 0;
4778 iemVmxVirtApicGetHighestSetBitInReg(pVCpu, XAPIC_OFF_ISR0, &uVector);
4779
4780 if (uVector)
4781 Log2(("eoi_virt: next interrupt %#x\n", uVector));
4782 else
4783 Log2(("eoi_virt: no interrupt pending in ISR\n"));
4784
4785 /* Update guest-interrupt status SVI (leave RVI portion as it is) in the VMCS. */
4786 pVmcs->u16GuestIntStatus = RT_MAKE_U16(uRvi, uVector);
4787
4788 iemVmxPprVirtualization(pVCpu);
4789 if (iemVmxIsEoiInterceptSet(pVCpu, uVector))
4790 return iemVmxVmexitVirtEoi(pVCpu, uVector);
4791 iemVmxEvalPendingVirtIntrs(pVCpu);
4792 return VINF_SUCCESS;
4793}
4794
4795
4796/**
4797 * Performs self-IPI virtualization.
4798 *
4799 * @returns VBox strict status code.
4800 * @param pVCpu The cross context virtual CPU structure.
4801 */
4802IEM_STATIC VBOXSTRICTRC iemVmxSelfIpiVirtualization(PVMCPU pVCpu)
4803{
4804 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4805 Assert(pVmcs);
4806 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4807
4808 /*
4809 * We should have already performed the virtual-APIC write to the self-IPI offset
4810 * in the virtual-APIC page. We now perform self-IPI virtualization.
4811 *
4812 * See Intel spec. 29.1.5 "Self-IPI Virtualization".
4813 */
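    /* Bits 7:0 of the ICR hold the vector; the read below is deliberately truncated to a byte. */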
4814 uint8_t const uVector = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_LO);
4815 Log2(("self_ipi_virt: uVector=%#x\n", uVector));
4816 iemVmxVirtApicSetVector(pVCpu, XAPIC_OFF_IRR0, uVector);
4817 uint8_t const uRvi = RT_LO_U8(pVmcs->u16GuestIntStatus);
4818 uint8_t const uSvi = RT_HI_U8(pVmcs->u16GuestIntStatus);
4819 if (uVector > uRvi)
4820 pVmcs->u16GuestIntStatus = RT_MAKE_U16(uVector, uSvi);
4821 iemVmxEvalPendingVirtIntrs(pVCpu);
4822 return VINF_SUCCESS;
4823}
4824
4825
4826/**
4827 * Performs VMX APIC-write emulation.
4828 *
4829 * @returns VBox strict status code.
4830 * @param pVCpu The cross context virtual CPU structure.
4831 */
4832IEM_STATIC VBOXSTRICTRC iemVmxApicWriteEmulation(PVMCPU pVCpu)
4833{
4834 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4835 Assert(pVmcs);
4836 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT);
4837
4838 /*
4839 * Perform APIC-write emulation based on the virtual-APIC register written.
4840 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4841 */
4842 uint16_t const offApicWrite = iemVmxVirtApicClearPendingWrite(pVCpu);
4843 VBOXSTRICTRC rcStrict;
4844 switch (offApicWrite)
4845 {
4846 case XAPIC_OFF_TPR:
4847 {
4848 /* Clear bytes 3:1 of the VTPR and perform TPR virtualization. */
4849 uint32_t uTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4850 uTpr &= UINT32_C(0x000000ff);
4851 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_TPR, uTpr);
4852 Log2(("iemVmxApicWriteEmulation: TPR write %#x\n", uTpr));
4853 rcStrict = iemVmxTprVirtualization(pVCpu);
4854 break;
4855 }
4856
4857 case XAPIC_OFF_EOI:
4858 {
4859 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4860 {
4861 /* Clear VEOI and perform EOI virtualization. */
4862 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_EOI, 0);
4863 Log2(("iemVmxApicWriteEmulation: EOI write\n"));
4864 rcStrict = iemVmxEoiVirtualization(pVCpu);
4865 }
4866 else
4867 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4868 break;
4869 }
4870
4871 case XAPIC_OFF_ICR_LO:
4872 {
4873 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4874 {
4875 /* If the ICR_LO is valid, write it and perform self-IPI virtualization. */
4876 uint32_t const uIcrLo = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_LO);
4877 uint32_t const fIcrLoMb0 = UINT32_C(0xfffbb700);
4878 uint32_t const fIcrLoMb1 = UINT32_C(0x000000f0);
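    /* Roughly: the value written must describe an edge-triggered, fixed-delivery IPI with no
       reserved bits set and a vector of at least 0x10; anything else takes the APIC-write
       VM-exit path below. */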
4879 if ( !(uIcrLo & fIcrLoMb0)
4880 && (uIcrLo & fIcrLoMb1))
4881 {
4882 Log2(("iemVmxApicWriteEmulation: Self-IPI virtualization with vector %#x\n", (uIcrLo & 0xff)));
4883 rcStrict = iemVmxSelfIpiVirtualization(pVCpu);
4884 }
4885 else
4886 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4887 }
4888 else
4889 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4890 break;
4891 }
4892
4893 case XAPIC_OFF_ICR_HI:
4894 {
4895 /* Clear bytes 2:0 of VICR_HI. No other virtualization or VM-exit must occur. */
4896 uint32_t uIcrHi = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_ICR_HI);
4897 uIcrHi &= UINT32_C(0xff000000);
4898 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_ICR_HI, uIcrHi);
4899 rcStrict = VINF_SUCCESS;
4900 break;
4901 }
4902
4903 default:
4904 {
4905 /* Writes to any other virtual-APIC register causes an APIC-write VM-exit. */
4906 rcStrict = iemVmxVmexitApicWrite(pVCpu, offApicWrite);
4907 break;
4908 }
4909 }
4910
4911 return rcStrict;
4912}
4913
4914
4915/**
4916 * Checks guest control registers, debug registers and MSRs as part of VM-entry.
4917 *
4918 * @param pVCpu The cross context virtual CPU structure.
4919 * @param pszInstr The VMX instruction name (for logging purposes).
4920 */
4921IEM_STATIC int iemVmxVmentryCheckGuestControlRegsMsrs(PVMCPU pVCpu, const char *pszInstr)
4922{
4923 /*
4924 * Guest Control Registers, Debug Registers, and MSRs.
4925 * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs".
4926 */
4927 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4928 const char *const pszFailure = "VM-exit";
4929 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
4930
4931 /* CR0 reserved bits. */
4932 {
4933 /* CR0 MB1 bits. */
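    /* Bits set in IA32_VMX_CR0_FIXED0 must be 1 in the guest CR0; with unrestricted guest,
       PE and PG are exempt from that requirement. */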
4934 uint64_t u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
4935 Assert(!(u64Cr0Fixed0 & (X86_CR0_NW | X86_CR0_CD)));
4936 if (fUnrestrictedGuest)
4937 u64Cr0Fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
4938 if ((pVmcs->u64GuestCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
4939 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed0);
4940
4941 /* CR0 MBZ bits. */
4942 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
4943 if (pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1)
4944 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed1);
4945
4946 /* Without unrestricted-guest support, VT-x does not support unpaged protected mode. */
4947 if ( !fUnrestrictedGuest
4948 && (pVmcs->u64GuestCr0.u & X86_CR0_PG)
4949 && !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
4950 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0PgPe);
4951 }
4952
4953 /* CR4 reserved bits. */
4954 {
4955 /* CR4 MB1 bits. */
4956 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
4957 if ((pVmcs->u64GuestCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
4958 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed0);
4959
4960 /* CR4 MBZ bits. */
4961 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
4962 if (pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1)
4963 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed1);
4964 }
4965
4966 /* DEBUGCTL MSR. */
4967 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4968 && (pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
4969 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDebugCtl);
4970
4971 /* 64-bit CPU checks. */
4972 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4973 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4974 {
4975 if (fGstInLongMode)
4976 {
4977 /* PAE must be set. */
4978 if ( (pVmcs->u64GuestCr0.u & X86_CR0_PG)
4979 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE))
4980 { /* likely */ }
4981 else
4982 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPae);
4983 }
4984 else
4985 {
4986 /* PCIDE should not be set. */
4987 if (!(pVmcs->u64GuestCr4.u & X86_CR4_PCIDE))
4988 { /* likely */ }
4989 else
4990 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPcide);
4991 }
4992
4993 /* CR3. */
4994 if (!(pVmcs->u64GuestCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
4995 { /* likely */ }
4996 else
4997 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr3);
4998
4999 /* DR7. */
5000 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
5001 && (pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
5002 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDr7);
5003
5004 /* SYSENTER ESP and SYSENTER EIP. */
5005 if ( X86_IS_CANONICAL(pVmcs->u64GuestSysenterEsp.u)
5006 && X86_IS_CANONICAL(pVmcs->u64GuestSysenterEip.u))
5007 { /* likely */ }
5008 else
5009 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSysenterEspEip);
5010 }
5011
5012 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
5013 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
5014
5015 /* PAT MSR. */
5016 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
5017 && !CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
5018 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPatMsr);
5019
5020 /* EFER MSR. */
5021 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
5022 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
5023 && (pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
5024 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsrRsvd);
5025
5026 bool const fGstLma = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LMA);
5027 bool const fGstLme = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LME);
5028 if ( fGstInLongMode == fGstLma
5029 && ( !(pVmcs->u64GuestCr0.u & X86_CR0_PG)
5030 || fGstLma == fGstLme))
5031 { /* likely */ }
5032 else
5033 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsr);
5034
5035 /* We don't support IA32_BNDCFGS MSR yet. */
5036 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
5037
5038 NOREF(pszInstr);
5039 NOREF(pszFailure);
5040 return VINF_SUCCESS;
5041}
5042
5043
5044/**
5045 * Checks guest segment registers, LDTR and TR as part of VM-entry.
5046 *
5047 * @param pVCpu The cross context virtual CPU structure.
5048 * @param pszInstr The VMX instruction name (for logging purposes).
5049 */
5050IEM_STATIC int iemVmxVmentryCheckGuestSegRegs(PVMCPU pVCpu, const char *pszInstr)
5051{
5052 /*
5053 * Segment registers.
5054 * See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
5055 */
5056 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5057 const char *const pszFailure = "VM-exit";
5058 bool const fGstInV86Mode = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);
5059 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
5060 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5061
5062 /* Selectors. */
5063 if ( !fGstInV86Mode
5064 && !fUnrestrictedGuest
5065 && (pVmcs->GuestSs & X86_SEL_RPL) != (pVmcs->GuestCs & X86_SEL_RPL))
5066 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelCsSsRpl);
5067
5068 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
5069 {
5070 CPUMSELREG SelReg;
5071 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &SelReg);
5072 if (RT_LIKELY(rc == VINF_SUCCESS))
5073 { /* likely */ }
5074 else
5075 return rc;
5076
5077 /*
5078 * Virtual-8086 mode checks.
5079 */
5080 if (fGstInV86Mode)
5081 {
5082 /* Base address. */
5083 if (SelReg.u64Base == (uint64_t)SelReg.Sel << 4)
5084 { /* likely */ }
5085 else
5086 {
5087 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBaseV86(iSegReg);
5088 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5089 }
5090
5091 /* Limit. */
5092 if (SelReg.u32Limit == 0xffff)
5093 { /* likely */ }
5094 else
5095 {
5096 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegLimitV86(iSegReg);
5097 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5098 }
5099
5100 /* Attribute. */
5101 if (SelReg.Attr.u == 0xf3)
5102 { /* likely */ }
5103 else
5104 {
5105 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrV86(iSegReg);
5106 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5107 }
5108
5109 /* We're done; move to checking the next segment. */
5110 continue;
5111 }
5112
5113 /* Checks done by 64-bit CPUs. */
5114 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5115 {
5116 /* Base address. */
5117 if ( iSegReg == X86_SREG_FS
5118 || iSegReg == X86_SREG_GS)
5119 {
5120 if (X86_IS_CANONICAL(SelReg.u64Base))
5121 { /* likely */ }
5122 else
5123 {
5124 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
5125 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5126 }
5127 }
5128 else if (iSegReg == X86_SREG_CS)
5129 {
5130 if (!RT_HI_U32(SelReg.u64Base))
5131 { /* likely */ }
5132 else
5133 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseCs);
5134 }
5135 else
5136 {
5137 if ( SelReg.Attr.n.u1Unusable
5138 || !RT_HI_U32(SelReg.u64Base))
5139 { /* likely */ }
5140 else
5141 {
5142 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
5143 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5144 }
5145 }
5146 }
5147
5148 /*
5149 * Checks outside Virtual-8086 mode.
5150 */
5151 uint8_t const uSegType = SelReg.Attr.n.u4Type;
5152 uint8_t const fCodeDataSeg = SelReg.Attr.n.u1DescType;
5153 uint8_t const fUsable = !SelReg.Attr.n.u1Unusable;
5154 uint8_t const uDpl = SelReg.Attr.n.u2Dpl;
5155 uint8_t const fPresent = SelReg.Attr.n.u1Present;
5156 uint8_t const uGranularity = SelReg.Attr.n.u1Granularity;
5157 uint8_t const uDefBig = SelReg.Attr.n.u1DefBig;
5158 uint8_t const fSegLong = SelReg.Attr.n.u1Long;
5159
5160 /* Code or usable segment. */
5161 if ( iSegReg == X86_SREG_CS
5162 || fUsable)
5163 {
5164 /* Reserved bits (bits 31:17 and bits 11:8). */
5165 if (!(SelReg.Attr.u & 0xfffe0f00))
5166 { /* likely */ }
5167 else
5168 {
5169 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrRsvd(iSegReg);
5170 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5171 }
5172
5173 /* Descriptor type. */
5174 if (fCodeDataSeg)
5175 { /* likely */ }
5176 else
5177 {
5178 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDescType(iSegReg);
5179 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5180 }
5181
5182 /* Present. */
5183 if (fPresent)
5184 { /* likely */ }
5185 else
5186 {
5187 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrPresent(iSegReg);
5188 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5189 }
5190
5191 /* Granularity. */
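    /* If any of limit bits 11:0 is 0 the granularity bit must be 0; if any of limit bits 31:20
       is 1 the granularity bit must be 1. */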
5192 if ( ((SelReg.u32Limit & 0x00000fff) == 0x00000fff || !uGranularity)
5193 && ((SelReg.u32Limit & 0xfff00000) == 0x00000000 || uGranularity))
5194 { /* likely */ }
5195 else
5196 {
5197 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrGran(iSegReg);
5198 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5199 }
5200 }
5201
5202 if (iSegReg == X86_SREG_CS)
5203 {
5204 /* Segment Type and DPL. */
5205 if ( uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
5206 && fUnrestrictedGuest)
5207 {
5208 if (uDpl == 0)
5209 { /* likely */ }
5210 else
5211 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplZero);
5212 }
5213 else if ( uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_ACCESSED)
5214 || uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
5215 {
5216 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5217 if (uDpl == AttrSs.n.u2Dpl)
5218 { /* likely */ }
5219 else
5220 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplEqSs);
5221 }
5222 else if ((uSegType & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
5223 == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
5224 {
5225 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5226 if (uDpl <= AttrSs.n.u2Dpl)
5227 { /* likely */ }
5228 else
5229 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplLtSs);
5230 }
5231 else
5232 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsType);
5233
5234 /* Def/Big. */
5235 if ( fGstInLongMode
5236 && fSegLong)
5237 {
5238 if (uDefBig == 0)
5239 { /* likely */ }
5240 else
5241 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDefBig);
5242 }
5243 }
5244 else if (iSegReg == X86_SREG_SS)
5245 {
5246 /* Segment Type. */
5247 if ( !fUsable
5248 || uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
5249 || uSegType == (X86_SEL_TYPE_DOWN | X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED))
5250 { /* likely */ }
5251 else
5252 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsType);
5253
5254 /* DPL. */
5255 if (!fUnrestrictedGuest)
5256 {
5257 if (uDpl == (SelReg.Sel & X86_SEL_RPL))
5258 { /* likely */ }
5259 else
5260 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplEqRpl);
5261 }
5262 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
5263 if ( AttrCs.n.u4Type == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
5264 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
5265 {
5266 if (uDpl == 0)
5267 { /* likely */ }
5268 else
5269 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplZero);
5270 }
5271 }
5272 else
5273 {
5274 /* DS, ES, FS, GS. */
5275 if (fUsable)
5276 {
5277 /* Segment type. */
5278 if (uSegType & X86_SEL_TYPE_ACCESSED)
5279 { /* likely */ }
5280 else
5281 {
5282 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrTypeAcc(iSegReg);
5283 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5284 }
5285
5286 if ( !(uSegType & X86_SEL_TYPE_CODE)
5287 || (uSegType & X86_SEL_TYPE_READ))
5288 { /* likely */ }
5289 else
5290 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsTypeRead);
5291
5292 /* DPL. */
5293 if ( !fUnrestrictedGuest
5294 && uSegType <= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
5295 {
5296 if (uDpl >= (SelReg.Sel & X86_SEL_RPL))
5297 { /* likely */ }
5298 else
5299 {
5300 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDplRpl(iSegReg);
5301 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5302 }
5303 }
5304 }
5305 }
5306 }
5307
5308 /*
5309 * LDTR.
5310 */
5311 {
5312 CPUMSELREG Ldtr;
5313 Ldtr.Sel = pVmcs->GuestLdtr;
5314 Ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
5315 Ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
5316 Ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
5317
5318 if (!Ldtr.Attr.n.u1Unusable)
5319 {
5320 /* Selector. */
5321 if (!(Ldtr.Sel & X86_SEL_LDT))
5322 { /* likely */ }
5323 else
5324 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelLdtr);
5325
5326 /* Base. */
5327 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5328 {
5329 if (X86_IS_CANONICAL(Ldtr.u64Base))
5330 { /* likely */ }
5331 else
5332 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseLdtr);
5333 }
5334
5335 /* Attributes. */
5336 /* Reserved bits (bits 31:17 and bits 11:8). */
5337 if (!(Ldtr.Attr.u & 0xfffe0f00))
5338 { /* likely */ }
5339 else
5340 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrRsvd);
5341
5342 if (Ldtr.Attr.n.u4Type == X86_SEL_TYPE_SYS_LDT)
5343 { /* likely */ }
5344 else
5345 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrType);
5346
5347 if (!Ldtr.Attr.n.u1DescType)
5348 { /* likely */ }
5349 else
5350 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrDescType);
5351
5352 if (Ldtr.Attr.n.u1Present)
5353 { /* likely */ }
5354 else
5355 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrPresent);
5356
5357 if ( ((Ldtr.u32Limit & 0x00000fff) == 0x00000fff || !Ldtr.Attr.n.u1Granularity)
5358 && ((Ldtr.u32Limit & 0xfff00000) == 0x00000000 || Ldtr.Attr.n.u1Granularity))
5359 { /* likely */ }
5360 else
5361 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrGran);
5362 }
5363 }
5364
5365 /*
5366 * TR.
5367 */
5368 {
5369 CPUMSELREG Tr;
5370 Tr.Sel = pVmcs->GuestTr;
5371 Tr.u32Limit = pVmcs->u32GuestTrLimit;
5372 Tr.u64Base = pVmcs->u64GuestTrBase.u;
5373 Tr.Attr.u = pVmcs->u32GuestTrAttr;
5374
5375 /* Selector. */
5376 if (!(Tr.Sel & X86_SEL_LDT))
5377 { /* likely */ }
5378 else
5379 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelTr);
5380
5381 /* Base. */
5382 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5383 {
5384 if (X86_IS_CANONICAL(Tr.u64Base))
5385 { /* likely */ }
5386 else
5387 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseTr);
5388 }
5389
5390 /* Attributes. */
5391 /* Reserved bits (bits 31:17 and bits 11:8). */
5392 if (!(Tr.Attr.u & 0xfffe0f00))
5393 { /* likely */ }
5394 else
5395 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrRsvd);
5396
5397 if (!Tr.Attr.n.u1Unusable)
5398 { /* likely */ }
5399 else
5400 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrUnusable);
5401
5402 if ( Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY
5403 || ( !fGstInLongMode
5404 && Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY))
5405 { /* likely */ }
5406 else
5407 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrType);
5408
5409 if (!Tr.Attr.n.u1DescType)
5410 { /* likely */ }
5411 else
5412 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrDescType);
5413
5414 if (Tr.Attr.n.u1Present)
5415 { /* likely */ }
5416 else
5417 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrPresent);
5418
5419 if ( ((Tr.u32Limit & 0x00000fff) == 0x00000fff || !Tr.Attr.n.u1Granularity)
5420 && ((Tr.u32Limit & 0xfff00000) == 0x00000000 || Tr.Attr.n.u1Granularity))
5421 { /* likely */ }
5422 else
5423 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrGran);
5424 }
5425
5426 NOREF(pszInstr);
5427 NOREF(pszFailure);
5428 return VINF_SUCCESS;
5429}
5430
5431
5432/**
5433 * Checks guest GDTR and IDTR as part of VM-entry.
5434 *
5435 * @param pVCpu The cross context virtual CPU structure.
5436 * @param pszInstr The VMX instruction name (for logging purposes).
5437 */
5438IEM_STATIC int iemVmxVmentryCheckGuestGdtrIdtr(PVMCPU pVCpu, const char *pszInstr)
5439{
5440 /*
5441 * GDTR and IDTR.
5442 * See Intel spec. 26.3.1.3 "Checks on Guest Descriptor-Table Registers".
5443 */
5444 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5445 const char *const pszFailure = "VM-exit";
5446
5447 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5448 {
5449 /* Base. */
5450 if (X86_IS_CANONICAL(pVmcs->u64GuestGdtrBase.u))
5451 { /* likely */ }
5452 else
5453 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrBase);
5454
5455 if (X86_IS_CANONICAL(pVmcs->u64GuestIdtrBase.u))
5456 { /* likely */ }
5457 else
5458 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrBase);
5459 }
5460
5461 /* Limit. */
5462 if (!RT_HI_U16(pVmcs->u32GuestGdtrLimit))
5463 { /* likely */ }
5464 else
5465 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrLimit);
5466
5467 if (!RT_HI_U16(pVmcs->u32GuestIdtrLimit))
5468 { /* likely */ }
5469 else
5470 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrLimit);
5471
5472 NOREF(pszInstr);
5473 NOREF(pszFailure);
5474 return VINF_SUCCESS;
5475}
5476
5477
5478/**
5479 * Checks guest RIP and RFLAGS as part of VM-entry.
5480 *
5481 * @param pVCpu The cross context virtual CPU structure.
5482 * @param pszInstr The VMX instruction name (for logging purposes).
5483 */
5484IEM_STATIC int iemVmxVmentryCheckGuestRipRFlags(PVMCPU pVCpu, const char *pszInstr)
5485{
5486 /*
5487 * RIP and RFLAGS.
5488 * See Intel spec. 26.3.1.4 "Checks on Guest RIP and RFLAGS".
5489 */
5490 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5491 const char *const pszFailure = "VM-exit";
5492 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5493
5494 /* RIP. */
5495 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5496 {
5497 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
5498 if ( !fGstInLongMode
5499 || !AttrCs.n.u1Long)
5500 {
5501 if (!RT_HI_U32(pVmcs->u64GuestRip.u))
5502 { /* likely */ }
5503 else
5504 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRipRsvd);
5505 }
5506
5507 if ( fGstInLongMode
5508 && AttrCs.n.u1Long)
5509 {
5510 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth == 48); /* Canonical. */
5511 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth < 64
5512 && X86_IS_CANONICAL(pVmcs->u64GuestRip.u))
5513 { /* likely */ }
5514 else
5515 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRip);
5516 }
5517 }
5518
5519 /* RFLAGS (bits 63:22 (or 31:22), bits 15, 5, 3 are reserved, bit 1 MB1). */
5520 uint64_t const uGuestRFlags = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode ? pVmcs->u64GuestRFlags.u
5521 : pVmcs->u64GuestRFlags.s.Lo;
5522 if ( !(uGuestRFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK))
5523 && (uGuestRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK)
5524 { /* likely */ }
5525 else
5526 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsRsvd);
5527
5528 if ( fGstInLongMode
5529 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
5530 {
5531 if (!(uGuestRFlags & X86_EFL_VM))
5532 { /* likely */ }
5533 else
5534 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsVm);
5535 }
5536
5537 if ( VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo)
5538 && VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo) == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5539 {
5540 if (uGuestRFlags & X86_EFL_IF)
5541 { /* likely */ }
5542 else
5543 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsIf);
5544 }
5545
5546 NOREF(pszInstr);
5547 NOREF(pszFailure);
5548 return VINF_SUCCESS;
5549}
5550
5551
5552/**
5553 * Checks guest non-register state as part of VM-entry.
5554 *
5555 * @param pVCpu The cross context virtual CPU structure.
5556 * @param pszInstr The VMX instruction name (for logging purposes).
5557 */
5558IEM_STATIC int iemVmxVmentryCheckGuestNonRegState(PVMCPU pVCpu, const char *pszInstr)
5559{
5560 /*
5561 * Guest non-register state.
5562 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
5563 */
5564 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5565 const char *const pszFailure = "VM-exit";
5566
5567 /*
5568 * Activity state.
5569 */
5570 uint64_t const u64GuestVmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
5571 uint32_t const fActivityStateMask = RT_BF_GET(u64GuestVmxMiscMsr, VMX_BF_MISC_ACTIVITY_STATES);
5572 if (!(pVmcs->u32GuestActivityState & fActivityStateMask))
5573 { /* likely */ }
5574 else
5575 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateRsvd);
5576
5577 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5578 if ( !AttrSs.n.u2Dpl
5579 || pVmcs->u32GuestActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT)
5580 { /* likely */ }
5581 else
5582 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateSsDpl);
5583
5584 if ( pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
5585 || pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5586 {
5587 if (pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE)
5588 { /* likely */ }
5589 else
5590 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateStiMovSs);
5591 }
5592
5593 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
5594 {
5595 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
5596 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(pVmcs->u32EntryIntInfo);
5597 AssertCompile(VMX_V_GUEST_ACTIVITY_STATE_MASK == (VMX_VMCS_GUEST_ACTIVITY_HLT | VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN));
5598 switch (pVmcs->u32GuestActivityState)
5599 {
5600 case VMX_VMCS_GUEST_ACTIVITY_HLT:
5601 {
5602 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT
5603 || uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
5604 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
5605 && ( uVector == X86_XCPT_DB
5606 || uVector == X86_XCPT_MC))
5607 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
5608 && uVector == VMX_ENTRY_INT_INFO_VECTOR_MTF))
5609 { /* likely */ }
5610 else
5611 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateHlt);
5612 break;
5613 }
5614
5615 case VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN:
5616 {
5617 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
5618 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
5619 && uVector == X86_XCPT_MC))
5620 { /* likely */ }
5621 else
5622 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateShutdown);
5623 break;
5624 }
5625
5626 case VMX_VMCS_GUEST_ACTIVITY_ACTIVE:
5627 default:
5628 break;
5629 }
5630 }
5631
5632 /*
5633 * Interruptibility state.
5634 */
5635 if (!(pVmcs->u32GuestIntrState & ~VMX_VMCS_GUEST_INT_STATE_MASK))
5636 { /* likely */ }
5637 else
5638 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRsvd);
5639
5640 if ((pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5641 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5642 { /* likely */ }
5643 else
5644 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateStiMovSs);
5645
5646 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_IF)
5647 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5648 { /* likely */ }
5649 else
5650 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRFlagsSti);
5651
5652 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
5653 {
5654 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
5655 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5656 {
5657 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
5658 { /* likely */ }
5659 else
5660 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateExtInt);
5661 }
5662 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5663 {
5664 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
5665 { /* likely */ }
5666 else
5667 {
5668 /*
5669 * We don't support injecting NMIs when blocking-by-STI would be in effect.
5670 * We update the VM-exit qualification only when blocking-by-STI is set
5671 * without blocking-by-MovSS being set. Although in practice it does not
5672 * make much difference since the order of checks is implementation-defined.
5673 */
5674 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5675 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_NMI_INJECT);
5676 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateNmi);
5677 }
5678
5679 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5680 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI))
5681 { /* likely */ }
5682 else
5683 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateVirtNmi);
5684 }
5685 }
5686
5687 /* We don't support SMM yet. So blocking-by-SMIs must not be set. */
5688 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI))
5689 { /* likely */ }
5690 else
5691 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateSmi);
5692
5693 /* We don't support SGX yet. So enclave-interruption must not be set. */
5694 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_ENCLAVE))
5695 { /* likely */ }
5696 else
5697 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateEnclave);
5698
5699 /*
5700 * Pending debug exceptions.
5701 */
5702 uint64_t const uPendingDbgXcpt = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode
5703 ? pVmcs->u64GuestPendingDbgXcpt.u
5704 : pVmcs->u64GuestPendingDbgXcpt.s.Lo;
5705 if (!(uPendingDbgXcpt & ~VMX_VMCS_GUEST_PENDING_DEBUG_VALID_MASK))
5706 { /* likely */ }
5707 else
5708 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRsvd);
5709
5710 if ( (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5711 || pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5712 {
5713 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_TF)
5714 && !(pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF)
5715 && !(uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
5716 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsTf);
5717
5718 if ( ( !(pVmcs->u64GuestRFlags.u & X86_EFL_TF)
5719 || (pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF))
5720 && (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
5721 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsNoTf);
5722 }
5723
5724 /* We don't support RTM (Real-time Transactional Memory) yet. */
5725 if (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_RTM)
5726 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRtm);
5727
5728 /*
5729 * VMCS link pointer.
5730 */
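    /* A link pointer of all ones means no shadow VMCS is referenced and the checks below are skipped. */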
5731 if (pVmcs->u64VmcsLinkPtr.u != UINT64_C(0xffffffffffffffff))
5732 {
5733 RTGCPHYS const GCPhysShadowVmcs = pVmcs->u64VmcsLinkPtr.u;
5734 /* We don't support SMM yet (so VMCS link pointer cannot be the current VMCS). */
5735 if (GCPhysShadowVmcs != IEM_VMX_GET_CURRENT_VMCS(pVCpu))
5736 { /* likely */ }
5737 else
5738 {
5739 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5740 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrCurVmcs);
5741 }
5742
5743 /* Validate the address. */
5744 if ( (GCPhysShadowVmcs & X86_PAGE_4K_OFFSET_MASK)
5745 || (GCPhysShadowVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5746 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysShadowVmcs))
5747 {
5748 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5749 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmcsLinkPtr);
5750 }
5751
5752 /* Read the VMCS-link pointer from guest memory. */
5753 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs));
5754 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs),
5755 GCPhysShadowVmcs, VMX_V_VMCS_SIZE);
5756 if (RT_FAILURE(rc))
5757 {
5758 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5759 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys);
5760 }
5761
5762 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
5763 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID)
5764 { /* likely */ }
5765 else
5766 {
5767 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5768 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrRevId);
5769 }
5770
5771 /* Verify the shadow bit is set if VMCS shadowing is enabled. */
5772 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
5773 || pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.fIsShadowVmcs)
5774 { /* likely */ }
5775 else
5776 {
5777 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5778 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrShadow);
5779 }
5780
5781 /* Finally update our cache of the guest physical address of the shadow VMCS. */
5782 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs = GCPhysShadowVmcs;
5783 }
5784
5785 NOREF(pszInstr);
5786 NOREF(pszFailure);
5787 return VINF_SUCCESS;
5788}
5789
5790
5791/**
5792 * Checks if the PDPTEs referenced by the nested-guest CR3 are valid as part of
5793 * VM-entry.
5794 *
5795 * @returns @c true if all PDPTEs are valid, @c false otherwise.
5796 * @param pVCpu The cross context virtual CPU structure.
5797 * @param pszInstr The VMX instruction name (for logging purposes).
5798 * @param pVmcs Pointer to the virtual VMCS.
5799 */
5800IEM_STATIC int iemVmxVmentryCheckGuestPdptesForCr3(PVMCPU pVCpu, const char *pszInstr, PVMXVVMCS pVmcs)
5801{
5802 /*
5803 * Check PDPTEs.
5804 * See Intel spec. 4.4.1 "PDPTE Registers".
5805 */
5806 uint64_t const uGuestCr3 = pVmcs->u64GuestCr3.u & X86_CR3_PAE_PAGE_MASK;
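    /* In PAE paging mode the four PDPTEs are loaded from the 32-byte aligned table addressed by CR3. */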
5807 const char *const pszFailure = "VM-exit";
5808
5809 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
5810 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uGuestCr3, sizeof(aPdptes));
5811 if (RT_SUCCESS(rc))
5812 {
5813 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
5814 {
5815 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
5816 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
5817 { /* likely */ }
5818 else
5819 {
5820 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
5821 VMXVDIAG const enmDiag = iemVmxGetDiagVmentryPdpteRsvd(iPdpte);
5822 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5823 }
5824 }
5825 }
5826 else
5827 {
5828 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
5829 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys);
5830 }
5831
5832 NOREF(pszFailure);
5833 return rc;
5834}
5835
5836
5837/**
5838 * Checks guest PDPTEs as part of VM-entry.
5839 *
5840 * @param pVCpu The cross context virtual CPU structure.
5841 * @param pszInstr The VMX instruction name (for logging purposes).
5842 */
5843IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPU pVCpu, const char *pszInstr)
5844{
5845 /*
5846 * Guest PDPTEs.
5847 * See Intel spec. 26.3.1.5 "Checks on Guest Page-Directory-Pointer-Table Entries".
5848 */
5849 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5850 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5851
5852 /* Check PDPTEs if the VM-entry is to a guest using PAE paging. */
5853 int rc;
5854 if ( !fGstInLongMode
5855 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE)
5856 && (pVmcs->u64GuestCr0.u & X86_CR0_PG))
5857 {
5858 /*
5859 * We don't support nested-paging for nested-guests yet.
5860 *
5861 * Without nested-paging for nested-guests, the PDPTEs in the VMCS are not used;
5862 * instead we need to check the PDPTEs referenced by the guest CR3.
5863 */
5864 rc = iemVmxVmentryCheckGuestPdptesForCr3(pVCpu, pszInstr, pVmcs);
5865 }
5866 else
5867 rc = VINF_SUCCESS;
5868 return rc;
5869}
5870
5871
5872/**
5873 * Checks guest-state as part of VM-entry.
5874 *
5875 * @returns VBox status code.
5876 * @param pVCpu The cross context virtual CPU structure.
5877 * @param pszInstr The VMX instruction name (for logging purposes).
5878 */
5879IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPU pVCpu, const char *pszInstr)
5880{
5881 int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
5882 if (RT_SUCCESS(rc))
5883 {
5884 rc = iemVmxVmentryCheckGuestSegRegs(pVCpu, pszInstr);
5885 if (RT_SUCCESS(rc))
5886 {
5887 rc = iemVmxVmentryCheckGuestGdtrIdtr(pVCpu, pszInstr);
5888 if (RT_SUCCESS(rc))
5889 {
5890 rc = iemVmxVmentryCheckGuestRipRFlags(pVCpu, pszInstr);
5891 if (RT_SUCCESS(rc))
5892 {
5893 rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
5894 if (RT_SUCCESS(rc))
5895 return iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
5896 }
5897 }
5898 }
5899 }
5900 return rc;
5901}
5902
5903
5904/**
5905 * Checks host-state as part of VM-entry.
5906 *
5907 * @returns VBox status code.
5908 * @param pVCpu The cross context virtual CPU structure.
5909 * @param pszInstr The VMX instruction name (for logging purposes).
5910 */
5911IEM_STATIC int iemVmxVmentryCheckHostState(PVMCPU pVCpu, const char *pszInstr)
5912{
5913 /*
5914 * Host Control Registers and MSRs.
5915 * See Intel spec. 26.2.2 "Checks on Host Control Registers and MSRs".
5916 */
5917 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5918 const char * const pszFailure = "VMFail";
5919
5920 /* CR0 reserved bits. */
5921 {
5922 /* CR0 MB1 bits. */
5923 uint64_t const u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
5924 if ((pVmcs->u64HostCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
5925 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed0);
5926
5927 /* CR0 MBZ bits. */
5928 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
5929 if (pVmcs->u64HostCr0.u & ~u64Cr0Fixed1)
5930 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed1);
5931 }
5932
5933 /* CR4 reserved bits. */
5934 {
5935 /* CR4 MB1 bits. */
5936 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
5937 if ((pVmcs->u64HostCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
5938 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed0);
5939
5940 /* CR4 MBZ bits. */
5941 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
5942 if (pVmcs->u64HostCr4.u & ~u64Cr4Fixed1)
5943 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed1);
5944 }
5945
5946 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5947 {
5948 /* CR3 reserved bits. */
5949 if (!(pVmcs->u64HostCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
5950 { /* likely */ }
5951 else
5952 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr3);
5953
5954 /* SYSENTER ESP and SYSENTER EIP. */
5955 if ( X86_IS_CANONICAL(pVmcs->u64HostSysenterEsp.u)
5956 && X86_IS_CANONICAL(pVmcs->u64HostSysenterEip.u))
5957 { /* likely */ }
5958 else
5959 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSysenterEspEip);
5960 }
5961
5962 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
5963 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PERF_MSR));
5964
5965 /* PAT MSR. */
5966 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
5967 || CPUMIsPatMsrValid(pVmcs->u64HostPatMsr.u))
5968 { /* likely */ }
5969 else
5970 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostPatMsr);
5971
5972 /* EFER MSR. */
5973 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
5974 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
5975 || !(pVmcs->u64HostEferMsr.u & ~uValidEferMask))
5976 { /* likely */ }
5977 else
5978 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsrRsvd);
5979
5980 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
5981 bool const fHostLma = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LMA);
5982 bool const fHostLme = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LME);
5983 if ( fHostInLongMode == fHostLma
5984 && fHostInLongMode == fHostLme)
5985 { /* likely */ }
5986 else
5987 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsr);
5988
5989 /*
5990 * Host Segment and Descriptor-Table Registers.
5991 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
5992 */
5993 /* Selector RPL and TI. */
5994 if ( !(pVmcs->HostCs & (X86_SEL_RPL | X86_SEL_LDT))
5995 && !(pVmcs->HostSs & (X86_SEL_RPL | X86_SEL_LDT))
5996 && !(pVmcs->HostDs & (X86_SEL_RPL | X86_SEL_LDT))
5997 && !(pVmcs->HostEs & (X86_SEL_RPL | X86_SEL_LDT))
5998 && !(pVmcs->HostFs & (X86_SEL_RPL | X86_SEL_LDT))
5999 && !(pVmcs->HostGs & (X86_SEL_RPL | X86_SEL_LDT))
6000 && !(pVmcs->HostTr & (X86_SEL_RPL | X86_SEL_LDT)))
6001 { /* likely */ }
6002 else
6003 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSel);
6004
6005 /* CS and TR selectors cannot be 0. */
6006 if ( pVmcs->HostCs
6007 && pVmcs->HostTr)
6008 { /* likely */ }
6009 else
6010 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCsTr);
6011
6012 /* SS cannot be 0 if 32-bit host. */
6013 if ( fHostInLongMode
6014 || pVmcs->HostSs)
6015 { /* likely */ }
6016 else
6017 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSs);
6018
6019 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6020 {
6021 /* FS, GS, GDTR, IDTR, TR base address. */
6022 if ( X86_IS_CANONICAL(pVmcs->u64HostFsBase.u)
6023             && X86_IS_CANONICAL(pVmcs->u64HostGsBase.u)
6024 && X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u)
6025 && X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u)
6026 && X86_IS_CANONICAL(pVmcs->u64HostTrBase.u))
6027 { /* likely */ }
6028 else
6029 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSegBase);
6030 }
6031
6032 /*
6033 * Host address-space size for 64-bit CPUs.
6034 * See Intel spec. 26.2.4 "Checks Related to Address-Space Size".
6035 */
6036 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
6037 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6038 {
6039 bool const fCpuInLongMode = CPUMIsGuestInLongMode(pVCpu);
6040
6041 /* Logical processor in IA-32e mode. */
6042 if (fCpuInLongMode)
6043 {
6044 if (fHostInLongMode)
6045 {
6046 /* PAE must be set. */
6047 if (pVmcs->u64HostCr4.u & X86_CR4_PAE)
6048 { /* likely */ }
6049 else
6050 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pae);
6051
6052 /* RIP must be canonical. */
6053 if (X86_IS_CANONICAL(pVmcs->u64HostRip.u))
6054 { /* likely */ }
6055 else
6056 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRip);
6057 }
6058 else
6059 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostLongMode);
6060 }
6061 else
6062 {
6063 /* Logical processor is outside IA-32e mode. */
6064 if ( !fGstInLongMode
6065 && !fHostInLongMode)
6066 {
6067 /* PCIDE should not be set. */
6068 if (!(pVmcs->u64HostCr4.u & X86_CR4_PCIDE))
6069 { /* likely */ }
6070 else
6071 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pcide);
6072
6073 /* The high 32-bits of RIP MBZ. */
6074 if (!pVmcs->u64HostRip.s.Hi)
6075 { /* likely */ }
6076 else
6077 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRipRsvd);
6078 }
6079 else
6080 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongMode);
6081 }
6082 }
6083 else
6084 {
6085 /* Host address-space size for 32-bit CPUs. */
6086 if ( !fGstInLongMode
6087 && !fHostInLongMode)
6088 { /* likely */ }
6089 else
6090 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongModeNoCpu);
6091 }
6092
6093 NOREF(pszInstr);
6094 NOREF(pszFailure);
6095 return VINF_SUCCESS;
6096}
6097
6098
6099/**
6100 * Checks VM-entry controls fields as part of VM-entry.
6101 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
6102 *
6103 * @returns VBox status code.
6104 * @param pVCpu The cross context virtual CPU structure.
6105 * @param pszInstr The VMX instruction name (for logging purposes).
6106 */
6107IEM_STATIC int iemVmxVmentryCheckEntryCtls(PVMCPU pVCpu, const char *pszInstr)
6108{
6109 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6110 const char * const pszFailure = "VMFail";
6111
6112 /* VM-entry controls. */
6113 VMXCTLSMSR EntryCtls;
6114 EntryCtls.u = CPUMGetGuestIa32VmxEntryCtls(pVCpu);
6115 if (~pVmcs->u32EntryCtls & EntryCtls.n.disallowed0)
6116 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);
6117
6118 if (pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1)
6119 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsAllowed1);
6120
6121 /* Event injection. */
6122 uint32_t const uIntInfo = pVmcs->u32EntryIntInfo;
6123 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VALID))
6124 {
6125 /* Type and vector. */
6126 uint8_t const uType = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_TYPE);
6127 uint8_t const uVector = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VECTOR);
6128 uint8_t const uRsvd = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_RSVD_12_30);
6129 if ( !uRsvd
6130 && HMVmxIsEntryIntInfoTypeValid(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxMonitorTrapFlag, uType)
6131 && HMVmxIsEntryIntInfoVectorValid(uVector, uType))
6132 { /* likely */ }
6133 else
6134 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoTypeVecRsvd);
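        /* Note: the interruption-information format being validated is: bits 7:0 = vector,
           bits 10:8 = type (0=ext int, 2=NMI, 3=HW xcpt, 4=SW int, 5=priv SW xcpt, 6=SW xcpt,
           7=other event), bit 11 = error-code valid, bits 30:12 = reserved, bit 31 = valid. */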
6135
6136 /* Exception error code. */
6137 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID))
6138 {
6139 /* Delivery possible only in Unrestricted-guest mode when CR0.PE is set. */
6140 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
6141 || (pVmcs->u64GuestCr0.s.Lo & X86_CR0_PE))
6142 { /* likely */ }
6143 else
6144 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodePe);
6145
6146 /* Exceptions that provide an error code. */
6147 if ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
6148 && ( uVector == X86_XCPT_DF
6149 || uVector == X86_XCPT_TS
6150 || uVector == X86_XCPT_NP
6151 || uVector == X86_XCPT_SS
6152 || uVector == X86_XCPT_GP
6153 || uVector == X86_XCPT_PF
6154 || uVector == X86_XCPT_AC))
6155 { /* likely */ }
6156 else
6157 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodeVec);
6158
6159 /* Exception error-code reserved bits. */
6160 if (!(pVmcs->u32EntryXcptErrCode & ~VMX_ENTRY_INT_XCPT_ERR_CODE_VALID_MASK))
6161 { /* likely */ }
6162 else
6163 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryXcptErrCodeRsvd);
6164
6165 /* Injecting a software interrupt, software exception or privileged software exception. */
6166 if ( uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
6167 || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
6168 || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
6169 {
6170 /* Instruction length must be in the range 0-15. */
6171 if (pVmcs->u32EntryInstrLen <= VMX_ENTRY_INSTR_LEN_MAX)
6172 { /* likely */ }
6173 else
6174 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLen);
6175
6176             /* Instruction length of 0 is allowed only when the corresponding CPU feature is present. */
6177 if ( pVmcs->u32EntryInstrLen == 0
6178 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEntryInjectSoftInt)
6179 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLenZero);
6180 }
6181 }
6182 }
6183
6184 /* VM-entry MSR-load count and VM-entry MSR-load area address. */
6185 if (pVmcs->u32EntryMsrLoadCount)
6186 {
6187 if ( (pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
6188 || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6189 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
6190 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrEntryMsrLoad);
6191 }
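    /* Note: VMX_AUTOMSR_OFFSET_MASK enforces the 16-byte alignment required for the MSR areas;
       each area is presumably a packed array of 16-byte VMXAUTOMSR entries (32-bit MSR index,
       32-bit reserved field, 64-bit MSR value). */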
6192
6193 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)); /* We don't support SMM yet. */
6194 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON)); /* We don't support dual-monitor treatment yet. */
6195
6196 NOREF(pszInstr);
6197 NOREF(pszFailure);
6198 return VINF_SUCCESS;
6199}
6200
6201
6202/**
6203 * Checks VM-exit controls fields as part of VM-entry.
6204 * See Intel spec. 26.2.1.2 "VM-Exit Control Fields".
6205 *
6206 * @returns VBox status code.
6207 * @param pVCpu The cross context virtual CPU structure.
6208 * @param pszInstr The VMX instruction name (for logging purposes).
6209 */
6210IEM_STATIC int iemVmxVmentryCheckExitCtls(PVMCPU pVCpu, const char *pszInstr)
6211{
6212 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6213 const char * const pszFailure = "VMFail";
6214
6215 /* VM-exit controls. */
6216 VMXCTLSMSR ExitCtls;
6217 ExitCtls.u = CPUMGetGuestIa32VmxExitCtls(pVCpu);
6218 if (~pVmcs->u32ExitCtls & ExitCtls.n.disallowed0)
6219 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);
6220
6221 if (pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1)
6222 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsAllowed1);
6223
6224 /* Save preemption timer without activating it. */
6225 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
6226         && (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
6227 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_SavePreemptTimer);
6228
6229 /* VM-exit MSR-store count and VM-exit MSR-store area address. */
6230 if (pVmcs->u32ExitMsrStoreCount)
6231 {
6232 if ( (pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
6233 || (pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6234 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
6235 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrStore);
6236 }
6237
6238 /* VM-exit MSR-load count and VM-exit MSR-load area address. */
6239 if (pVmcs->u32ExitMsrLoadCount)
6240 {
6241 if ( (pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
6242 || (pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6243 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
6244 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrLoad);
6245 }
6246
6247 NOREF(pszInstr);
6248 NOREF(pszFailure);
6249 return VINF_SUCCESS;
6250}
6251
6252
6253/**
6254 * Checks VM-execution controls fields as part of VM-entry.
6255 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
6256 *
6257 * @returns VBox status code.
6258 * @param pVCpu The cross context virtual CPU structure.
6259 * @param pszInstr The VMX instruction name (for logging purposes).
6260 *
6261  * @remarks This may update secondary processor-based VM-execution control fields
6262 * in the current VMCS if necessary.
6263 */
6264IEM_STATIC int iemVmxVmentryCheckExecCtls(PVMCPU pVCpu, const char *pszInstr)
6265{
6266 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6267 const char * const pszFailure = "VMFail";
6268
6269 /* Pin-based VM-execution controls. */
6270 {
6271 VMXCTLSMSR PinCtls;
6272 PinCtls.u = CPUMGetGuestIa32VmxPinbasedCtls(pVCpu);
6273 if (~pVmcs->u32PinCtls & PinCtls.n.disallowed0)
6274 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);
6275
6276 if (pVmcs->u32PinCtls & ~PinCtls.n.allowed1)
6277 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsAllowed1);
6278 }
6279
6280 /* Processor-based VM-execution controls. */
6281 {
6282 VMXCTLSMSR ProcCtls;
6283 ProcCtls.u = CPUMGetGuestIa32VmxProcbasedCtls(pVCpu);
6284 if (~pVmcs->u32ProcCtls & ProcCtls.n.disallowed0)
6285 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);
6286
6287 if (pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1)
6288 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsAllowed1);
6289 }
6290
6291 /* Secondary processor-based VM-execution controls. */
6292 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
6293 {
6294 VMXCTLSMSR ProcCtls2;
6295 ProcCtls2.u = CPUMGetGuestIa32VmxProcbasedCtls2(pVCpu);
6296 if (~pVmcs->u32ProcCtls2 & ProcCtls2.n.disallowed0)
6297 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);
6298
6299 if (pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1)
6300 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Allowed1);
6301 }
6302 else
6303 Assert(!pVmcs->u32ProcCtls2);
6304
6305 /* CR3-target count. */
6306 if (pVmcs->u32Cr3TargetCount <= VMX_V_CR3_TARGET_COUNT)
6307 { /* likely */ }
6308 else
6309 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Cr3TargetCount);
6310
6311 /* I/O bitmaps physical addresses. */
6312 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
6313 {
6314 if ( (pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
6315 || (pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6316 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
6317 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapA);
6318
6319 if ( (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
6320 || (pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6321 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
6322 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
6323 }
6324
6325 /* MSR bitmap physical address. */
6326 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
6327 {
6328 RTGCPHYS const GCPhysMsrBitmap = pVmcs->u64AddrMsrBitmap.u;
6329 if ( (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
6330 || (GCPhysMsrBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6331 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysMsrBitmap))
6332 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);
6333
6334 /* Read the MSR bitmap. */
6335 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
6336 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap),
6337 GCPhysMsrBitmap, VMX_V_MSR_BITMAP_SIZE);
6338 if (RT_FAILURE(rc))
6339 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrBitmapPtrReadPhys);
6340 }
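    /* Note: the MSR bitmap is a single 4K page: the read bitmap for MSRs 0..1FFFh lives at
       offset 0, the read bitmap for MSRs C0000000h..C0001FFFh at 400h, and the corresponding
       write bitmaps at 800h and C00h; a set bit means RDMSR/WRMSR of that MSR causes a VM-exit. */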
6341
6342 /* TPR shadow related controls. */
6343 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
6344 {
6345 /* Virtual-APIC page physical address. */
6346 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
6347 if ( (GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
6348 || (GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6349 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
6350 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
6351
6352 /* Read the Virtual-APIC page. */
6353 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
6354 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage),
6355 GCPhysVirtApic, VMX_V_VIRT_APIC_PAGES);
6356 if (RT_FAILURE(rc))
6357 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
6358
6359 /* TPR threshold without virtual-interrupt delivery. */
6360 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
6361 && (pVmcs->u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK))
6362 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdRsvd);
6363
6364 /* TPR threshold and VTPR. */
6365 uint8_t const *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
6366 uint8_t const u8VTpr = *(pbVirtApic + XAPIC_OFF_TPR);
6367 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
6368 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
6369 && RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) > ((u8VTpr >> 4) & UINT32_C(0xf)) /* Bits 4:7 */)
6370 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
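        /* Note: XAPIC_OFF_TPR is byte 80h of the virtual-APIC page; bits 7:4 of VTPR hold the
           priority class, which must not be below bits 3:0 of the TPR threshold when neither
           virtualize-APIC-accesses nor virtual-interrupt delivery is enabled, hence the
           comparison above. */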
6371 }
6372 else
6373 {
6374 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6375 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
6376 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
6377 { /* likely */ }
6378 else
6379 {
6380 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6381 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicTprShadow);
6382 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
6383 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ApicRegVirt);
6384 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
6385 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtIntDelivery);
6386 }
6387 }
6388
6389 /* NMI exiting and virtual-NMIs. */
6390 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
6391 && (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
6392 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtNmi);
6393
6394 /* Virtual-NMIs and NMI-window exiting. */
6395 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6396 && (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
6397 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_NmiWindowExit);
6398
6399 /* Virtualize APIC accesses. */
6400 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
6401 {
6402 /* APIC-access physical address. */
6403 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
6404 if ( (GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
6405 || (GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6406 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
6407 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);
6408
6409 /*
6410 * Disallow APIC-access page and virtual-APIC page from being the same address.
6411 * Note! This is not an Intel requirement, but one imposed by our implementation.
6412 */
6413 /** @todo r=ramshankar: This is done primarily to simplify recursion scenarios while
6414 * redirecting accesses between the APIC-access page and the virtual-APIC
6415 * page. If any nested hypervisor requires this, we can implement it later. */
6416 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
6417 {
6418 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
6419 if (GCPhysVirtApic == GCPhysApicAccess)
6420 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessEqVirtApic);
6421 }
6422
6423 /*
6424 * Register the handler for the APIC-access page.
6425 *
6426 * We don't deregister the APIC-access page handler during the VM-exit as a different
6427 * nested-VCPU might be using the same guest-physical address for its APIC-access page.
6428 *
6429 * We leave the page registered until the first access that happens outside VMX non-root
6430 * mode. Guest software is allowed to access structures such as the APIC-access page
6431 * only when no logical processor with a current VMCS references it in VMX non-root mode,
6432 * otherwise it can lead to unpredictable behavior including guest triple-faults.
6433 *
6434 * See Intel spec. 24.11.4 "Software Access to Related Structures".
6435 */
6436 int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess,
6437 pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
6438 NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
6439 if (RT_FAILURE(rc))
6440 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessHandlerReg);
6441 }
6442
6443 /* Virtualize-x2APIC mode is mutually exclusive with virtualize-APIC accesses. */
6444 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6445 && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
6446 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
6447
6448 /* Virtual-interrupt delivery requires external interrupt exiting. */
6449 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
6450 && !(pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
6451         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtIntDelivery);
6452
6453 /* VPID. */
6454 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VPID)
6455 || pVmcs->u16Vpid != 0)
6456 { /* likely */ }
6457 else
6458 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Vpid);
6459
6460 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_POSTED_INT)); /* We don't support posted interrupts yet. */
6461 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)); /* We don't support EPT yet. */
6462 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PML)); /* We don't support PML yet. */
6463 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)); /* We don't support Unrestricted-guests yet. */
6464 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMFUNC)); /* We don't support VM functions yet. */
6465 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT_VE)); /* We don't support EPT-violation #VE yet. */
6466 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)); /* We don't support Pause-loop exiting yet. */
6467
6468 /* VMCS shadowing. */
6469 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
6470 {
6471 /* VMREAD-bitmap physical address. */
6472 RTGCPHYS const GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
6473 if ( ( GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
6474 || ( GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6475 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
6476 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmreadBitmap);
6477
6478 /* VMWRITE-bitmap physical address. */
6479         RTGCPHYS const GCPhysVmwriteBitmap = pVmcs->u64AddrVmwriteBitmap.u;
6480 if ( ( GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
6481 || ( GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6482 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
6483 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);
6484
6485 /* Read the VMREAD-bitmap. */
6486 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
6487 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap),
6488 GCPhysVmreadBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
6489 if (RT_FAILURE(rc))
6490 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys);
6491
6492 /* Read the VMWRITE-bitmap. */
6493 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap));
6494 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap),
6495 GCPhysVmwriteBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
6496 if (RT_FAILURE(rc))
6497 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys);
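        /* Note: each of these bitmaps is a 4K page in which bit n corresponds to the VMCS field
           whose encoding has the value n in its low 15 bits; a set bit makes the corresponding
           VMREAD/VMWRITE cause a VM-exit even with VMCS shadowing enabled. */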
6498 }
6499
6500 NOREF(pszInstr);
6501 NOREF(pszFailure);
6502 return VINF_SUCCESS;
6503}
6504
6505
6506/**
6507 * Loads the guest control registers, debug register and some MSRs as part of
6508 * VM-entry.
6509 *
6510 * @param pVCpu The cross context virtual CPU structure.
6511 */
6512IEM_STATIC void iemVmxVmentryLoadGuestControlRegsMsrs(PVMCPU pVCpu)
6513{
6514 /*
6515 * Load guest control registers, debug registers and MSRs.
6516 * See Intel spec. 26.3.2.1 "Loading Guest Control Registers, Debug Registers and MSRs".
6517 */
6518 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6519 uint64_t const uGstCr0 = (pVmcs->u64GuestCr0.u & ~VMX_ENTRY_CR0_IGNORE_MASK)
6520 | (pVCpu->cpum.GstCtx.cr0 & VMX_ENTRY_CR0_IGNORE_MASK);
6521 CPUMSetGuestCR0(pVCpu, uGstCr0);
6522 CPUMSetGuestCR4(pVCpu, pVmcs->u64GuestCr4.u);
6523 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64GuestCr3.u;
6524
6525 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
6526 pVCpu->cpum.GstCtx.dr[7] = (pVmcs->u64GuestDr7.u & ~VMX_ENTRY_DR7_MBZ_MASK) | VMX_ENTRY_DR7_MB1_MASK;
6527
6528 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64GuestSysenterEip.s.Lo;
6529 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64GuestSysenterEsp.s.Lo;
6530 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32GuestSysenterCS;
6531
6532 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6533 {
6534 /* FS base and GS base are loaded while loading the rest of the guest segment registers. */
6535
6536 /* EFER MSR. */
6537 if (!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR))
6538 {
6539 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
6540 bool const fGstPaging = RT_BOOL(uGstCr0 & X86_CR0_PG);
6541 uint64_t const uHostEfer = pVCpu->cpum.GstCtx.msrEFER;
6542 if (fGstInLongMode)
6543 {
6544 /* If the nested-guest is in long mode, LMA and LME are both set. */
6545 Assert(fGstPaging);
6546 pVCpu->cpum.GstCtx.msrEFER = uHostEfer | (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
6547 }
6548 else
6549 {
6550 /*
6551 * If the nested-guest is outside long mode:
6552 * - With paging: LMA is cleared, LME is cleared.
6553 * - Without paging: LMA is cleared, LME is left unmodified.
6554 */
6555 uint64_t const fLmaLmeMask = MSR_K6_EFER_LMA | (fGstPaging ? MSR_K6_EFER_LME : 0);
6556 pVCpu->cpum.GstCtx.msrEFER = uHostEfer & ~fLmaLmeMask;
6557 }
6558 }
6559 /* else: see below. */
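        /* Note: this mirrors the Intel rules for VM entries that do not load IA32_EFER: LMA is
           set to the "IA-32e mode guest" entry control, and so is LME when CR0.PG=1 (with paging
           disabled LME is left untouched). E.g. entering a 32-bit paged guest this way clears
           both LMA and LME in the guest view of EFER. */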
6560 }
6561
6562 /* PAT MSR. */
6563 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
6564 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64GuestPatMsr.u;
6565
6566 /* EFER MSR. */
6567 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
6568 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64GuestEferMsr.u;
6569
6570 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
6571 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
6572
6573 /* We don't support IA32_BNDCFGS MSR yet. */
6574 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
6575
6576 /* Nothing to do for SMBASE register - We don't support SMM yet. */
6577}
6578
6579
6580/**
6581 * Loads the guest segment registers, GDTR, IDTR, LDTR and TR as part of VM-entry.
6582 *
6583 * @param pVCpu The cross context virtual CPU structure.
6584 */
6585IEM_STATIC void iemVmxVmentryLoadGuestSegRegs(PVMCPU pVCpu)
6586{
6587 /*
6588 * Load guest segment registers, GDTR, IDTR, LDTR and TR.
6589 * See Intel spec. 26.3.2.2 "Loading Guest Segment Registers and Descriptor-Table Registers".
6590 */
6591 /* CS, SS, ES, DS, FS, GS. */
6592 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6593 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
6594 {
6595 PCPUMSELREG pGstSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6596 CPUMSELREG VmcsSelReg;
6597 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &VmcsSelReg);
6598 AssertRC(rc); NOREF(rc);
6599 if (!(VmcsSelReg.Attr.u & X86DESCATTR_UNUSABLE))
6600 {
6601 pGstSelReg->Sel = VmcsSelReg.Sel;
6602 pGstSelReg->ValidSel = VmcsSelReg.Sel;
6603 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6604 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6605 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
6606 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
6607 }
6608 else
6609 {
6610 pGstSelReg->Sel = VmcsSelReg.Sel;
6611 pGstSelReg->ValidSel = VmcsSelReg.Sel;
6612 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6613 switch (iSegReg)
6614 {
6615 case X86_SREG_CS:
6616 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6617 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
6618 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
6619 break;
6620
6621 case X86_SREG_SS:
6622 pGstSelReg->u64Base = VmcsSelReg.u64Base & UINT32_C(0xfffffff0);
6623 pGstSelReg->u32Limit = 0;
6624 pGstSelReg->Attr.u = (VmcsSelReg.Attr.u & X86DESCATTR_DPL) | X86DESCATTR_D | X86DESCATTR_UNUSABLE;
6625 break;
6626
6627 case X86_SREG_ES:
6628 case X86_SREG_DS:
6629 pGstSelReg->u64Base = 0;
6630 pGstSelReg->u32Limit = 0;
6631 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
6632 break;
6633
6634 case X86_SREG_FS:
6635 case X86_SREG_GS:
6636 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6637 pGstSelReg->u32Limit = 0;
6638 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
6639 break;
6640 }
6641 Assert(pGstSelReg->Attr.n.u1Unusable);
6642 }
6643 }
6644
6645 /* LDTR. */
6646 pVCpu->cpum.GstCtx.ldtr.Sel = pVmcs->GuestLdtr;
6647 pVCpu->cpum.GstCtx.ldtr.ValidSel = pVmcs->GuestLdtr;
6648 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
6649 if (!(pVmcs->u32GuestLdtrAttr & X86DESCATTR_UNUSABLE))
6650 {
6651 pVCpu->cpum.GstCtx.ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
6652 pVCpu->cpum.GstCtx.ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
6653 pVCpu->cpum.GstCtx.ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
6654 }
6655 else
6656 {
6657 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
6658 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
6659 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
6660 }
6661
6662 /* TR. */
6663 Assert(!(pVmcs->u32GuestTrAttr & X86DESCATTR_UNUSABLE));
6664 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->GuestTr;
6665 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->GuestTr;
6666 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
6667 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64GuestTrBase.u;
6668 pVCpu->cpum.GstCtx.tr.u32Limit = pVmcs->u32GuestTrLimit;
6669 pVCpu->cpum.GstCtx.tr.Attr.u = pVmcs->u32GuestTrAttr;
6670
6671 /* GDTR. */
6672 pVCpu->cpum.GstCtx.gdtr.cbGdt = pVmcs->u32GuestGdtrLimit;
6673 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64GuestGdtrBase.u;
6674
6675 /* IDTR. */
6676 pVCpu->cpum.GstCtx.idtr.cbIdt = pVmcs->u32GuestIdtrLimit;
6677 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64GuestIdtrBase.u;
6678}
6679
6680
6681/**
6682 * Loads the guest MSRs from the VM-entry auto-load MSRs as part of VM-entry.
6683 *
6684 * @returns VBox status code.
6685 * @param pVCpu The cross context virtual CPU structure.
6686 * @param pszInstr The VMX instruction name (for logging purposes).
6687 */
6688IEM_STATIC int iemVmxVmentryLoadGuestAutoMsrs(PVMCPU pVCpu, const char *pszInstr)
6689{
6690 /*
6691 * Load guest MSRs.
6692 * See Intel spec. 26.4 "Loading MSRs".
6693 */
6694 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6695 const char *const pszFailure = "VM-exit";
6696
6697 /*
6698 * The VM-entry MSR-load area address need not be a valid guest-physical address if the
6699 * VM-entry MSR load count is 0. If this is the case, bail early without reading it.
6700 * See Intel spec. 24.8.2 "VM-Entry Controls for MSRs".
6701 */
6702 uint32_t const cMsrs = pVmcs->u32EntryMsrLoadCount;
6703 if (!cMsrs)
6704 return VINF_SUCCESS;
6705
6706 /*
6707 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count is
6708      * exceeded, possibly raising #MC exceptions during the VMX transition. Our
6709      * implementation shall fail VM-entry with a VMX_EXIT_ERR_MSR_LOAD VM-exit.
6710 */
6711 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
6712 if (fIsMsrCountValid)
6713 { /* likely */ }
6714 else
6715 {
6716 iemVmxVmcsSetExitQual(pVCpu, VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
6717 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadCount);
6718 }
6719
6720 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrEntryMsrLoad.u;
6721     int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
6722 GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
6723 if (RT_SUCCESS(rc))
6724 {
6725 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
6726 Assert(pMsr);
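        /* Note: entries with reserved bits set, or naming MSRs that may not appear in the load
           list (the x2APIC MSR range, IA32_SMM_MONITOR_CTL) or that are presumably expected to
           be loaded via their dedicated VMCS fields instead (FS/GS base, EFER), fail VM-entry
           below with the MsrLoadRsvd diagnostic. */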
6727 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
6728 {
6729 if ( !pMsr->u32Reserved
6730 && pMsr->u32Msr != MSR_K8_FS_BASE
6731 && pMsr->u32Msr != MSR_K8_GS_BASE
6732 && pMsr->u32Msr != MSR_K6_EFER
6733 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
6734 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
6735 {
6736 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
6737 if (rcStrict == VINF_SUCCESS)
6738 continue;
6739
6740 /*
6741              * If we're in ring-0, we cannot handle returns to ring-3 at this point and still continue VM-entry.
6742              * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VM-entry failure,
6743              * recording the MSR index in the VM-exit qualification (as per the Intel spec.) and indicating
6744              * it further with our own, specific diagnostic code. Later, we can try to implement handling of
6745              * the MSR in ring-0 if possible, or come up with a better, generic solution.
6746 */
6747 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
6748 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
6749 ? kVmxVDiag_Vmentry_MsrLoadRing3
6750 : kVmxVDiag_Vmentry_MsrLoad;
6751 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
6752 }
6753 else
6754 {
6755 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
6756 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadRsvd);
6757 }
6758 }
6759 }
6760 else
6761 {
6762 AssertMsgFailed(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysAutoMsrArea, rc));
6763 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadPtrReadPhys);
6764 }
6765
6766 NOREF(pszInstr);
6767 NOREF(pszFailure);
6768 return VINF_SUCCESS;
6769}
6770
6771
6772/**
6773 * Loads the guest-state non-register state as part of VM-entry.
6774 *
6775 * @returns VBox status code.
6776 * @param pVCpu The cross context virtual CPU structure.
6777 *
6778 * @remarks This must be called only after loading the nested-guest register state
6779 * (especially nested-guest RIP).
6780 */
6781IEM_STATIC void iemVmxVmentryLoadGuestNonRegState(PVMCPU pVCpu)
6782{
6783 /*
6784 * Load guest non-register state.
6785 * See Intel spec. 26.6 "Special Features of VM Entry"
6786 */
6787 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6788 uint32_t const uEntryIntInfo = pVmcs->u32EntryIntInfo;
6789 if (VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
6790 {
6791 /** @todo NSTVMX: Pending debug exceptions. */
6792 Assert(!(pVmcs->u64GuestPendingDbgXcpt.u));
6793
6794 if (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
6795 {
6796 /** @todo NSTVMX: Virtual-NMIs doesn't affect NMI blocking in the normal sense.
6797 * We probably need a different force flag for virtual-NMI
6798 * pending/blocking. */
6799 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI));
6800 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6801 }
6802 else
6803 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS));
6804
6805 if (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
6806 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
6807 else
6808 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
6809
6810 /* SMI blocking is irrelevant. We don't support SMIs yet. */
6811 }
6812
6813 /* Loading PDPTEs will be taken care when we switch modes. We don't support EPT yet. */
6814 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
6815
6816 /* VPID is irrelevant. We don't support VPID yet. */
6817
6818 /* Clear address-range monitoring. */
6819 EMMonitorWaitClear(pVCpu);
6820}
6821
6822
6823/**
6824 * Loads the guest-state as part of VM-entry.
6825 *
6826 * @returns VBox status code.
6827 * @param pVCpu The cross context virtual CPU structure.
6828 * @param pszInstr The VMX instruction name (for logging purposes).
6829 *
6830 * @remarks This must be done after all the necessary steps prior to loading of
6831 * guest-state (e.g. checking various VMCS state).
6832 */
6833IEM_STATIC int iemVmxVmentryLoadGuestState(PVMCPU pVCpu, const char *pszInstr)
6834{
6835 iemVmxVmentryLoadGuestControlRegsMsrs(pVCpu);
6836 iemVmxVmentryLoadGuestSegRegs(pVCpu);
6837
6838 /*
6839 * Load guest RIP, RSP and RFLAGS.
6840 * See Intel spec. 26.3.2.3 "Loading Guest RIP, RSP and RFLAGS".
6841 */
6842 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6843 pVCpu->cpum.GstCtx.rsp = pVmcs->u64GuestRsp.u;
6844 pVCpu->cpum.GstCtx.rip = pVmcs->u64GuestRip.u;
6845 pVCpu->cpum.GstCtx.rflags.u = pVmcs->u64GuestRFlags.u;
6846
6847 /* Initialize the PAUSE-loop controls as part of VM-entry. */
6848 pVCpu->cpum.GstCtx.hwvirt.vmx.uFirstPauseLoopTick = 0;
6849 pVCpu->cpum.GstCtx.hwvirt.vmx.uPrevPauseTick = 0;
6850
6851 iemVmxVmentryLoadGuestNonRegState(pVCpu);
6852
6853 NOREF(pszInstr);
6854 return VINF_SUCCESS;
6855}
6856
6857
6858/**
6859 * Set up the VMX-preemption timer.
6860 *
6861 * @param pVCpu The cross context virtual CPU structure.
6862 * @param pszInstr The VMX instruction name (for logging purposes).
6863 */
6864IEM_STATIC void iemVmxVmentrySetupPreemptTimer(PVMCPU pVCpu, const char *pszInstr)
6865{
6866 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6867 Assert(pVmcs);
6868 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
6869 {
6870 uint64_t const uVmentryTick = TMCpuTickGetNoCheck(pVCpu);
6871 pVCpu->cpum.GstCtx.hwvirt.vmx.uVmentryTick = uVmentryTick;
6872 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER);
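        /* Note: the architectural VMX-preemption timer counts down at a rate derived from the
           TSC (the TSC shifted right by IA32_VMX_MISC[4:0]); recording the VM-entry TSC value
           here presumably allows the expiry to be computed later from that rate. */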
6873
6874 Log(("%s: VM-entry set up VMX-preemption timer at %#RX64\n", pszInstr, uVmentryTick));
6875 }
6876 else
6877 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
6878
6879 NOREF(pszInstr);
6880}
6881
6882
6883/**
6884 * Performs event injection (if any) as part of VM-entry.
6885 *
6886 * @param pVCpu The cross context virtual CPU structure.
6887 * @param pszInstr The VMX instruction name (for logging purposes).
6888 */
6889IEM_STATIC int iemVmxVmentryInjectEvent(PVMCPU pVCpu, const char *pszInstr)
6890{
6891 /*
6892 * Inject events.
6893 * See Intel spec. 26.5 "Event Injection".
6894 */
6895 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6896     uint32_t const uEntryIntInfo = pVmcs->u32EntryIntInfo;
6897 if (VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
6898 {
6899 /*
6900 * The event that is going to be made pending for injection is not subject to VMX intercepts,
6901          * thus we flag ignoring of intercepts. However, any recursive exceptions during delivery
6902          * of the current event -are- subject to intercepts, hence this flag will be flipped during
6903          * the actual delivery of this event.
6904 */
6905 pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents = false;
6906
6907 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
6908 if (uType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT)
6909 {
6910 Assert(VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo) == VMX_ENTRY_INT_INFO_VECTOR_MTF);
6911 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_MTF);
6912 return VINF_SUCCESS;
6913 }
6914
6915 int rc = HMVmxEntryIntInfoInjectTrpmEvent(pVCpu, uEntryIntInfo, pVmcs->u32EntryXcptErrCode, pVmcs->u32EntryInstrLen,
6916 pVCpu->cpum.GstCtx.cr2);
6917 AssertRCReturn(rc, rc);
6918 }
6919
6920 NOREF(pszInstr);
6921 return VINF_SUCCESS;
6922}
6923
6924
6925/**
6926 * VMLAUNCH/VMRESUME instruction execution worker.
6927 *
6928 * @returns Strict VBox status code.
6929 * @param pVCpu The cross context virtual CPU structure.
6930 * @param cbInstr The instruction length in bytes.
6931 * @param uInstrId The instruction identity (VMXINSTRID_VMLAUNCH or
6932 * VMXINSTRID_VMRESUME).
6933 * @param pExitInfo Pointer to the VM-exit instruction information struct.
6934 * Optional, can be NULL.
6935 *
6936  * @remarks Common VMX instruction checks are already expected to have been done by the caller,
6937 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
6938 */
6939IEM_STATIC VBOXSTRICTRC iemVmxVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId, PCVMXVEXITINFO pExitInfo)
6940{
6941 Assert( uInstrId == VMXINSTRID_VMLAUNCH
6942 || uInstrId == VMXINSTRID_VMRESUME);
6943 const char *pszInstr = uInstrId == VMXINSTRID_VMRESUME ? "vmresume" : "vmlaunch";
6944
6945 /* Nested-guest intercept. */
6946 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6947 {
6948 if (pExitInfo)
6949 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
6950 uint32_t const uExitReason = uInstrId == VMXINSTRID_VMRESUME ? VMX_EXIT_VMRESUME : VMX_EXIT_VMLAUNCH;
6951 return iemVmxVmexitInstrNeedsInfo(pVCpu, uExitReason, uInstrId, cbInstr);
6952 }
6953
6954 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
6955
6956 /* CPL. */
6957 if (pVCpu->iem.s.uCpl > 0)
6958 {
6959 Log(("%s: CPL %u -> #GP(0)\n", pszInstr, pVCpu->iem.s.uCpl));
6960 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_Cpl;
6961 return iemRaiseGeneralProtectionFault0(pVCpu);
6962 }
6963
6964 /* Current VMCS valid. */
6965 if (!IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
6966 {
6967 Log(("%s: VMCS pointer %#RGp invalid -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
6968 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrInvalid;
6969 iemVmxVmFailInvalid(pVCpu);
6970 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6971 return VINF_SUCCESS;
6972 }
6973
6974 /** @todo Distinguish block-by-MOV-SS from block-by-STI. Currently we
6975 * use block-by-STI here which is not quite correct. */
6976 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
6977 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
6978 {
6979 Log(("%s: VM entry with events blocked by MOV SS -> VMFail\n", pszInstr));
6980 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_BlocKMovSS;
6981 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_BLOCK_MOVSS);
6982 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6983 return VINF_SUCCESS;
6984 }
6985
6986 if (uInstrId == VMXINSTRID_VMLAUNCH)
6987 {
6988 /* VMLAUNCH with non-clear VMCS. */
6989 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_STATE_CLEAR)
6990 { /* likely */ }
6991 else
6992 {
6993 Log(("vmlaunch: VMLAUNCH with non-clear VMCS -> VMFail\n"));
6994 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsClear;
6995 iemVmxVmFail(pVCpu, VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS);
6996 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6997 return VINF_SUCCESS;
6998 }
6999 }
7000 else
7001 {
7002 /* VMRESUME with non-launched VMCS. */
7003 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_STATE_LAUNCHED)
7004 { /* likely */ }
7005 else
7006 {
7007 Log(("vmresume: VMRESUME with non-launched VMCS -> VMFail\n"));
7008 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsLaunch;
7009 iemVmxVmFail(pVCpu, VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS);
7010 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7011 return VINF_SUCCESS;
7012 }
7013 }
7014
7015 /*
7016 * Load the current VMCS.
7017 */
7018 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
7019 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs),
7020 IEM_VMX_GET_CURRENT_VMCS(pVCpu), VMX_V_VMCS_SIZE);
7021 if (RT_FAILURE(rc))
7022 {
7023 Log(("%s: Failed to read VMCS at %#RGp, rc=%Rrc\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu), rc));
7024 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrReadPhys;
7025 return rc;
7026 }
7027
7028 /*
7029 * We are allowed to cache VMCS related data structures (such as I/O bitmaps, MSR bitmaps)
7030 * while entering VMX non-root mode. We do some of this while checking VM-execution
7031 * controls. The guest hypervisor should not make assumptions and cannot expect
7032 * predictable behavior if changes to these structures are made in guest memory while
7033 * executing in VMX non-root mode. As far as VirtualBox is concerned, the guest cannot
7034      * modify them anyway as we cache them in host memory. We trade memory for speed here.
7035 *
7036 * See Intel spec. 24.11.4 "Software Access to Related Structures".
7037 */
7038 rc = iemVmxVmentryCheckExecCtls(pVCpu, pszInstr);
7039 if (RT_SUCCESS(rc))
7040 {
7041 rc = iemVmxVmentryCheckExitCtls(pVCpu, pszInstr);
7042 if (RT_SUCCESS(rc))
7043 {
7044 rc = iemVmxVmentryCheckEntryCtls(pVCpu, pszInstr);
7045 if (RT_SUCCESS(rc))
7046 {
7047 rc = iemVmxVmentryCheckHostState(pVCpu, pszInstr);
7048 if (RT_SUCCESS(rc))
7049 {
7050 /* Save the guest force-flags as VM-exits can occur from this point on. */
7051 iemVmxVmentrySaveForceFlags(pVCpu);
7052
7053 /* Initialize the VM-exit qualification field as it MBZ for VM-exits where it isn't specified. */
7054 iemVmxVmcsSetExitQual(pVCpu, 0);
7055
7056 rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
7057 if (RT_SUCCESS(rc))
7058 {
7059 rc = iemVmxVmentryLoadGuestState(pVCpu, pszInstr);
7060 if (RT_SUCCESS(rc))
7061 {
7062 rc = iemVmxVmentryLoadGuestAutoMsrs(pVCpu, pszInstr);
7063 if (RT_SUCCESS(rc))
7064 {
7065 Assert(rc != VINF_CPUM_R3_MSR_WRITE);
7066
7067 /* VMLAUNCH instruction must update the VMCS launch state. */
7068 if (uInstrId == VMXINSTRID_VMLAUNCH)
7069 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = VMX_V_VMCS_STATE_LAUNCHED;
7070
7071 /* Perform the VMX transition (PGM updates). */
7072 VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
7073 if (rcStrict == VINF_SUCCESS)
7074 { /* likely */ }
7075 else if (RT_SUCCESS(rcStrict))
7076 {
7077 Log3(("%s: iemVmxWorldSwitch returns %Rrc -> Setting passup status\n", pszInstr,
7078 VBOXSTRICTRC_VAL(rcStrict)));
7079 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7080 }
7081 else
7082 {
7083 Log3(("%s: iemVmxWorldSwitch failed! rc=%Rrc\n", pszInstr, VBOXSTRICTRC_VAL(rcStrict)));
7084 return rcStrict;
7085 }
7086
7087 /* We've now entered nested-guest execution. */
7088 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
7089
7090 /*
7091 * The priority of potential VM-exits during VM-entry is important.
7092 * The priorities of VM-exits and events are listed from highest
7093 * to lowest as follows:
7094 *
7095 * 1. Event injection.
7096 * 2. TPR below threshold / APIC-write.
7097 * 3. SMI.
7098 * 4. INIT.
7099 * 5. MTF exit.
7100 * 6. Pending debug exceptions.
7101 * 7. Debug-trap exceptions.
7102 * 8. VMX-preemption timer.
7103 * 9. NMI-window exit.
7104 * 10. NMI injection.
7105 * 11. Interrupt-window exit.
7106 * 12. Interrupt injection.
7107 */
7108
7109 /* Setup the VMX-preemption timer. */
7110 iemVmxVmentrySetupPreemptTimer(pVCpu, pszInstr);
7111
7112 /* Now that we've switched page tables, we can inject events if any. */
7113 iemVmxVmentryInjectEvent(pVCpu, pszInstr);
7114
7115 return VINF_SUCCESS;
7116 }
7117 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED);
7118 }
7119 }
7120 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED);
7121 }
7122
7123 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_HOST_STATE);
7124 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7125 return VINF_SUCCESS;
7126 }
7127 }
7128 }
7129
7130 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
7131 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7132 return VINF_SUCCESS;
7133}
7134
7135
7136/**
7137 * Checks whether an RDMSR or WRMSR instruction for the given MSR is intercepted
7138 * (causes a VM-exit) or not.
7139 *
7140 * @returns @c true if the instruction is intercepted, @c false otherwise.
7141 * @param pVCpu The cross context virtual CPU structure.
7142 * @param uExitReason The VM-exit exit reason (VMX_EXIT_RDMSR or
7143 * VMX_EXIT_WRMSR).
7144 * @param idMsr The MSR.
7145 */
7146IEM_STATIC bool iemVmxIsRdmsrWrmsrInterceptSet(PVMCPU pVCpu, uint32_t uExitReason, uint32_t idMsr)
7147{
7148 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
7149 Assert( uExitReason == VMX_EXIT_RDMSR
7150 || uExitReason == VMX_EXIT_WRMSR);
7151
7152 /* Consult the MSR bitmap if the feature is supported. */
7153 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7154 Assert(pVmcs);
7155 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
7156 {
7157 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
7158 if (uExitReason == VMX_EXIT_RDMSR)
7159 {
7160 VMXMSREXITREAD enmRead;
7161 int rc = HMVmxGetMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, &enmRead,
7162 NULL /* penmWrite */);
7163 AssertRC(rc);
7164 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
7165 return true;
7166 }
7167 else
7168 {
7169 VMXMSREXITWRITE enmWrite;
7170 int rc = HMVmxGetMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, NULL /* penmRead */,
7171 &enmWrite);
7172 AssertRC(rc);
7173 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
7174 return true;
7175 }
7176 return false;
7177 }
7178
7179 /* Without MSR bitmaps, all MSR accesses are intercepted. */
7180 return true;
7181}
7182
7183
7184/**
7185 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field is
7186 * intercepted (causes a VM-exit) or not.
7187 *
7188 * @returns @c true if the instruction is intercepted, @c false otherwise.
7189 * @param pVCpu The cross context virtual CPU structure.
7190 * @param u64FieldEnc The VMCS field encoding.
7191 * @param uExitReason The VM-exit exit reason (VMX_EXIT_VMREAD or
7192  *                      VMX_EXIT_VMWRITE).
7193 */
7194IEM_STATIC bool iemVmxIsVmreadVmwriteInterceptSet(PVMCPU pVCpu, uint32_t uExitReason, uint64_t u64FieldEnc)
7195{
7196 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
7197 Assert( uExitReason == VMX_EXIT_VMREAD
7198 || uExitReason == VMX_EXIT_VMWRITE);
7199
7200 /* Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted. */
7201 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing)
7202 return true;
7203
7204 /*
7205 * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE is intercepted.
7206 * This excludes any reserved bits in the valid parts of the field encoding (i.e. bit 12).
7207 */
7208 if (u64FieldEnc & VMX_VMCS_ENC_RSVD_MASK)
7209 return true;
7210
7211 /* Finally, consult the VMREAD/VMWRITE bitmap whether to intercept the instruction or not. */
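    /* Note: the bit index is simply the low 15 bits of the field encoding; e.g. for the guest
       RIP field (encoding 681Eh, assuming the usual value) this selects byte D03h, bit 6 of
       the bitmap, exactly as computed below. */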
7212 uint32_t u32FieldEnc = RT_LO_U32(u64FieldEnc);
7213 Assert(u32FieldEnc >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
7214 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
7215 uint8_t const *pbBitmap = uExitReason == VMX_EXIT_VMREAD
7216 ? (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap)
7217 : (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap);
7218 pbBitmap += (u32FieldEnc >> 3);
7219 if (*pbBitmap & RT_BIT(u32FieldEnc & 7))
7220 return true;
7221
7222 return false;
7223}
7224
7225
7226/**
7227 * VMREAD common (memory/register) instruction execution worker
7228  * VMREAD common (memory/register) instruction execution worker.
7229 * @returns Strict VBox status code.
7230 * @param pVCpu The cross context virtual CPU structure.
7231 * @param cbInstr The instruction length in bytes.
7232 * @param pu64Dst Where to write the VMCS value (only updated when
7233 * VINF_SUCCESS is returned).
7234 * @param u64FieldEnc The VMCS field encoding.
7235 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7236 * be NULL.
7237 */
7238IEM_STATIC VBOXSTRICTRC iemVmxVmreadCommon(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
7239 PCVMXVEXITINFO pExitInfo)
7240{
7241 /* Nested-guest intercept. */
7242 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7243 && iemVmxIsVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMREAD, u64FieldEnc))
7244 {
7245 if (pExitInfo)
7246 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7247 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMREAD, VMXINSTRID_VMREAD, cbInstr);
7248 }
7249
7250 /* CPL. */
7251 if (pVCpu->iem.s.uCpl > 0)
7252 {
7253 Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7254 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_Cpl;
7255 return iemRaiseGeneralProtectionFault0(pVCpu);
7256 }
7257
7258 /* VMCS pointer in root mode. */
7259 if ( IEM_VMX_IS_ROOT_MODE(pVCpu)
7260 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
7261 {
7262 Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7263 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrInvalid;
7264 iemVmxVmFailInvalid(pVCpu);
7265 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7266 return VINF_SUCCESS;
7267 }
7268
7269 /* VMCS-link pointer in non-root mode. */
7270 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7271 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
7272 {
7273 Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
7274 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_LinkPtrInvalid;
7275 iemVmxVmFailInvalid(pVCpu);
7276 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7277 return VINF_SUCCESS;
7278 }
7279
7280 /* Supported VMCS field. */
7281     if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
7282 {
7283 Log(("vmread: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
7284 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_FieldInvalid;
7285 iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
7286 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7287 return VINF_SUCCESS;
7288 }
7289
7290 /*
7291 * Setup reading from the current or shadow VMCS.
7292 */
7293 uint8_t *pbVmcs;
7294 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7295 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
7296 else
7297 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7298 Assert(pbVmcs);
7299
7300 VMXVMCSFIELDENC FieldEnc;
7301 FieldEnc.u = RT_LO_U32(u64FieldEnc);
7302 uint8_t const uWidth = FieldEnc.n.u2Width;
7303 uint8_t const uType = FieldEnc.n.u2Type;
7304 uint8_t const uWidthType = (uWidth << 2) | uType;
7305 uint8_t const uIndex = FieldEnc.n.u8Index;
7306 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
7307 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
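    /*
     * Note: this follows the usual VMCS field-encoding layout: bit 0 = access type (full/high),
     * bits 9:1 = index, bits 11:10 = type (control, read-only data, guest state, host state),
     * bit 12 = reserved, bits 14:13 = width (16-bit, 64-bit, 32-bit, natural). E.g. guest RIP
     * (681Eh) decodes to width 3 (natural), type 2 (guest state), index 0Fh.
     */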
7308
7309 /*
7310 * Read the VMCS component based on the field's effective width.
7311 *
7312      * The effective width of 64-bit fields is adjusted to 32 bits if the access type
7313      * indicates the high part of the field (little endian).
7314      *
7315      * Note! The caller is responsible for trimming the result and updating registers
7316      * or memory locations as required. Here we just zero-extend to the largest
7317 * type (i.e. 64-bits).
7318 */
7319 uint8_t *pbField = pbVmcs + offField;
7320 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
7321 switch (uEffWidth)
7322 {
7323 case VMX_VMCS_ENC_WIDTH_64BIT:
7324 case VMX_VMCS_ENC_WIDTH_NATURAL: *pu64Dst = *(uint64_t *)pbField; break;
7325 case VMX_VMCS_ENC_WIDTH_32BIT: *pu64Dst = *(uint32_t *)pbField; break;
7326 case VMX_VMCS_ENC_WIDTH_16BIT: *pu64Dst = *(uint16_t *)pbField; break;
7327 }
7328 return VINF_SUCCESS;
7329}
7330
7331
7332/**
7333 * VMREAD (64-bit register) instruction execution worker.
7334 *
7335 * @returns Strict VBox status code.
7336 * @param pVCpu The cross context virtual CPU structure.
7337 * @param cbInstr The instruction length in bytes.
7338 * @param pu64Dst Where to store the VMCS field's value.
7339 * @param u64FieldEnc The VMCS field encoding.
7340 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7341 * be NULL.
7342 */
7343IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg64(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
7344 PCVMXVEXITINFO pExitInfo)
7345{
7346 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
7347 if (rcStrict == VINF_SUCCESS)
7348 {
7349 iemVmxVmreadSuccess(pVCpu, cbInstr);
7350 return VINF_SUCCESS;
7351 }
7352
7353 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7354 return rcStrict;
7355}
7356
7357
7358/**
7359 * VMREAD (32-bit register) instruction execution worker.
7360 *
7361 * @returns Strict VBox status code.
7362 * @param pVCpu The cross context virtual CPU structure.
7363 * @param cbInstr The instruction length in bytes.
7364 * @param pu32Dst Where to store the VMCS field's value.
7365 * @param u32FieldEnc The VMCS field encoding.
7366 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7367 * be NULL.
7368 */
7369IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg32(PVMCPU pVCpu, uint8_t cbInstr, uint32_t *pu32Dst, uint64_t u32FieldEnc,
7370 PCVMXVEXITINFO pExitInfo)
7371{
7372 uint64_t u64Dst;
7373 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u32FieldEnc, pExitInfo);
7374 if (rcStrict == VINF_SUCCESS)
7375 {
7376 *pu32Dst = u64Dst;
7377 iemVmxVmreadSuccess(pVCpu, cbInstr);
7378 return VINF_SUCCESS;
7379 }
7380
7381 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7382 return rcStrict;
7383}
7384
7385
7386/**
7387 * VMREAD (memory) instruction execution worker.
7388 *
7389 * @returns Strict VBox status code.
7390 * @param pVCpu The cross context virtual CPU structure.
7391 * @param cbInstr The instruction length in bytes.
7392  * @param   iEffSeg         The effective segment register to use with @a GCPtrDst.
7394 * @param enmEffAddrMode The effective addressing mode (only used with memory
7395 * operand).
7396 * @param GCPtrDst The guest linear address to store the VMCS field's
7397 * value.
7398 * @param u64FieldEnc The VMCS field encoding.
7399 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7400 * be NULL.
7401 */
7402IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode,
7403 RTGCPTR GCPtrDst, uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
7404{
7405 uint64_t u64Dst;
7406 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u64FieldEnc, pExitInfo);
7407 if (rcStrict == VINF_SUCCESS)
7408 {
7409 /*
7410 * Write the VMCS field's value to the location specified in guest-memory.
7411 *
7412 * The pointer size depends on the address size (address-size prefix allowed).
7413 * The operand size depends on IA-32e mode (operand-size prefix not allowed).
7414 */
7415 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
7416 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
7417 GCPtrDst &= s_auAddrSizeMasks[enmEffAddrMode];
7418
7419 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7420 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
7421 else
7422 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
7423 if (rcStrict == VINF_SUCCESS)
7424 {
7425 iemVmxVmreadSuccess(pVCpu, cbInstr);
7426 return VINF_SUCCESS;
7427 }
7428
7429 Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
7430 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrMap;
7431 return rcStrict;
7432 }
7433
7434 Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7435 return rcStrict;
7436}
7437
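/*
 * A minimal standalone sketch of the address-size masking performed by the VMREAD
 * memory worker above: the effective address is truncated to 16, 32 or 64 bits
 * according to the addressing mode before the store.  The ADDRMODE enum,
 * MaskEffectiveAddr() and the main() harness are illustrative assumptions, not
 * VirtualBox APIs; the block is guarded out of compilation.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for IEMMODE: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit addressing. */
typedef enum { ADDRMODE_16BIT = 0, ADDRMODE_32BIT = 1, ADDRMODE_64BIT = 2 } ADDRMODE;

/* Truncate an effective address to the width implied by the addressing mode. */
static uint64_t MaskEffectiveAddr(uint64_t uAddr, ADDRMODE enmMode)
{
    static uint64_t const s_auMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
    return uAddr & s_auMasks[enmMode];
}

int main(void)
{
    uint64_t const uAddr = UINT64_C(0x12345678deadbeef);
    printf("16-bit: %#llx\n", (unsigned long long)MaskEffectiveAddr(uAddr, ADDRMODE_16BIT)); /* 0xbeef */
    printf("32-bit: %#llx\n", (unsigned long long)MaskEffectiveAddr(uAddr, ADDRMODE_32BIT)); /* 0xdeadbeef */
    printf("64-bit: %#llx\n", (unsigned long long)MaskEffectiveAddr(uAddr, ADDRMODE_64BIT)); /* unchanged */
    return 0;
}
#endif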
7438
7439/**
7440 * VMWRITE instruction execution worker.
7441 *
7442 * @returns Strict VBox status code.
7443 * @param pVCpu The cross context virtual CPU structure.
7444 * @param cbInstr The instruction length in bytes.
7445 * @param iEffSeg The effective segment register to use with @a u64Val.
7446 * Pass UINT8_MAX if it is a register access.
7447 * @param enmEffAddrMode The effective addressing mode (only used with memory
7448 * operand).
7449 * @param u64Val The value to write (or the guest linear address of the
7450 * value); @a iEffSeg indicates whether it is a memory
7451 * operand.
7452 * @param u64FieldEnc The VMCS field encoding.
7453 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7454 * be NULL.
7455 */
7456IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode, uint64_t u64Val,
7457 uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
7458{
7459 /* Nested-guest intercept. */
7460 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7461 && iemVmxIsVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMWRITE, u64FieldEnc))
7462 {
7463 if (pExitInfo)
7464 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7465 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMWRITE, VMXINSTRID_VMWRITE, cbInstr);
7466 }
7467
7468 /* CPL. */
7469 if (pVCpu->iem.s.uCpl > 0)
7470 {
7471 Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7472 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_Cpl;
7473 return iemRaiseGeneralProtectionFault0(pVCpu);
7474 }
7475
7476 /* VMCS pointer in root mode. */
7477 if ( IEM_VMX_IS_ROOT_MODE(pVCpu)
7478 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
7479 {
7480 Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7481 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrInvalid;
7482 iemVmxVmFailInvalid(pVCpu);
7483 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7484 return VINF_SUCCESS;
7485 }
7486
7487 /* VMCS-link pointer in non-root mode. */
7488 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7489 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
7490 {
7491 Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
7492 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_LinkPtrInvalid;
7493 iemVmxVmFailInvalid(pVCpu);
7494 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7495 return VINF_SUCCESS;
7496 }
7497
7498 /* If the VMWRITE instruction references memory, access the specified memory operand. */
7499 bool const fIsRegOperand = iEffSeg == UINT8_MAX;
7500 if (!fIsRegOperand)
7501 {
7502 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
7503 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
7504 RTGCPTR const GCPtrVal = u64Val & s_auAddrSizeMasks[enmEffAddrMode];
7505
7506 /* Read the value from the specified guest memory location. */
7507 VBOXSTRICTRC rcStrict;
7508 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7509 rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
7510 else
7511 {
7512 uint32_t u32Val;
7513 rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iEffSeg, GCPtrVal);
7514 u64Val = u32Val;
7515 }
7516 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7517 {
7518 Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
7519 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrMap;
7520 return rcStrict;
7521 }
7522 }
7523 else
7524 Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
7525
7526 /* Supported VMCS field. */
7527 if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
7528 {
7529 Log(("vmwrite: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
7530 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldInvalid;
7531 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
7532 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7533 return VINF_SUCCESS;
7534 }
7535
7536 /* Read-only VMCS field. */
7537 bool const fIsFieldReadOnly = HMVmxIsVmcsFieldReadOnly(u64FieldEnc);
7538 if ( fIsFieldReadOnly
7539 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
7540 {
7541 Log(("vmwrite: Write to read-only VMCS component %#RX64 -> VMFail\n", u64FieldEnc));
7542 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldRo;
7543 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
7544 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7545 return VINF_SUCCESS;
7546 }
7547
7548 /*
7549 * Set up writing to the current or shadow VMCS.
7550 */
7551 uint8_t *pbVmcs;
7552 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7553 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
7554 else
7555 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7556 Assert(pbVmcs);
7557
7558 VMXVMCSFIELDENC FieldEnc;
7559 FieldEnc.u = RT_LO_U32(u64FieldEnc);
7560 uint8_t const uWidth = FieldEnc.n.u2Width;
7561 uint8_t const uType = FieldEnc.n.u2Type;
7562 uint8_t const uWidthType = (uWidth << 2) | uType;
7563 uint8_t const uIndex = FieldEnc.n.u8Index;
7564 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
7565 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
7566
7567 /*
7568 * Write the VMCS component based on the field's effective width.
7569 *
7570 * The effective width is the declared field width, except that 64-bit fields are
7571 * adjusted to 32 bits when the access-type indicates the high part of the field (little endian).
7572 */
7573 uint8_t *pbField = pbVmcs + offField;
7574 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
7575 switch (uEffWidth)
7576 {
7577 case VMX_VMCS_ENC_WIDTH_64BIT:
7578 case VMX_VMCS_ENC_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
7579 case VMX_VMCS_ENC_WIDTH_32BIT: *(uint32_t *)pbField = u64Val; break;
7580 case VMX_VMCS_ENC_WIDTH_16BIT: *(uint16_t *)pbField = u64Val; break;
7581 }
7582
7583 iemVmxVmSucceed(pVCpu);
7584 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7585 return VINF_SUCCESS;
7586}
7587
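/*
 * A minimal standalone sketch of decoding a VMCS field encoding, the same idea the
 * VMWRITE worker above applies via VMXVMCSFIELDENC and g_aoffVmcsMap.  The bit layout
 * follows the Intel SDM (bit 0 access type, bits 9:1 index, bits 11:10 type,
 * bits 14:13 width); DecodeVmcsFieldEnc() and the main() harness are illustrative
 * assumptions only, guarded out of compilation.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Pull the access-type, index, type and width fields out of a 32-bit VMCS field encoding. */
static void DecodeVmcsFieldEnc(uint32_t uFieldEnc)
{
    uint32_t const fAccessHigh = uFieldEnc & 0x1;          /* 1 = high 32 bits of a 64-bit field. */
    uint32_t const uIndex      = (uFieldEnc >> 1)  & 0x1ff;
    uint32_t const uType       = (uFieldEnc >> 10) & 0x3;  /* 0=control, 1=VM-exit info, 2=guest, 3=host. */
    uint32_t const uWidth      = (uFieldEnc >> 13) & 0x3;  /* 0=16-bit, 1=64-bit, 2=32-bit, 3=natural. */
    uint32_t const uWidthType  = (uWidth << 2) | uType;    /* Combined row index, as g_aoffVmcsMap uses above. */
    printf("enc=%#x: high=%u index=%u type=%u width=%u widthtype=%u\n",
           uFieldEnc, fAccessHigh, uIndex, uType, uWidth, uWidthType);
}

int main(void)
{
    DecodeVmcsFieldEnc(0x00006c00); /* HOST_CR0: natural-width (3), host-state (3), index 0. */
    DecodeVmcsFieldEnc(0x00000800); /* GUEST_ES_SELECTOR: 16-bit (0), guest-state (2), index 0. */
    return 0;
}
#endif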
7588
7589/**
7590 * VMCLEAR instruction execution worker.
7591 *
7592 * @returns Strict VBox status code.
7593 * @param pVCpu The cross context virtual CPU structure.
7594 * @param cbInstr The instruction length in bytes.
7595 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
7596 * @param GCPtrVmcs The linear address of the VMCS pointer.
7597 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7598 * be NULL.
7599 *
7600 * @remarks Common VMX instruction checks are already expected to have been done
7601 * by the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7602 */
7603IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
7604 PCVMXVEXITINFO pExitInfo)
7605{
7606 /* Nested-guest intercept. */
7607 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7608 {
7609 if (pExitInfo)
7610 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7611 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMCLEAR, VMXINSTRID_NONE, cbInstr);
7612 }
7613
7614 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7615
7616 /* CPL. */
7617 if (pVCpu->iem.s.uCpl > 0)
7618 {
7619 Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7620 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_Cpl;
7621 return iemRaiseGeneralProtectionFault0(pVCpu);
7622 }
7623
7624 /* Get the VMCS pointer from the location specified by the source memory operand. */
7625 RTGCPHYS GCPhysVmcs;
7626 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
7627 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7628 {
7629 Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
7630 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrMap;
7631 return rcStrict;
7632 }
7633
7634 /* VMCS pointer alignment. */
7635 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
7636 {
7637 Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
7638 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAlign;
7639 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
7640 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7641 return VINF_SUCCESS;
7642 }
7643
7644 /* VMCS physical-address width limits. */
7645 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
7646 {
7647 Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
7648 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrWidth;
7649 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
7650 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7651 return VINF_SUCCESS;
7652 }
7653
7654 /* VMCS is not the VMXON region. */
7655 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
7656 {
7657 Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
7658 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrVmxon;
7659 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
7660 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7661 return VINF_SUCCESS;
7662 }
7663
7664 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
7665 restriction imposed by our implementation. */
7666 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
7667 {
7668 Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
7669 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAbnormal;
7670 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
7671 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7672 return VINF_SUCCESS;
7673 }
7674
7675 /*
7676 * VMCLEAR allows committing and clearing any valid VMCS pointer.
7677 *
7678 * If the current VMCS is the one being cleared, set its state to 'clear' and commit
7679 * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
7680 * to 'clear'.
7681 */
7682 uint8_t const fVmcsStateClear = VMX_V_VMCS_STATE_CLEAR;
7683 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
7684 {
7685 Assert(GCPhysVmcs != NIL_RTGCPHYS); /* Paranoia. */
7686 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
7687 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsStateClear;
7688 iemVmxCommitCurrentVmcsToMemory(pVCpu);
7689 Assert(!IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
7690 }
7691 else
7692 {
7693 rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_UOFFSETOF(VMXVVMCS, fVmcsState),
7694 (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
7695 }
7696
7697 iemVmxVmSucceed(pVCpu);
7698 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7699 return rcStrict;
7700}
7701
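/*
 * A minimal standalone sketch of the VMCLEAR technique above of writing only the
 * launch-state byte, where RT_UOFFSETOF(VMXVVMCS, fVmcsState) selects the member to
 * patch in guest memory.  SKETCHVMCS, WriteAtOffset() and the sample values are
 * illustrative assumptions, not the real VMCS layout; the block is guarded out of
 * compilation.
 */
#if 0
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical, much-simplified VMCS layout; only the launch-state byte matters here. */
typedef struct SKETCHVMCS
{
    uint32_t u32VmcsRevId;
    uint8_t  fVmcsState;    /* 0 = clear, 1 = launched (illustrative values). */
    uint8_t  abPadding[3];
} SKETCHVMCS;

/* Stand-in for PGMPhysSimpleWriteGCPhys(): patch cb bytes at offset off of a backing buffer. */
static void WriteAtOffset(uint8_t *pbBacking, size_t off, void const *pv, size_t cb)
{
    memcpy(&pbBacking[off], pv, cb);
}

int main(void)
{
    uint8_t abGuestMem[sizeof(SKETCHVMCS)];
    memset(abGuestMem, 0xff, sizeof(abGuestMem));

    /* Like the VMCLEAR path: only the launch-state member is written, not the whole VMCS. */
    uint8_t const fVmcsStateClear = 0;
    WriteAtOffset(abGuestMem, offsetof(SKETCHVMCS, fVmcsState), &fVmcsStateClear, sizeof(fVmcsStateClear));

    printf("fVmcsState byte is now %u\n", (unsigned)abGuestMem[offsetof(SKETCHVMCS, fVmcsState)]);
    return 0;
}
#endif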
7702
7703/**
7704 * VMPTRST instruction execution worker.
7705 *
7706 * @returns Strict VBox status code.
7707 * @param pVCpu The cross context virtual CPU structure.
7708 * @param cbInstr The instruction length in bytes.
7709 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
7710 * @param GCPtrVmcs The linear address of where to store the current VMCS
7711 * pointer.
7712 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7713 * be NULL.
7714 *
7715 * @remarks Common VMX instruction checks are already expected to have been done
7716 * by the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7717 */
7718IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
7719 PCVMXVEXITINFO pExitInfo)
7720{
7721 /* Nested-guest intercept. */
7722 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7723 {
7724 if (pExitInfo)
7725 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7726 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRST, VMXINSTRID_NONE, cbInstr);
7727 }
7728
7729 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7730
7731 /* CPL. */
7732 if (pVCpu->iem.s.uCpl > 0)
7733 {
7734 Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7735 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_Cpl;
7736 return iemRaiseGeneralProtectionFault0(pVCpu);
7737 }
7738
7739 /* Set the VMCS pointer to the location specified by the destination memory operand. */
7740 AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
7741 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
7742 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7743 {
7744 iemVmxVmSucceed(pVCpu);
7745 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7746 return rcStrict;
7747 }
7748
7749 Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand %#Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7750 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_PtrMap;
7751 return rcStrict;
7752}
7753
7754
7755/**
7756 * VMPTRLD instruction execution worker.
7757 *
7758 * @returns Strict VBox status code.
7759 * @param pVCpu The cross context virtual CPU structure.
7760 * @param cbInstr The instruction length in bytes.
 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
7761 * @param GCPtrVmcs The linear address of the current VMCS pointer.
7762 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7763 * be NULL.
7764 *
7765 * @remarks Common VMX instruction checks are already expected to have been done
7766 * by the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7767 */
7768IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
7769 PCVMXVEXITINFO pExitInfo)
7770{
7771 /* Nested-guest intercept. */
7772 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7773 {
7774 if (pExitInfo)
7775 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7776 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRLD, VMXINSTRID_NONE, cbInstr);
7777 }
7778
7779 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7780
7781 /* CPL. */
7782 if (pVCpu->iem.s.uCpl > 0)
7783 {
7784 Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7785 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_Cpl;
7786 return iemRaiseGeneralProtectionFault0(pVCpu);
7787 }
7788
7789 /* Get the VMCS pointer from the location specified by the source memory operand. */
7790 RTGCPHYS GCPhysVmcs;
7791 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
7792 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7793 {
7794 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
7795 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrMap;
7796 return rcStrict;
7797 }
7798
7799 /* VMCS pointer alignment. */
7800 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
7801 {
7802 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
7803 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAlign;
7804 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
7805 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7806 return VINF_SUCCESS;
7807 }
7808
7809 /* VMCS physical-address width limits. */
7810 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
7811 {
7812 Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
7813 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrWidth;
7814 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
7815 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7816 return VINF_SUCCESS;
7817 }
7818
7819 /* VMCS is not the VMXON region. */
7820 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
7821 {
7822 Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
7823 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrVmxon;
7824 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
7825 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7826 return VINF_SUCCESS;
7827 }
7828
7829 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
7830 restriction imposed by our implementation. */
7831 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
7832 {
7833 Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
7834 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAbnormal;
7835 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
7836 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7837 return VINF_SUCCESS;
7838 }
7839
7840 /* Read the VMCS revision ID from the VMCS. */
7841 VMXVMCSREVID VmcsRevId;
7842 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
7843 if (RT_FAILURE(rc))
7844 {
7845 Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
7846 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrReadPhys;
7847 return rc;
7848 }
7849
7850 /* Verify that the VMCS revision specified by the guest matches what we reported to the
7851 guest; also check the VMCS shadowing feature. */
7852 if ( VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
7853 || ( VmcsRevId.n.fIsShadowVmcs
7854 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
7855 {
7856 if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
7857 {
7858 Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFail()\n", VMX_V_VMCS_REVISION_ID,
7859 VmcsRevId.n.u31RevisionId));
7860 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_VmcsRevId;
7861 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
7862 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7863 return VINF_SUCCESS;
7864 }
7865
7866 Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
7867 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_ShadowVmcs;
7868 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
7869 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7870 return VINF_SUCCESS;
7871 }
7872
7873 /*
7874 * We only maintain the current VMCS in our virtual CPU context (CPUMCTX). Therefore,
7875 * VMPTRLD shall always flush any existing current VMCS back to guest memory before loading
7876 * a new VMCS as current.
7877 */
7878 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
7879 {
7880 iemVmxCommitCurrentVmcsToMemory(pVCpu);
7881 IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
7882 }
7883
7884 iemVmxVmSucceed(pVCpu);
7885 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7886 return VINF_SUCCESS;
7887}
7888
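/*
 * A minimal standalone sketch of the policy the VMPTRLD worker above follows of
 * committing the old current VMCS before loading a new one, modelled as a one-slot
 * write-back cache.  ONESLOTCACHE, CommitCurrent(), LoadCurrent() and the addresses
 * are illustrative assumptions; the block is guarded out of compilation.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define NIL_ADDR UINT64_C(0xffffffffffffffff)

/* Hypothetical one-slot write-back cache: at most one "current" entry is kept in-core. */
typedef struct ONESLOTCACHE
{
    uint64_t uCurAddr;   /* Backing address of the cached entry, NIL_ADDR if none. */
    int      fDirty;     /* Whether the cached copy must be written back before switching. */
} ONESLOTCACHE;

static void CommitCurrent(ONESLOTCACHE *pCache)
{
    if (pCache->uCurAddr != NIL_ADDR && pCache->fDirty)
        printf("write back cached data to %#llx\n", (unsigned long long)pCache->uCurAddr);
    pCache->fDirty = 0;
}

/* Mirrors the VMPTRLD flow: flush the old current entry, then make the new one current. */
static void LoadCurrent(ONESLOTCACHE *pCache, uint64_t uNewAddr)
{
    if (pCache->uCurAddr != uNewAddr)
    {
        CommitCurrent(pCache);
        pCache->uCurAddr = uNewAddr;
    }
}

int main(void)
{
    ONESLOTCACHE Cache = { NIL_ADDR, 0 };
    LoadCurrent(&Cache, 0x1000);  /* Nothing to flush the first time. */
    Cache.fDirty = 1;
    LoadCurrent(&Cache, 0x2000);  /* Flushes the 0x1000 copy before switching. */
    return 0;
}
#endif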
7889
7890/**
7891 * VMXON instruction execution worker.
7892 *
7893 * @returns Strict VBox status code.
7894 * @param pVCpu The cross context virtual CPU structure.
7895 * @param cbInstr The instruction length in bytes.
7896 * @param iEffSeg The effective segment register to use with @a
7897 * GCPtrVmxon.
7898 * @param GCPtrVmxon The linear address of the VMXON pointer.
7899 * @param pExitInfo Pointer to the VM-exit instruction information struct.
7900 * Optional, can be NULL.
7901 *
7902 * @remarks Common VMX instruction checks are already expected to have been done
7903 * by the caller, i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7904 */
7905IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmxon,
7906 PCVMXVEXITINFO pExitInfo)
7907{
7908#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
7909 RT_NOREF5(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
7910 return VINF_EM_RAW_EMULATE_INSTR;
7911#else
7912 if (!IEM_VMX_IS_ROOT_MODE(pVCpu))
7913 {
7914 /* CPL. */
7915 if (pVCpu->iem.s.uCpl > 0)
7916 {
7917 Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7918 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cpl;
7919 return iemRaiseGeneralProtectionFault0(pVCpu);
7920 }
7921
7922 /* A20M (A20 Masked) mode. */
7923 if (!PGMPhysIsA20Enabled(pVCpu))
7924 {
7925 Log(("vmxon: A20M mode -> #GP(0)\n"));
7926 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_A20M;
7927 return iemRaiseGeneralProtectionFault0(pVCpu);
7928 }
7929
7930 /* CR0. */
7931 {
7932 /* CR0 MB1 bits. */
7933 uint64_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
7934 if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
7935 {
7936 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
7937 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed0;
7938 return iemRaiseGeneralProtectionFault0(pVCpu);
7939 }
7940
7941 /* CR0 MBZ bits. */
7942 uint64_t const uCr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
7943 if (pVCpu->cpum.GstCtx.cr0 & ~uCr0Fixed1)
7944 {
7945 Log(("vmxon: CR0 fixed1 bits set -> #GP(0)\n"));
7946 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed1;
7947 return iemRaiseGeneralProtectionFault0(pVCpu);
7948 }
7949 }
7950
7951 /* CR4. */
7952 {
7953 /* CR4 MB1 bits. */
7954 uint64_t const uCr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
7955 if ((pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0) != uCr4Fixed0)
7956 {
7957 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
7958 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed0;
7959 return iemRaiseGeneralProtectionFault0(pVCpu);
7960 }
7961
7962 /* CR4 MBZ bits. */
7963 uint64_t const uCr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
7964 if (pVCpu->cpum.GstCtx.cr4 & ~uCr4Fixed1)
7965 {
7966 Log(("vmxon: CR4 fixed1 bits set -> #GP(0)\n"));
7967 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed1;
7968 return iemRaiseGeneralProtectionFault0(pVCpu);
7969 }
7970 }
7971
7972 /* Feature control MSR's LOCK and VMXON bits. */
7973 uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
7974 if (!(uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON)))
7975 {
7976 Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
7977 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_MsrFeatCtl;
7978 return iemRaiseGeneralProtectionFault0(pVCpu);
7979 }
7980
7981 /* Get the VMXON pointer from the location specified by the source memory operand. */
7982 RTGCPHYS GCPhysVmxon;
7983 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
7984 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7985 {
7986 Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
7987 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrMap;
7988 return rcStrict;
7989 }
7990
7991 /* VMXON region pointer alignment. */
7992 if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
7993 {
7994 Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
7995 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAlign;
7996 iemVmxVmFailInvalid(pVCpu);
7997 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7998 return VINF_SUCCESS;
7999 }
8000
8001 /* VMXON physical-address width limits. */
8002 if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
8003 {
8004 Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
8005 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrWidth;
8006 iemVmxVmFailInvalid(pVCpu);
8007 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8008 return VINF_SUCCESS;
8009 }
8010
8011 /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
8012 restriction imposed by our implementation. */
8013 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
8014 {
8015 Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
8016 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAbnormal;
8017 iemVmxVmFailInvalid(pVCpu);
8018 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8019 return VINF_SUCCESS;
8020 }
8021
8022 /* Read the VMCS revision ID from the VMXON region. */
8023 VMXVMCSREVID VmcsRevId;
8024 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
8025 if (RT_FAILURE(rc))
8026 {
8027 Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
8028 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrReadPhys;
8029 return rc;
8030 }
8031
8032 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
8033 if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
8034 {
8035 /* Revision ID mismatch. */
8036 if (!VmcsRevId.n.fIsShadowVmcs)
8037 {
8038 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
8039 VmcsRevId.n.u31RevisionId));
8040 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmcsRevId;
8041 iemVmxVmFailInvalid(pVCpu);
8042 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8043 return VINF_SUCCESS;
8044 }
8045
8046 /* Shadow VMCS disallowed. */
8047 Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
8048 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_ShadowVmcs;
8049 iemVmxVmFailInvalid(pVCpu);
8050 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8051 return VINF_SUCCESS;
8052 }
8053
8054 /*
8055 * Record that we're in VMX operation, block INIT, block and disable A20M.
8056 */
8057 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
8058 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
8059 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
8060
8061 /* Clear address-range monitoring. */
8062 EMMonitorWaitClear(pVCpu);
8063 /** @todo NSTVMX: Intel PT. */
8064
8065 iemVmxVmSucceed(pVCpu);
8066 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8067# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
8068 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
8069# else
8070 return VINF_SUCCESS;
8071# endif
8072 }
8073 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8074 {
8075 /* Nested-guest intercept. */
8076 if (pExitInfo)
8077 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
8078 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMXON, VMXINSTRID_NONE, cbInstr);
8079 }
8080
8081 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
8082
8083 /* CPL. */
8084 if (pVCpu->iem.s.uCpl > 0)
8085 {
8086 Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8087 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxRootCpl;
8088 return iemRaiseGeneralProtectionFault0(pVCpu);
8089 }
8090
8091 /* VMXON when already in VMX root mode. */
8092 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
8093 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxAlreadyRoot;
8094 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8095 return VINF_SUCCESS;
8096#endif
8097}
8098
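/*
 * A minimal standalone sketch of the CR0/CR4 fixed-bits checks performed by the VMXON
 * worker above: bits set in the FIXED0 MSR must be 1 in CRx, and bits clear in the
 * FIXED1 MSR must be 0 in CRx.  IsCrValidForVmx() and the sample MSR values are
 * illustrative assumptions, not values read from hardware; the block is guarded out
 * of compilation.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Return 1 if uCr satisfies both the must-be-one (fixed0) and must-be-zero (fixed1) masks. */
static int IsCrValidForVmx(uint64_t uCr, uint64_t uFixed0, uint64_t uFixed1)
{
    if ((uCr & uFixed0) != uFixed0)   /* A must-be-one bit is clear. */
        return 0;
    if (uCr & ~uFixed1)               /* A must-be-zero bit is set. */
        return 0;
    return 1;
}

int main(void)
{
    uint64_t const uFixed0 = UINT64_C(0x80000021);   /* Sample: PG, NE, PE must be set. */
    uint64_t const uFixed1 = UINT64_C(0xffffffff);   /* Sample: no must-be-zero bits in the low 32. */
    printf("CR0=0x80000031 valid: %d\n", IsCrValidForVmx(UINT64_C(0x80000031), uFixed0, uFixed1)); /* 1 */
    printf("CR0=0x00000031 valid: %d\n", IsCrValidForVmx(UINT64_C(0x00000031), uFixed0, uFixed1)); /* 0: PG clear */
    return 0;
}
#endif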
8099
8100/**
8101 * Implements 'VMXOFF'.
8102 *
8103 * @remarks Common VMX instruction checks are already expected to have been done
8104 * by the caller, i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
8105 */
8106IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
8107{
8108# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
8109 RT_NOREF2(pVCpu, cbInstr);
8110 return VINF_EM_RAW_EMULATE_INSTR;
8111# else
8112 /* Nested-guest intercept. */
8113 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8114 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMXOFF, cbInstr);
8115
8116 /* CPL. */
8117 if (pVCpu->iem.s.uCpl > 0)
8118 {
8119 Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
8120 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxoff_Cpl;
8121 return iemRaiseGeneralProtectionFault0(pVCpu);
8122 }
8123
8124 /* Dual monitor treatment of SMIs and SMM. */
8125 uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
8126 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
8127 {
8128 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
8129 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8130 return VINF_SUCCESS;
8131 }
8132
8133 /* Record that we're no longer in VMX root operation, unblock INIT and unblock/enable A20M. */
8134 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
8135 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
8136
8137 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
8138 { /** @todo NSTVMX: Unblock SMI. */ }
8139
8140 EMMonitorWaitClear(pVCpu);
8141 /** @todo NSTVMX: Unblock and enable A20M. */
8142
8143 iemVmxVmSucceed(pVCpu);
8144 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8145# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
8146 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
8147# else
8148 return VINF_SUCCESS;
8149# endif
8150# endif
8151}
8152
8153
8154/**
8155 * Implements 'VMXON'.
8156 */
8157IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
8158{
8159 return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
8160}
8161
8162
8163/**
8164 * Implements 'VMLAUNCH'.
8165 */
8166IEM_CIMPL_DEF_0(iemCImpl_vmlaunch)
8167{
8168 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMLAUNCH, NULL /* pExitInfo */);
8169}
8170
8171
8172/**
8173 * Implements 'VMRESUME'.
8174 */
8175IEM_CIMPL_DEF_0(iemCImpl_vmresume)
8176{
8177 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMRESUME, NULL /* pExitInfo */);
8178}
8179
8180
8181/**
8182 * Implements 'VMPTRLD'.
8183 */
8184IEM_CIMPL_DEF_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
8185{
8186 return iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
8187}
8188
8189
8190/**
8191 * Implements 'VMPTRST'.
8192 */
8193IEM_CIMPL_DEF_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
8194{
8195 return iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
8196}
8197
8198
8199/**
8200 * Implements 'VMCLEAR'.
8201 */
8202IEM_CIMPL_DEF_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
8203{
8204 return iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
8205}
8206
8207
8208/**
8209 * Implements 'VMWRITE' register.
8210 */
8211IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64FieldEnc)
8212{
8213 return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, IEMMODE_64BIT /* N/A */, u64Val, u64FieldEnc,
8214 NULL /* pExitInfo */);
8215}
8216
8217
8218/**
8219 * Implements 'VMWRITE' memory.
8220 */
8221IEM_CIMPL_DEF_4(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrVal, uint32_t, u64FieldEnc)
8222{
8223 return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrVal, u64FieldEnc, NULL /* pExitInfo */);
8224}
8225
8226
8227/**
8228 * Implements 'VMREAD' 64-bit register.
8229 */
8230IEM_CIMPL_DEF_2(iemCImpl_vmread64_reg, uint64_t *, pu64Dst, uint64_t, u64FieldEnc)
8231{
8232 return iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, NULL /* pExitInfo */);
8233}
8234
8235
8236/**
8237 * Implements 'VMREAD' 32-bit register.
8238 */
8239IEM_CIMPL_DEF_2(iemCImpl_vmread32_reg, uint32_t *, pu32Dst, uint32_t, u32FieldEnc)
8240{
8241 return iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u32FieldEnc, NULL /* pExitInfo */);
8242}
8243
8244
8245/**
8246 * Implements 'VMREAD' memory.
8247 */
8248IEM_CIMPL_DEF_4(iemCImpl_vmread_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrDst, uint32_t, u64FieldEnc)
8249{
8250 return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, u64FieldEnc, NULL /* pExitInfo */);
8251}
8252
8253
8254/**
8255 * Implements VMX's implementation of PAUSE.
8256 */
8257IEM_CIMPL_DEF_0(iemCImpl_vmx_pause)
8258{
8259 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8260 {
8261 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrPause(pVCpu, cbInstr);
8262 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
8263 return rcStrict;
8264 }
8265
8266 /*
8267 * Outside VMX non-root operation or if the PAUSE instruction does not cause
8268 * a VM-exit, the instruction operates normally.
8269 */
8270 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
8271 return VINF_SUCCESS;
8272}
8273
8274#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
8275
8276
8277/**
8278 * Implements 'VMCALL'.
8279 */
8280IEM_CIMPL_DEF_0(iemCImpl_vmcall)
8281{
8282#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8283 /* Nested-guest intercept. */
8284 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
8285 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMCALL, cbInstr);
8286#endif
8287
8288 /* Join forces with vmmcall. */
8289 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
8290}
8291