VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h@74190

Last change on this file: r74190, checked in by vboxsync

VMM/IEM: Nested VMX: bugref:9180 todo with reminder to set bit 31.

1/* $Id: IEMAllCImplVmxInstr.cpp.h 74190 2018-09-11 10:45:48Z vboxsync $ */
2/** @file
3 * IEM - VT-x instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/**
20 * Implements 'VMCALL'.
21 */
22IEM_CIMPL_DEF_0(iemCImpl_vmcall)
23{
24 /** @todo NSTVMX: intercept. */
25
26 /* Join forces with vmmcall. */
27 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
28}
29
30#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
31/**
32 * Map of VMCS field encodings to their virtual-VMCS structure offsets.
33 *
34 * The first array dimension is the VMCS field width shifted left by two bits and OR'ed
35 * with the field type, the second dimension is the field index, see VMXVMCSFIELDENC.
36 */
37uint16_t const g_aoffVmcsMap[16][VMX_V_VMCS_MAX_INDEX + 1] =
38{
39 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
40 {
41 /* 0 */ RT_OFFSETOF(VMXVVMCS, u16Vpid),
42 /* 1 */ RT_OFFSETOF(VMXVVMCS, u16PostIntNotifyVector),
43 /* 2 */ RT_OFFSETOF(VMXVVMCS, u16EptpIndex),
44 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
45 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
46 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
47 },
48 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
49 {
50 /* 0-7 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
51 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
52 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
53 /* 24-25 */ UINT16_MAX, UINT16_MAX
54 },
55 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
56 {
57 /* 0 */ RT_OFFSETOF(VMXVVMCS, GuestEs),
58 /* 1 */ RT_OFFSETOF(VMXVVMCS, GuestCs),
59 /* 2 */ RT_OFFSETOF(VMXVVMCS, GuestSs),
60 /* 3 */ RT_OFFSETOF(VMXVVMCS, GuestDs),
61 /* 4 */ RT_OFFSETOF(VMXVVMCS, GuestFs),
62 /* 5 */ RT_OFFSETOF(VMXVVMCS, GuestGs),
63 /* 6 */ RT_OFFSETOF(VMXVVMCS, GuestLdtr),
64 /* 7 */ RT_OFFSETOF(VMXVVMCS, GuestTr),
65 /* 8 */ RT_OFFSETOF(VMXVVMCS, u16GuestIntStatus),
66 /* 9 */ RT_OFFSETOF(VMXVVMCS, u16PmlIndex),
67 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
68 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
69 },
70 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
71 {
72 /* 0 */ RT_OFFSETOF(VMXVVMCS, HostEs),
73 /* 1 */ RT_OFFSETOF(VMXVVMCS, HostCs),
74 /* 2 */ RT_OFFSETOF(VMXVVMCS, HostSs),
75 /* 3 */ RT_OFFSETOF(VMXVVMCS, HostDs),
76 /* 4 */ RT_OFFSETOF(VMXVVMCS, HostFs),
77 /* 5 */ RT_OFFSETOF(VMXVVMCS, HostGs),
78 /* 6 */ RT_OFFSETOF(VMXVVMCS, HostTr),
79 /* 7-14 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
80 /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
81 /* 23-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
82 },
83 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
84 {
85 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapA),
86 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapB),
87 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64AddrMsrBitmap),
88 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64AddrExitMsrStore),
89 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64AddrExitMsrLoad),
90 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64AddrEntryMsrLoad),
91 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64ExecVmcsPtr),
92 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64AddrPml),
93 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64TscOffset),
94 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64AddrVirtApic),
95 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64AddrApicAccess),
96 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64AddrPostedIntDesc),
97 /* 12 */ RT_OFFSETOF(VMXVVMCS, u64VmFuncCtls),
98 /* 13 */ RT_OFFSETOF(VMXVVMCS, u64EptpPtr),
99 /* 14 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap0),
100 /* 15 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap1),
101 /* 16 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap2),
102 /* 17 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap3),
103 /* 18 */ RT_OFFSETOF(VMXVVMCS, u64AddrEptpList),
104 /* 19 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmreadBitmap),
105 /* 20 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmwriteBitmap),
106 /* 21 */ RT_OFFSETOF(VMXVVMCS, u64AddrXcptVeInfo),
107 /* 22 */ RT_OFFSETOF(VMXVVMCS, u64AddrXssBitmap),
108 /* 23 */ RT_OFFSETOF(VMXVVMCS, u64AddrEnclsBitmap),
109 /* 24 */ UINT16_MAX,
110 /* 25 */ RT_OFFSETOF(VMXVVMCS, u64TscMultiplier)
111 },
112 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
113 {
114 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64GuestPhysAddr),
115 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
116 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
117 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
118 /* 25 */ UINT16_MAX
119 },
120 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
121 {
122 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64VmcsLinkPtr),
123 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64GuestDebugCtlMsr),
124 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64GuestPatMsr),
125 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEferMsr),
126 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
127 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte0),
128 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte1),
129 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte2),
130 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte3),
131 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64GuestBndcfgsMsr),
132 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
133 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
134 },
135 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
136 {
137 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64HostPatMsr),
138 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64HostEferMsr),
139 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64HostPerfGlobalCtlMsr),
140 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
141 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
142 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
143 },
144 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
145 {
146 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32PinCtls),
147 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls),
148 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32XcptBitmap),
149 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMask),
150 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMatch),
151 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32Cr3TargetCount),
152 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32ExitCtls),
153 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrStoreCount),
154 /* 8 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrLoadCount),
155 /* 9 */ RT_OFFSETOF(VMXVVMCS, u32EntryCtls),
156 /* 10 */ RT_OFFSETOF(VMXVVMCS, u32EntryMsrLoadCount),
157 /* 11 */ RT_OFFSETOF(VMXVVMCS, u32EntryIntInfo),
158 /* 12 */ RT_OFFSETOF(VMXVVMCS, u32EntryXcptErrCode),
159 /* 13 */ RT_OFFSETOF(VMXVVMCS, u32EntryInstrLen),
160 /* 14 */ RT_OFFSETOF(VMXVVMCS, u32TprThreshold),
161 /* 15 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls2),
162 /* 16 */ RT_OFFSETOF(VMXVVMCS, u32PleGap),
163 /* 17 */ RT_OFFSETOF(VMXVVMCS, u32PleWindow),
164 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
165 },
166 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
167 {
168 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32RoVmInstrError),
169 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32RoExitReason),
170 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32RoExitIntInfo),
171 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32RoExitErrCode),
172 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
173 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
174 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32RoExitInstrLen),
175 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32RoExitInstrInfo),
176 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
177 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
178 /* 24-25 */ UINT16_MAX, UINT16_MAX
179 },
180 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
181 {
182 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsLimit),
183 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsLimit),
184 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsLimit),
185 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsLimit),
186 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsLimit),
187 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsLimit),
188 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrLimit),
189 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrLimit),
190 /* 8 */ RT_OFFSETOF(VMXVVMCS, u32GuestGdtrLimit),
191 /* 9 */ RT_OFFSETOF(VMXVVMCS, u32GuestIdtrLimit),
192 /* 10 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsAttr),
193 /* 11 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsAttr),
194 /* 12 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsAttr),
195 /* 13 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsAttr),
196 /* 14 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsAttr),
197 /* 15 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsAttr),
198 /* 16 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrAttr),
199 /* 17 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrAttr),
200 /* 18 */ RT_OFFSETOF(VMXVVMCS, u32GuestIntrState),
201 /* 19 */ RT_OFFSETOF(VMXVVMCS, u32GuestActivityState),
202 /* 20 */ RT_OFFSETOF(VMXVVMCS, u32GuestSmBase),
203 /* 21 */ RT_OFFSETOF(VMXVVMCS, u32GuestSysenterCS),
204 /* 22 */ UINT16_MAX,
205 /* 23 */ RT_OFFSETOF(VMXVVMCS, u32PreemptTimer),
206 /* 24-25 */ UINT16_MAX, UINT16_MAX
207 },
208 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
209 {
210 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32HostSysenterCs),
211 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
212 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
213 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
214 /* 25 */ UINT16_MAX
215 },
216 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_CONTROL: */
217 {
218 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64Cr0Mask),
219 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64Cr4Mask),
220 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64Cr0ReadShadow),
221 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64Cr4ReadShadow),
222 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target0),
223 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target1),
224 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target2),
225 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target3),
226 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
227 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
228 /* 24-25 */ UINT16_MAX, UINT16_MAX
229 },
230 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
231 {
232 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64ExitQual),
233 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64IoRcx),
234 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64IoRsi),
235 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64IoRdi),
236 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64IoRip),
237 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestLinearAddr),
238 /* 6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
239 /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
240 /* 22-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
241 },
242 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
243 {
244 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr0),
245 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr3),
246 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr4),
247 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEsBase),
248 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64GuestCsBase),
249 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestSsBase),
250 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64GuestDsBase),
251 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64GuestFsBase),
252 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64GuestGsBase),
253 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64GuestLdtrBase),
254 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64GuestTrBase),
255 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64GuestGdtrBase),
256 /* 12 */ RT_OFFSETOF(VMXVVMCS, u64GuestIdtrBase),
257 /* 13 */ RT_OFFSETOF(VMXVVMCS, u64GuestDr7),
258 /* 14 */ RT_OFFSETOF(VMXVVMCS, u64GuestRsp),
259 /* 15 */ RT_OFFSETOF(VMXVVMCS, u64GuestRip),
260 /* 16 */ RT_OFFSETOF(VMXVVMCS, u64GuestRFlags),
261 /* 17 */ RT_OFFSETOF(VMXVVMCS, u64GuestPendingDbgXcpt),
262 /* 18 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEsp),
263 /* 19 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEip),
264 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
265 },
266 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_HOST_STATE: */
267 {
268 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64HostCr0),
269 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64HostCr3),
270 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64HostCr4),
271 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64HostFsBase),
272 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64HostGsBase),
273 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64HostTrBase),
274 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64HostGdtrBase),
275 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64HostIdtrBase),
276 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEsp),
277 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEip),
278 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64HostRsp),
279 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64HostRip),
280 /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
281 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
282 }
283};
284
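/*
 * A short worked example (assuming the standard Intel VMCS encoding layout: bits 14:13 = width,
 * bits 11:10 = type, bits 9:1 = index, bit 0 = access type) of how a field encoding selects an
 * entry in g_aoffVmcsMap:
 *
 *     VMX_VMCS32_GUEST_CS_LIMIT = 0x4802
 *       width = (0x4802 >> 13) & 3     = 2   (32-bit)
 *       type  = (0x4802 >> 10) & 3     = 2   (guest-state)
 *       index = (0x4802 >>  1) & 0x1ff = 1
 *     => g_aoffVmcsMap[(2 << 2) | 2][1] == RT_OFFSETOF(VMXVVMCS, u32GuestCsLimit)
 *
 * This mirrors the (uWidth << 2) | uType row computation used by the lookup code later in
 * this file.
 */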
285
286/**
287 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
288 * relative offsets.
289 */
290# ifdef IEM_WITH_CODE_TLB
291# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) do { } while (0)
292# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) do { } while (0)
293# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
294# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
295# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
296# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
297# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
298# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
299# error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
300# else /* !IEM_WITH_CODE_TLB */
301# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
302 do \
303 { \
304 Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
305 (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
306 } while (0)
307
308# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
309
310# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
311 do \
312 { \
313 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
314 uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
315 uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
316 (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
317 } while (0)
318
319# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
320 do \
321 { \
322 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
323 (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
324 } while (0)
325
326# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
327 do \
328 { \
329 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
330 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
331 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
332 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
333 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
334 (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
335 } while (0)
336
337# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
338 do \
339 { \
340 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
341 (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
342 } while (0)
343
344# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
345 do \
346 { \
347 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
348 (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
349 } while (0)
350
351# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
352 do \
353 { \
354 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
355 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
356 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
357 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
358 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
359 (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
360 } while (0)
361# endif /* !IEM_WITH_CODE_TLB */
362
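/*
 * A small worked example (a sketch, assuming the non-TLB path above): for the 16-bit
 * instruction "mov ax, [bp+1234h]" the decoded opcode bytes are 8B 86 34 12. With
 * offDisp = 2, IEM_DISP_GET_U16 combines abOpcode[2] = 0x34 (low) and abOpcode[3] = 0x12
 * (high) into a displacement of 0x1234 via RT_MAKE_U16.
 */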
363/** Whether a shadow VMCS is present for the given VCPU. */
364#define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
365
366/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
367#define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->u64VmcsLinkPtr.u)
368
369/** Gets the VMXON region pointer. */
370#define IEM_VMX_GET_VMXON_PTR(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
371
372/** Whether a current VMCS is present for the given VCPU. */
373#define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
374
375/** Gets the guest-physical address of the current VMCS for the given VCPU. */
376#define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
377
378/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
379#define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
380 do \
381 { \
382 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
383 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
384 } while (0)
385
386/** Clears any current VMCS for the given VCPU. */
387#define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
388 do \
389 { \
390 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
391 } while (0)
392
393/** Check the common VMX instruction preconditions.
394 * @note Any changes here, also check if IEMOP_HLP_VMX_INSTR needs updating.
395 */
396#define IEM_VMX_INSTR_CHECKS(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
397 do { \
398 if ( !IEM_IS_REAL_OR_V86_MODE(a_pVCpu) \
399 && ( !IEM_IS_LONG_MODE(a_pVCpu) \
400 || IEM_IS_64BIT_CODE(a_pVCpu))) \
401 { /* likely */ } \
402 else \
403 { \
404 if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
405 { \
406 Log((a_szInstr ": Real or v8086 mode -> #UD\n")); \
407 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
408 return iemRaiseUndefinedOpcode(a_pVCpu); \
409 } \
410 if (IEM_IS_LONG_MODE(a_pVCpu) && !IEM_IS_64BIT_CODE(a_pVCpu)) \
411 { \
412 Log((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
413 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
414 return iemRaiseUndefinedOpcode(a_pVCpu); \
415 } \
416 } \
417 } while (0)
418
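/*
 * For instance (an illustrative reading of the checks above): executing a VMX instruction
 * from real mode or virtual-8086 mode, or from a compatibility-mode (non 64-bit) code
 * segment while in long mode, raises #UD and records the corresponding _RealOrV86Mode or
 * _LongModeCS diagnostic.
 */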
419/** Checks for VMX instructions that require the CPU to be in VMX operation.
420 * @note Any changes here, check if IEMOP_HLP_IN_VMX_OPERATION needs updating. */
421#define IEM_VMX_IN_VMX_OPERATION(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
422 do \
423 { \
424 if (IEM_IS_VMX_ROOT_MODE(a_pVCpu)) \
425 { /* likely */ } \
426 else \
427 { \
428 Log((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
429 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
430 return iemRaiseUndefinedOpcode(a_pVCpu); \
431 } \
432 } while (0)
433
434/** Marks a VM-entry failure with a diagnostic reason, logs and returns. */
435#define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_InsDiag) \
436 do \
437 { \
438 Log(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_InsDiag), \
439 HMVmxGetDiagDesc(a_InsDiag), (a_pszFailure))); \
440 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_InsDiag); \
441 return VERR_VMX_VMENTRY_FAILED; \
442 } while (0)
443
444
445/**
446 * Returns whether the given VMCS field is valid and supported by our emulation.
447 *
448 * @param pVCpu The cross context virtual CPU structure.
449 * @param u64FieldEnc The VMCS field encoding.
450 *
451 * @remarks This takes into account the CPU features exposed to the guest.
452 */
453IEM_STATIC bool iemVmxIsVmcsFieldValid(PVMCPU pVCpu, uint64_t u64FieldEnc)
454{
455 uint32_t const uFieldEncHi = RT_HI_U32(u64FieldEnc);
456 uint32_t const uFieldEncLo = RT_LO_U32(u64FieldEnc);
457 if (!uFieldEncHi)
458 { /* likely */ }
459 else
460 return false;
461
462 PCCPUMFEATURES pFeat = IEM_GET_GUEST_CPU_FEATURES(pVCpu);
463 switch (uFieldEncLo)
464 {
465 /*
466 * 16-bit fields.
467 */
468 /* Control fields. */
469 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
470 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
471 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
472
473 /* Guest-state fields. */
474 case VMX_VMCS16_GUEST_ES_SEL:
475 case VMX_VMCS16_GUEST_CS_SEL:
476 case VMX_VMCS16_GUEST_SS_SEL:
477 case VMX_VMCS16_GUEST_DS_SEL:
478 case VMX_VMCS16_GUEST_FS_SEL:
479 case VMX_VMCS16_GUEST_GS_SEL:
480 case VMX_VMCS16_GUEST_LDTR_SEL:
481 case VMX_VMCS16_GUEST_TR_SEL:
482 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
483 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
484
485 /* Host-state fields. */
486 case VMX_VMCS16_HOST_ES_SEL:
487 case VMX_VMCS16_HOST_CS_SEL:
488 case VMX_VMCS16_HOST_SS_SEL:
489 case VMX_VMCS16_HOST_DS_SEL:
490 case VMX_VMCS16_HOST_FS_SEL:
491 case VMX_VMCS16_HOST_GS_SEL:
492 case VMX_VMCS16_HOST_TR_SEL: return true;
493
494 /*
495 * 64-bit fields.
496 */
497 /* Control fields. */
498 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
499 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
500 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
501 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
502 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
503 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
504 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
505 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
506 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
507 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
508 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
509 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
510 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
511 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
512 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
513 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
514 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
515 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
516 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
517 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
518 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
519 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
520 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
521 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
522 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
523 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
524 case VMX_VMCS64_CTRL_EPTP_FULL:
525 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
526 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
527 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
528 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
529 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
530 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
531 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
532 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
533 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
534 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
535 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
536 {
537 uint64_t const uVmFuncMsr = CPUMGetGuestIa32VmxVmFunc(pVCpu);
538 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
539 }
540 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
541 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
542 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
543 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
544 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
545 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
546 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
547 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
548 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
549 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
550 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
551 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
552
553 /* Read-only data fields. */
554 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
555 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
556
557 /* Guest-state fields. */
558 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
559 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
560 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
561 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
562 case VMX_VMCS64_GUEST_PAT_FULL:
563 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
564 case VMX_VMCS64_GUEST_EFER_FULL:
565 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
566 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
567 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
568 case VMX_VMCS64_GUEST_PDPTE0_FULL:
569 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
570 case VMX_VMCS64_GUEST_PDPTE1_FULL:
571 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
572 case VMX_VMCS64_GUEST_PDPTE2_FULL:
573 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
574 case VMX_VMCS64_GUEST_PDPTE3_FULL:
575 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
576 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
577 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
578
579 /* Host-state fields. */
580 case VMX_VMCS64_HOST_PAT_FULL:
581 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
582 case VMX_VMCS64_HOST_EFER_FULL:
583 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
584 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
585 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
586
587 /*
588 * 32-bit fields.
589 */
590 /* Control fields. */
591 case VMX_VMCS32_CTRL_PIN_EXEC:
592 case VMX_VMCS32_CTRL_PROC_EXEC:
593 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
594 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
595 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
596 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
597 case VMX_VMCS32_CTRL_EXIT:
598 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
599 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
600 case VMX_VMCS32_CTRL_ENTRY:
601 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
602 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
603 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
604 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
605 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
606 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
607 case VMX_VMCS32_CTRL_PLE_GAP:
608 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
609
610 /* Read-only data fields. */
611 case VMX_VMCS32_RO_VM_INSTR_ERROR:
612 case VMX_VMCS32_RO_EXIT_REASON:
613 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
614 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
615 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
616 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
617 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
618 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
619
620 /* Guest-state fields. */
621 case VMX_VMCS32_GUEST_ES_LIMIT:
622 case VMX_VMCS32_GUEST_CS_LIMIT:
623 case VMX_VMCS32_GUEST_SS_LIMIT:
624 case VMX_VMCS32_GUEST_DS_LIMIT:
625 case VMX_VMCS32_GUEST_FS_LIMIT:
626 case VMX_VMCS32_GUEST_GS_LIMIT:
627 case VMX_VMCS32_GUEST_LDTR_LIMIT:
628 case VMX_VMCS32_GUEST_TR_LIMIT:
629 case VMX_VMCS32_GUEST_GDTR_LIMIT:
630 case VMX_VMCS32_GUEST_IDTR_LIMIT:
631 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
632 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
633 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
634 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
635 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
636 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
637 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
638 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
639 case VMX_VMCS32_GUEST_INT_STATE:
640 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
641 case VMX_VMCS32_GUEST_SMBASE:
642 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
643 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
644
645 /* Host-state fields. */
646 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
647
648 /*
649 * Natural-width fields.
650 */
651 /* Control fields. */
652 case VMX_VMCS_CTRL_CR0_MASK:
653 case VMX_VMCS_CTRL_CR4_MASK:
654 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
655 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
656 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
657 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
658 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
659 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
660
661 /* Read-only data fields. */
662 case VMX_VMCS_RO_EXIT_QUALIFICATION:
663 case VMX_VMCS_RO_IO_RCX:
664 case VMX_VMCS_RO_IO_RSX:
665 case VMX_VMCS_RO_IO_RDI:
666 case VMX_VMCS_RO_IO_RIP:
667 case VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR: return true;
668
669 /* Guest-state fields. */
670 case VMX_VMCS_GUEST_CR0:
671 case VMX_VMCS_GUEST_CR3:
672 case VMX_VMCS_GUEST_CR4:
673 case VMX_VMCS_GUEST_ES_BASE:
674 case VMX_VMCS_GUEST_CS_BASE:
675 case VMX_VMCS_GUEST_SS_BASE:
676 case VMX_VMCS_GUEST_DS_BASE:
677 case VMX_VMCS_GUEST_FS_BASE:
678 case VMX_VMCS_GUEST_GS_BASE:
679 case VMX_VMCS_GUEST_LDTR_BASE:
680 case VMX_VMCS_GUEST_TR_BASE:
681 case VMX_VMCS_GUEST_GDTR_BASE:
682 case VMX_VMCS_GUEST_IDTR_BASE:
683 case VMX_VMCS_GUEST_DR7:
684 case VMX_VMCS_GUEST_RSP:
685 case VMX_VMCS_GUEST_RIP:
686 case VMX_VMCS_GUEST_RFLAGS:
687 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
688 case VMX_VMCS_GUEST_SYSENTER_ESP:
689 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
690
691 /* Host-state fields. */
692 case VMX_VMCS_HOST_CR0:
693 case VMX_VMCS_HOST_CR3:
694 case VMX_VMCS_HOST_CR4:
695 case VMX_VMCS_HOST_FS_BASE:
696 case VMX_VMCS_HOST_GS_BASE:
697 case VMX_VMCS_HOST_TR_BASE:
698 case VMX_VMCS_HOST_GDTR_BASE:
699 case VMX_VMCS_HOST_IDTR_BASE:
700 case VMX_VMCS_HOST_SYSENTER_ESP:
701 case VMX_VMCS_HOST_SYSENTER_EIP:
702 case VMX_VMCS_HOST_RSP:
703 case VMX_VMCS_HOST_RIP: return true;
704 }
705
706 return false;
707}
708
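/*
 * Usage sketch (illustrative): with a guest CPU profile that does not expose VPID,
 * iemVmxIsVmcsFieldValid(pVCpu, VMX_VMCS16_VPID) returns false, while always-available
 * fields such as VMX_VMCS_GUEST_RIP return true regardless of the exposed features.
 */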
709
710/**
711 * Gets a guest segment register from the VMCS given its index.
712 *
713 * @returns VBox status code.
714 * @param pVmcs Pointer to the virtual VMCS.
715 * @param iSegReg The index of the segment register (X86_SREG_XXX).
716 * @param pSelReg Where to store the segment register (only updated when
717 * VINF_SUCCESS is returned).
718 *
719 * @remarks Warning! This does not validate the contents of the retrieved segment
720 * register.
721 */
722IEM_STATIC int iemVmxVmcsGetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCPUMSELREG pSelReg)
723{
724 Assert(pSelReg);
725 Assert(iSegReg < X86_SREG_COUNT);
726
727 /* Selector. */
728 uint16_t u16Sel;
729 {
730 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
731 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
732 uint8_t const uWidthType = (uWidth << 2) | uType;
733 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
734 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
735 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
736 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
737 uint8_t const *pbField = pbVmcs + offField;
738 u16Sel = *(uint16_t *)pbField;
739 }
740
741 /* Limit. */
742 uint32_t u32Limit;
743 {
744 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
745 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
746 uint8_t const uWidthType = (uWidth << 2) | uType;
747 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
748 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
749 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
750 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
751 uint8_t const *pbField = pbVmcs + offField;
752 u32Limit = *(uint32_t *)pbField;
753 }
754
755 /* Base. */
756 uint64_t u64Base;
757 {
758 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
759 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
760 uint8_t const uWidthType = (uWidth << 2) | uType;
761 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
762 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
763 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
764 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
765 uint8_t const *pbField = pbVmcs + offField;
766 u64Base = *(uint64_t *)pbField;
767 /** @todo NSTVMX: Should we zero out high bits here for 32-bit virtual CPUs? */
768 }
769
770 /* Attributes. */
771 uint32_t u32Attr;
772 {
773 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
774 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
775 uint8_t const uWidthType = (uWidth << 2) | uType;
776 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
777 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
778 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
779 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
780 uint8_t const *pbField = pbVmcs + offField;
781 u32Attr = *(uint32_t *)pbField;
782 }
783
784 pSelReg->Sel = u16Sel;
785 pSelReg->u32Limit = u32Limit;
786 pSelReg->u64Base = u64Base;
787 pSelReg->Attr.u = u32Attr;
788 return VINF_SUCCESS;
789}
790
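/* A minimal usage sketch (compiled out; purely illustrative) of how a caller could pull
   the guest CS register out of the current virtual VMCS using the helper above: */
#if 0
    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    CPUMSELREG GuestCs;
    int rc = iemVmxVmcsGetGuestSegReg(pVmcs, X86_SREG_CS, &GuestCs);
    if (RT_SUCCESS(rc))
        Log(("CS: Sel=%#x Base=%#RX64 Limit=%#x Attr=%#x\n",
             GuestCs.Sel, GuestCs.u64Base, GuestCs.u32Limit, GuestCs.Attr.u));
#endif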
791
792/**
793 * Gets VM-exit instruction information along with any displacement for an
794 * instruction VM-exit.
795 *
796 * @returns The VM-exit instruction information.
797 * @param pVCpu The cross context virtual CPU structure.
798 * @param uExitReason The VM-exit reason.
799 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_XXX) if
800 * any. Pass VMXINSTRID_NONE otherwise.
801 * @param fPrimaryOpRead Whether the primary operand of the ModR/M byte (bits 0:3)
802 * is a read or a write operand.
803 * @param pGCPtrDisp Where to store the displacement field. Optional, can be
804 * NULL.
805 */
806IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, bool fPrimaryOpRead,
807 PRTGCPTR pGCPtrDisp)
808{
809 RTGCPTR GCPtrDisp;
810 VMXEXITINSTRINFO ExitInstrInfo;
811 ExitInstrInfo.u = 0;
812
813 /*
814 * Get and parse the ModR/M byte from our decoded opcodes.
815 */
816 uint8_t bRm;
817 uint8_t const offModRm = pVCpu->iem.s.offModRm;
818 IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
819 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
820 {
821 /*
822 * ModR/M indicates register addressing.
823 *
824 * The primary/secondary register operands are reported in the iReg1 or iReg2
825 * fields depending on whether it is a read/write form.
826 */
827 uint8_t idxReg1;
828 uint8_t idxReg2;
829 if (fPrimaryOpRead)
830 {
831 idxReg1 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
832 idxReg2 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
833 }
834 else
835 {
836 idxReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
837 idxReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
838 }
839 ExitInstrInfo.All.u2Scaling = 0;
840 ExitInstrInfo.All.iReg1 = idxReg1;
841 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
842 ExitInstrInfo.All.fIsRegOperand = 1;
843 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
844 ExitInstrInfo.All.iSegReg = 0;
845 ExitInstrInfo.All.iIdxReg = 0;
846 ExitInstrInfo.All.fIdxRegInvalid = 1;
847 ExitInstrInfo.All.iBaseReg = 0;
848 ExitInstrInfo.All.fBaseRegInvalid = 1;
849 ExitInstrInfo.All.iReg2 = idxReg2;
850
851 /* Displacement not applicable for register addressing. */
852 GCPtrDisp = 0;
853 }
854 else
855 {
856 /*
857 * ModR/M indicates memory addressing.
858 */
859 uint8_t uScale = 0;
860 bool fBaseRegValid = false;
861 bool fIdxRegValid = false;
862 uint8_t iBaseReg = 0;
863 uint8_t iIdxReg = 0;
864 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
865 {
866 /*
867 * Parse the ModR/M, displacement for 16-bit addressing mode.
868 * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
869 */
870 uint16_t u16Disp = 0;
871 uint8_t const offDisp = offModRm + sizeof(bRm);
872 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
873 {
874 /* Displacement without any registers. */
875 IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
876 }
877 else
878 {
879 /* Register (index and base). */
880 switch (bRm & X86_MODRM_RM_MASK)
881 {
882 case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
883 case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
884 case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
885 case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
886 case 4: fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
887 case 5: fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
888 case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
889 case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
890 }
891
892 /* Register + displacement. */
893 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
894 {
895 case 0: break;
896 case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
897 case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp); break;
898 default:
899 {
900 /* Register addressing, handled at the beginning. */
901 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
902 break;
903 }
904 }
905 }
906
907 Assert(!uScale); /* There's no scaling/SIB byte for 16-bit addressing. */
908 GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
909 }
910 else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
911 {
912 /*
913 * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
914 * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
915 */
916 uint32_t u32Disp = 0;
917 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
918 {
919 /* Displacement without any registers. */
920 uint8_t const offDisp = offModRm + sizeof(bRm);
921 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
922 }
923 else
924 {
925 /* Register (and perhaps scale, index and base). */
926 uint8_t offDisp = offModRm + sizeof(bRm);
927 iBaseReg = (bRm & X86_MODRM_RM_MASK);
928 if (iBaseReg == 4)
929 {
930 /* An SIB byte follows the ModR/M byte, parse it. */
931 uint8_t bSib;
932 uint8_t const offSib = offModRm + sizeof(bRm);
933 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
934
935 /* A displacement may follow SIB, update its offset. */
936 offDisp += sizeof(bSib);
937
938 /* Get the scale. */
939 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
940
941 /* Get the index register. */
942 iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
943 fIdxRegValid = RT_BOOL(iIdxReg != 4);
944
945 /* Get the base register. */
946 iBaseReg = bSib & X86_SIB_BASE_MASK;
947 fBaseRegValid = true;
948 if (iBaseReg == 5)
949 {
950 if ((bRm & X86_MODRM_MOD_MASK) == 0)
951 {
952 /* Mod is 0 implies a 32-bit displacement with no base. */
953 fBaseRegValid = false;
954 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
955 }
956 else
957 {
958 /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
959 iBaseReg = X86_GREG_xBP;
960 }
961 }
962 }
963
964 /* Register + displacement. */
965 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
966 {
967 case 0: /* Handled above */ break;
968 case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
969 case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp); break;
970 default:
971 {
972 /* Register addressing, handled at the beginning. */
973 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
974 break;
975 }
976 }
977 }
978
979 GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
980 }
981 else
982 {
983 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
984
985 /*
986 * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
987 * See Intel instruction spec. 2.2 "IA-32e Mode".
988 */
989 uint64_t u64Disp = 0;
990 bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
991 if (fRipRelativeAddr)
992 {
993 /*
994 * RIP-relative addressing mode.
995 *
996 * The displacement is 32-bit signed implying an offset range of +/-2G.
997 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
998 */
999 uint8_t const offDisp = offModRm + sizeof(bRm);
1000 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
1001 }
1002 else
1003 {
1004 uint8_t offDisp = offModRm + sizeof(bRm);
1005
1006 /*
1007 * Register (and perhaps scale, index and base).
1008 *
1009 * REX.B extends the most-significant bit of the base register. However, REX.B
1010 * is ignored while determining whether an SIB follows the opcode. Hence, we
1011 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
1012 *
1013 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
1014 */
1015 iBaseReg = (bRm & X86_MODRM_RM_MASK);
1016 if (iBaseReg == 4)
1017 {
1018 /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
1019 uint8_t bSib;
1020 uint8_t const offSib = offModRm + sizeof(bRm);
1021 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
1022
1023 /* Displacement may follow SIB, update its offset. */
1024 offDisp += sizeof(bSib);
1025
1026 /* Get the scale. */
1027 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
1028
1029 /* Get the index. */
1030 iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
1031 fIdxRegValid = RT_BOOL(iIdxReg != 4); /* R12 -can- be used as an index register. */
1032
1033 /* Get the base. */
1034 iBaseReg = (bSib & X86_SIB_BASE_MASK);
1035 fBaseRegValid = true;
1036 if (iBaseReg == 5)
1037 {
1038 if ((bRm & X86_MODRM_MOD_MASK) == 0)
1039 {
1040 /* Mod is 0 implies a signed 32-bit displacement with no base. */
1041 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
1042 }
1043 else
1044 {
1045 /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
1046 iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
1047 }
1048 }
1049 }
1050 iBaseReg |= pVCpu->iem.s.uRexB;
1051
1052 /* Register + displacement. */
1053 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
1054 {
1055 case 0: /* Handled above */ break;
1056 case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp); break;
1057 case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
1058 default:
1059 {
1060 /* Register addressing, handled at the beginning. */
1061 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
1062 break;
1063 }
1064 }
1065 }
1066
1067 GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
1068 }
1069
1070 /*
1071 * The primary or secondary register operand is reported in iReg2 depending
1072 * on whether the primary operand is in read/write form.
1073 */
1074 uint8_t idxReg2;
1075 if (fPrimaryOpRead)
1076 {
1077 idxReg2 = bRm & X86_MODRM_RM_MASK;
1078 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
1079 idxReg2 |= pVCpu->iem.s.uRexB;
1080 }
1081 else
1082 {
1083 idxReg2 = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
1084 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
1085 idxReg2 |= pVCpu->iem.s.uRexReg;
1086 }
1087 ExitInstrInfo.All.u2Scaling = uScale;
1088 ExitInstrInfo.All.iReg1 = 0; /* Not applicable for memory addressing. */
1089 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
1090 ExitInstrInfo.All.fIsRegOperand = 0;
1091 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
1092 ExitInstrInfo.All.iSegReg = pVCpu->iem.s.iEffSeg;
1093 ExitInstrInfo.All.iIdxReg = iIdxReg;
1094 ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
1095 ExitInstrInfo.All.iBaseReg = iBaseReg;
1096 ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
1097 ExitInstrInfo.All.iReg2 = idxReg2;
1098 }
1099
1100 /*
1101 * Handle exceptions to the norm for certain instructions.
1102 * (e.g. some instructions convey an instruction identity in place of iReg2).
1103 */
1104 switch (uExitReason)
1105 {
1106 case VMX_EXIT_GDTR_IDTR_ACCESS:
1107 {
1108 Assert(VMXINSTRID_IS_VALID(uInstrId));
1109 ExitInstrInfo.GdtIdt.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
1110 ExitInstrInfo.GdtIdt.u2Undef0 = 0;
1111 break;
1112 }
1113
1114 case VMX_EXIT_LDTR_TR_ACCESS:
1115 {
1116 Assert(VMXINSTRID_IS_VALID(uInstrId));
1117 ExitInstrInfo.LdtTr.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
1118 ExitInstrInfo.LdtTr.u2Undef0 = 0;
1119 break;
1120 }
1121
1122 case VMX_EXIT_RDRAND:
1123 case VMX_EXIT_RDSEED:
1124 {
1125 Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
1126 break;
1127 }
1128 }
1129
1130 /* Update displacement and return the constructed VM-exit instruction information field. */
1131 if (pGCPtrDisp)
1132 *pGCPtrDisp = GCPtrDisp;
1133 return ExitInstrInfo.u;
1134}
1135
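/*
 * Worked example (assuming 64-bit mode, no REX prefix and fPrimaryOpRead=true): a ModR/M
 * byte of 0xC3 (mod=11, reg=000, rm=011) selects register addressing, so the function
 * reports fIsRegOperand=1, iReg1=0 (RAX), iReg2=3 (RBX), u2Scaling=0, both the index and
 * base registers marked invalid, and a displacement of 0.
 */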
1136
1137/**
1138 * Implements VMSucceed for VMX instruction success.
1139 *
1140 * @param pVCpu The cross context virtual CPU structure.
1141 */
1142DECL_FORCE_INLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
1143{
1144 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1145}
1146
1147
1148/**
1149 * Implements VMFailInvalid for VMX instruction failure.
1150 *
1151 * @param pVCpu The cross context virtual CPU structure.
1152 */
1153DECL_FORCE_INLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
1154{
1155 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1156 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_CF;
1157}
1158
1159
1160/**
1161 * Implements VMFailValid for VMX instruction failure.
1162 *
1163 * @param pVCpu The cross context virtual CPU structure.
1164 * @param enmInsErr The VM instruction error.
1165 */
1166DECL_FORCE_INLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1167{
1168 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1169 {
1170 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1171 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_ZF;
1172 /** @todo NSTVMX: VMWrite enmInsErr to VM-instruction error field. */
1173 RT_NOREF(enmInsErr);
1174 }
1175}
1176
1177
1178/**
1179 * Implements VMFail for VMX instruction failure.
1180 *
1181 * @param pVCpu The cross context virtual CPU structure.
1182 * @param enmInsErr The VM instruction error.
1183 */
1184DECL_FORCE_INLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1185{
1186 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1187 {
1188 iemVmxVmFailValid(pVCpu, enmInsErr);
1189 /** @todo Set VM-instruction error field in the current virtual-VMCS. */
1190 }
1191 else
1192 iemVmxVmFailInvalid(pVCpu);
1193}
1194
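/*
 * Summary of the VMX status reporting convention implemented by the helpers above (per the
 * Intel spec's VMsucceed/VMfailInvalid/VMfailValid pseudo-functions):
 *   - VMsucceed:     CF=PF=AF=ZF=SF=OF=0.
 *   - VMfailInvalid: CF=1, all other arithmetic flags cleared (no current VMCS).
 *   - VMfailValid:   ZF=1, all other arithmetic flags cleared, with the error meant to be
 *                    recorded in the VM-instruction error field (still a todo above).
 */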
1195
1196/**
1197 * Flushes the current VMCS contents back to guest memory.
1198 *
1199 * @returns VBox status code.
1200 * @param pVCpu The cross context virtual CPU structure.
1201 */
1202DECL_FORCE_INLINE(int) iemVmxCommitCurrentVmcsToMemory(PVMCPU pVCpu)
1203{
1204 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1205 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
1206 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), sizeof(VMXVVMCS));
1207 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
1208 return rc;
1209}
1210
1211
1212/**
1213 * Implements VMSucceed for the VMREAD instruction and increments the guest RIP.
1214 *
1215 * @param pVCpu The cross context virtual CPU structure.
1216 */
1217DECL_FORCE_INLINE(void) iemVmxVmreadSuccess(PVMCPU pVCpu, uint8_t cbInstr)
1218{
1219 iemVmxVmSucceed(pVCpu);
1220 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1221}
1222
1223
1224/**
1225 * VMREAD common (memory/register) instruction execution worker
1226 *
1227 * @param pVCpu The cross context virtual CPU structure.
1228 * @param cbInstr The instruction length.
1229 * @param pu64Dst Where to write the VMCS value (only updated when
1230 * VINF_SUCCESS is returned).
1231 * @param u64FieldEnc The VMCS field encoding.
1232 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1233 * be NULL.
1234 */
1235IEM_STATIC VBOXSTRICTRC iemVmxVmreadCommon(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
1236 PCVMXVEXITINFO pExitInfo)
1237{
1238 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1239 {
1240 RT_NOREF(pExitInfo); RT_NOREF(cbInstr);
1241 /** @todo NSTVMX: intercept. */
1242 /** @todo NSTVMX: VMCS shadowing intercept (VMREAD bitmap). */
1243 }
1244
1245 /* CPL. */
1246 if (pVCpu->iem.s.uCpl > 0)
1247 {
1248 Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1249 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_Cpl;
1250 return iemRaiseGeneralProtectionFault0(pVCpu);
1251 }
1252
1253 /* VMCS pointer in root mode. */
1254 if ( IEM_IS_VMX_ROOT_MODE(pVCpu)
1255 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1256 {
1257 Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
1258 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrInvalid;
1259 iemVmxVmFailInvalid(pVCpu);
1260 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1261 return VINF_SUCCESS;
1262 }
1263
1264 /* VMCS-link pointer in non-root mode. */
1265 if ( IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
1266 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
1267 {
1268 Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
1269 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_LinkPtrInvalid;
1270 iemVmxVmFailInvalid(pVCpu);
1271 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1272 return VINF_SUCCESS;
1273 }
1274
1275 /* Supported VMCS field. */
1276 if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
1277 {
1278 Log(("vmread: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
1279 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_FieldInvalid;
1280 iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
1281 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1282 return VINF_SUCCESS;
1283 }
1284
1285 /*
1286 * Setup reading from the current or shadow VMCS.
1287 */
1288 uint8_t *pbVmcs;
1289 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1290 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
1291 else
1292 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1293 Assert(pbVmcs);
1294
1295 VMXVMCSFIELDENC FieldEnc;
1296 FieldEnc.u = RT_LO_U32(u64FieldEnc);
1297 uint8_t const uWidth = FieldEnc.n.u2Width;
1298 uint8_t const uType = FieldEnc.n.u2Type;
1299 uint8_t const uWidthType = (uWidth << 2) | uType;
1300 uint8_t const uIndex = FieldEnc.n.u8Index;
1301 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
1302 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
1303
1304 /*
1305 * Read the VMCS component based on the field's effective width.
1306 *
1307 * The effective width is the field width, with 64-bit fields adjusted to 32 bits
1308 * when the access type indicates the high part (little endian).
1309 *
1310 * Note! The caller is responsible for trimming the result and updating registers
1311 * or memory locations as required. Here we just zero-extend to the largest
1312 * type (i.e. 64 bits).
1313 */
1314 uint8_t *pbField = pbVmcs + offField;
1315 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
1316 switch (uEffWidth)
1317 {
1318 case VMX_VMCS_ENC_WIDTH_64BIT:
1319 case VMX_VMCS_ENC_WIDTH_NATURAL: *pu64Dst = *(uint64_t *)pbField; break;
1320 case VMX_VMCS_ENC_WIDTH_32BIT: *pu64Dst = *(uint32_t *)pbField; break;
1321 case VMX_VMCS_ENC_WIDTH_16BIT: *pu64Dst = *(uint16_t *)pbField; break;
1322 }
1323 return VINF_SUCCESS;
1324}
1325
1326
1327/**
1328 * VMREAD (64-bit register) instruction execution worker.
1329 *
1330 * @param pVCpu The cross context virtual CPU structure.
1331 * @param cbInstr The instruction length.
1332 * @param pu64Dst Where to store the VMCS field's value.
1333 * @param u64FieldEnc The VMCS field encoding.
1334 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1335 * be NULL.
1336 */
1337IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg64(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
1338 PCVMXVEXITINFO pExitInfo)
1339{
1340 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
1341 if (rcStrict == VINF_SUCCESS)
1342 {
1343 iemVmxVmreadSuccess(pVCpu, cbInstr);
1344 return VINF_SUCCESS;
1345 }
1346
1347 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1348 return rcStrict;
1349}
1350
1351
1352/**
1353 * VMREAD (32-bit register) instruction execution worker.
1354 *
1355 * @param pVCpu The cross context virtual CPU structure.
1356 * @param cbInstr The instruction length.
1357 * @param pu32Dst Where to store the VMCS field's value.
1358 * @param u32FieldEnc The VMCS field encoding.
1359 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1360 * be NULL.
1361 */
1362IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg32(PVMCPU pVCpu, uint8_t cbInstr, uint32_t *pu32Dst, uint64_t u32FieldEnc,
1363 PCVMXVEXITINFO pExitInfo)
1364{
1365 uint64_t u64Dst;
1366 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u32FieldEnc, pExitInfo);
1367 if (rcStrict == VINF_SUCCESS)
1368 {
1369 *pu32Dst = u64Dst;
1370 iemVmxVmreadSuccess(pVCpu, cbInstr);
1371 return VINF_SUCCESS;
1372 }
1373
1374 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1375 return rcStrict;
1376}
1377
1378
1379/**
1380 * VMREAD (memory) instruction execution worker.
1381 *
1382 * @param pVCpu The cross context virtual CPU structure.
1383 * @param cbInstr The instruction length.
1384 * @param iEffSeg The effective segment register to use with @a GCPtrDst (the
1385 * memory operand of the instruction).
1386 * @param enmEffAddrMode The effective addressing mode (only used with memory
1387 * operand).
1388 * @param GCPtrDst The guest linear address to store the VMCS field's
1389 * value.
1390 * @param u64FieldEnc The VMCS field encoding.
1391 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1392 * be NULL.
1393 */
1394IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode,
1395 RTGCPTR GCPtrDst, uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
1396{
1397 uint64_t u64Dst;
1398 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u64FieldEnc, pExitInfo);
1399 if (rcStrict == VINF_SUCCESS)
1400 {
1401 /*
1402 * Write the VMCS field's value to the location specified in guest-memory.
1403 *
1404 * The pointer size depends on the address size (address-size prefix allowed).
1405 * The operand size depends on IA-32e mode (operand-size prefix not allowed).
1406 */
1407 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
1408 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
1409 GCPtrDst &= s_auAddrSizeMasks[enmEffAddrMode];
1410
1411 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1412 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
1413 else
1414 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
1415 if (rcStrict == VINF_SUCCESS)
1416 {
1417 iemVmxVmreadSuccess(pVCpu, cbInstr);
1418 return VINF_SUCCESS;
1419 }
1420
1421 Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
1422 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrMap;
1423 return rcStrict;
1424 }
1425
1426 Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1427 return rcStrict;
1428}
1429
1430
1431/**
1432 * VMWRITE instruction execution worker.
1433 *
1434 * @param pVCpu The cross context virtual CPU structure.
1435 * @param cbInstr The instruction length.
1436 * @param iEffSeg The effective segment register to use with @a u64Val.
1437 * Pass UINT8_MAX if it is a register access.
1438 * @param enmEffAddrMode The effective addressing mode (only used with memory
1439 * operand).
1440 * @param u64Val The value to write (or the guest linear address of the
1441 * value); @a iEffSeg indicates whether it is a memory
1442 * operand.
1443 * @param u64FieldEnc The VMCS field encoding.
1444 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1445 * be NULL.
1446 */
1447IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode, uint64_t u64Val,
1448 uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
1449{
1450 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1451 {
1452 RT_NOREF(pExitInfo);
1453 /** @todo NSTVMX: intercept. */
1454 /** @todo NSTVMX: VMCS shadowing intercept (VMWRITE bitmap). */
1455 }
1456
1457 /* CPL. */
1458 if (pVCpu->iem.s.uCpl > 0)
1459 {
1460 Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1461 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_Cpl;
1462 return iemRaiseGeneralProtectionFault0(pVCpu);
1463 }
1464
1465 /* VMCS pointer in root mode. */
1466 if ( IEM_IS_VMX_ROOT_MODE(pVCpu)
1467 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1468 {
1469 Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
1470 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrInvalid;
1471 iemVmxVmFailInvalid(pVCpu);
1472 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1473 return VINF_SUCCESS;
1474 }
1475
1476 /* VMCS-link pointer in non-root mode. */
1477 if ( IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
1478 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
1479 {
1480 Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
1481 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_LinkPtrInvalid;
1482 iemVmxVmFailInvalid(pVCpu);
1483 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1484 return VINF_SUCCESS;
1485 }
1486
1487 /* If the VMWRITE instruction references memory, access the specified memory operand. */
1488 bool const fIsRegOperand = iEffSeg == UINT8_MAX;
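 /* By convention iEffSeg == UINT8_MAX denotes a register operand (see the parameter
    description above); anything else is treated as a memory operand. */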
1489 if (!fIsRegOperand)
1490 {
1491 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
1492 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
1493 RTGCPTR const GCPtrVal = u64Val & s_auAddrSizeMasks[enmEffAddrMode];
1494
1495 /* Read the value from the specified guest memory location. */
1496 VBOXSTRICTRC rcStrict;
1497 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1498 rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
1499 else
1500 {
1501 uint32_t u32Val;
1502 rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iEffSeg, GCPtrVal);
1503 u64Val = u32Val;
1504 }
1505 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1506 {
1507 Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
1508 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrMap;
1509 return rcStrict;
1510 }
1511 }
1512 else
1513 Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
1514
1515 /* Supported VMCS field. */
1516 if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
1517 {
1518 Log(("vmwrite: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
1519 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldInvalid;
1520 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
1521 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1522 return VINF_SUCCESS;
1523 }
1524
1525 /* Read-only VMCS field. */
1526 bool const fReadOnlyField = HMVmxIsVmcsFieldReadOnly(u64FieldEnc);
1527 if ( fReadOnlyField
1528 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
1529 {
1530 Log(("vmwrite: Write to read-only VMCS component %#RX64 -> VMFail\n", u64FieldEnc));
1531 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldRo;
1532 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
1533 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1534 return VINF_SUCCESS;
1535 }
1536
1537 /*
1538 * Setup writing to the current or shadow VMCS.
1539 */
1540 uint8_t *pbVmcs;
1541 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1542 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
1543 else
1544 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1545 Assert(pbVmcs);
1546
1547 VMXVMCSFIELDENC FieldEnc;
1548 FieldEnc.u = RT_LO_U32(u64FieldEnc);
1549 uint8_t const uWidth = FieldEnc.n.u2Width;
1550 uint8_t const uType = FieldEnc.n.u2Type;
1551 uint8_t const uWidthType = (uWidth << 2) | uType;
1552 uint8_t const uIndex = FieldEnc.n.u8Index;
1553 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
1554 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
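 /* Worked example, assuming the standard VMCS field encoding layout (bit 0 = access
    type, bits 9:1 = index, bits 11:10 = type, bits 14:13 = width): field 0x681e
    (guest RIP) gives uWidth=3 (natural), uType=2 (guest-state), uIndex=15, hence
    uWidthType=14 and offField selecting the 64-bit guest RIP field in VMXVVMCS. */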
1555
1556 /*
1557 * Write the VMCS component based on the field's effective width.
1558 *
1559 * The effective width of a 64-bit field is adjusted to 32 bits if the access type
1560 * indicates the high part of the field (little endian layout).
1561 */
1562 uint8_t *pbField = pbVmcs + offField;
1563 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
1564 switch (uEffWidth)
1565 {
1566 case VMX_VMCS_ENC_WIDTH_64BIT:
1567 case VMX_VMCS_ENC_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
1568 case VMX_VMCS_ENC_WIDTH_32BIT: *(uint32_t *)pbField = u64Val; break;
1569 case VMX_VMCS_ENC_WIDTH_16BIT: *(uint16_t *)pbField = u64Val; break;
1570 }
1571
1572 iemVmxVmSucceed(pVCpu);
1573 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1574 return VINF_SUCCESS;
1575}
1576
1577
1578/**
1579 * VMCLEAR instruction execution worker.
1580 *
1581 * @param pVCpu The cross context virtual CPU structure.
1582 * @param cbInstr The instruction length.
1583 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
1584 * @param GCPtrVmcs The linear address of the VMCS pointer.
1585 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1586 * be NULL.
1587 *
1588 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
1589 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1590 */
1591IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
1592 PCVMXVEXITINFO pExitInfo)
1593{
1594 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1595 {
1596 RT_NOREF(pExitInfo);
1597 /** @todo NSTVMX: intercept. */
1598 }
1599 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1600
1601 /* CPL. */
1602 if (pVCpu->iem.s.uCpl > 0)
1603 {
1604 Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1605 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_Cpl;
1606 return iemRaiseGeneralProtectionFault0(pVCpu);
1607 }
1608
1609 /* Get the VMCS pointer from the location specified by the source memory operand. */
1610 RTGCPHYS GCPhysVmcs;
1611 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
1612 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1613 {
1614 Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
1615 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrMap;
1616 return rcStrict;
1617 }
1618
1619 /* VMCS pointer alignment. */
1620 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
1621 {
1622 Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
1623 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAlign;
1624 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1625 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1626 return VINF_SUCCESS;
1627 }
1628
1629 /* VMCS physical-address width limits. */
1630 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
1631 {
1632 Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
1633 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrWidth;
1634 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1635 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1636 return VINF_SUCCESS;
1637 }
1638
1639 /* VMCS is not the VMXON region. */
1640 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
1641 {
1642 Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
1643 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrVmxon;
1644 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
1645 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1646 return VINF_SUCCESS;
1647 }
1648
1649 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
1650 restriction imposed by our implementation. */
1651 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
1652 {
1653 Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
1654 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAbnormal;
1655 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1656 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1657 return VINF_SUCCESS;
1658 }
1659
1660 /*
1661 * VMCLEAR allows committing and clearing any valid VMCS pointer.
1662 *
1663 * If the current VMCS is the one being cleared, set its state to 'clear' and commit
1664 * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
1665 * to 'clear'.
1666 */
1667 uint8_t const fVmcsStateClear = VMX_V_VMCS_STATE_CLEAR;
1668 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
1669 {
1670 Assert(GCPhysVmcs != NIL_RTGCPHYS); /* Paranoia. */
1671 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
1672 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsStateClear;
1673 iemVmxCommitCurrentVmcsToMemory(pVCpu);
1674 Assert(!IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1675 }
1676 else
1677 {
1678 rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_OFFSETOF(VMXVVMCS, fVmcsState),
1679 (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
1680 }
1681
1682 iemVmxVmSucceed(pVCpu);
1683 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1684 return rcStrict;
1685}
1686
1687
1688/**
1689 * VMPTRST instruction execution worker.
1690 *
1691 * @param pVCpu The cross context virtual CPU structure.
1692 * @param cbInstr The instruction length.
1693 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
1694 * @param GCPtrVmcs The linear address of where to store the current VMCS
1695 * pointer.
1696 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1697 * be NULL.
1698 *
1699 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
1700 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1701 */
1702IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
1703 PCVMXVEXITINFO pExitInfo)
1704{
1705 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1706 {
1707 RT_NOREF(pExitInfo);
1708 /** @todo NSTVMX: intercept. */
1709 }
1710 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1711
1712 /* CPL. */
1713 if (pVCpu->iem.s.uCpl > 0)
1714 {
1715 Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1716 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_Cpl;
1717 return iemRaiseGeneralProtectionFault0(pVCpu);
1718 }
1719
1720 /* Set the VMCS pointer to the location specified by the destination memory operand. */
1721 AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
1722 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
1723 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1724 {
1725 iemVmxVmSucceed(pVCpu);
1726 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1727 return rcStrict;
1728 }
1729
1730 Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand %#Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1731 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_PtrMap;
1732 return rcStrict;
1733}
1734
1735
1736/**
1737 * VMPTRLD instruction execution worker.
1738 *
1739 * @param pVCpu The cross context virtual CPU structure.
1740 * @param cbInstr The instruction length.
1741 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
 * @param GCPtrVmcs The linear address of the VMCS pointer to load.
1742 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1743 * be NULL.
1744 *
1745 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
1746 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1747 */
1748IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
1749 PCVMXVEXITINFO pExitInfo)
1750{
1751 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1752 {
1753 RT_NOREF(pExitInfo);
1754 /** @todo NSTVMX: intercept. */
1755 }
1756 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1757
1758 /* CPL. */
1759 if (pVCpu->iem.s.uCpl > 0)
1760 {
1761 Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1762 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_Cpl;
1763 return iemRaiseGeneralProtectionFault0(pVCpu);
1764 }
1765
1766 /* Get the VMCS pointer from the location specified by the source memory operand. */
1767 RTGCPHYS GCPhysVmcs;
1768 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
1769 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1770 {
1771 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
1772 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrMap;
1773 return rcStrict;
1774 }
1775
1776 /* VMCS pointer alignment. */
1777 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
1778 {
1779 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
1780 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAlign;
1781 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1782 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1783 return VINF_SUCCESS;
1784 }
1785
1786 /* VMCS physical-address width limits. */
1787 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
1788 {
1789 Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
1790 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrWidth;
1791 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1792 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1793 return VINF_SUCCESS;
1794 }
1795
1796 /* VMCS is not the VMXON region. */
1797 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
1798 {
1799 Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
1800 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrVmxon;
1801 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
1802 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1803 return VINF_SUCCESS;
1804 }
1805
1806 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
1807 restriction imposed by our implementation. */
1808 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
1809 {
1810 Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
1811 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAbnormal;
1812 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1813 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1814 return VINF_SUCCESS;
1815 }
1816
1817 /* Read the VMCS revision ID from the VMCS. */
1818 VMXVMCSREVID VmcsRevId;
1819 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
1820 if (RT_FAILURE(rc))
1821 {
1822 Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
1823 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrReadPhys;
1824 return rc;
1825 }
1826
1827 /* Verify that the VMCS revision specified by the guest matches what we reported to the
1828 guest, and also check the VMCS-shadowing feature. */
1829 if ( VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
1830 || ( VmcsRevId.n.fIsShadowVmcs
1831 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
1832 {
1833 if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
1834 {
1835 Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFail()\n", VMX_V_VMCS_REVISION_ID,
1836 VmcsRevId.n.u31RevisionId));
1837 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_VmcsRevId;
1838 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
1839 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1840 return VINF_SUCCESS;
1841 }
1842
1843 Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
1844 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_ShadowVmcs;
1845 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
1846 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1847 return VINF_SUCCESS;
1848 }
1849
1850 /*
1851 * We only maintain the current VMCS in our virtual CPU context (CPUMCTX). Therefore,
1852 * VMPTRLD shall always flush any existing current VMCS back to guest memory before loading
1853 * a new VMCS as current.
1854 */
1855 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
1856 {
1857 iemVmxCommitCurrentVmcsToMemory(pVCpu);
1858 IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
1859 }
1860
1861 iemVmxVmSucceed(pVCpu);
1862 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1863 return VINF_SUCCESS;
1864}
1865
1866
1867/**
1868 * VMXON instruction execution worker.
1869 *
1870 * @param pVCpu The cross context virtual CPU structure.
1871 * @param cbInstr The instruction length.
1872 * @param iEffSeg The effective segment register to use with @a
1873 * GCPtrVmxon.
1874 * @param GCPtrVmxon The linear address of the VMXON pointer.
1875 * @param pExitInfo Pointer to the VM-exit instruction information struct.
1876 * Optional, can be NULL.
1877 *
1878 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
1879 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1880 */
1881IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmxon,
1882 PCVMXVEXITINFO pExitInfo)
1883{
1884#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1885 RT_NOREF5(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
1886 return VINF_EM_RAW_EMULATE_INSTR;
1887#else
1888 if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
1889 {
1890 /* CPL. */
1891 if (pVCpu->iem.s.uCpl > 0)
1892 {
1893 Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1894 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cpl;
1895 return iemRaiseGeneralProtectionFault0(pVCpu);
1896 }
1897
1898 /* A20M (A20 Masked) mode. */
1899 if (!PGMPhysIsA20Enabled(pVCpu))
1900 {
1901 Log(("vmxon: A20M mode -> #GP(0)\n"));
1902 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_A20M;
1903 return iemRaiseGeneralProtectionFault0(pVCpu);
1904 }
1905
1906 /* CR0 fixed bits. */
1907 bool const fUnrestrictedGuest = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxUnrestrictedGuest;
1908 uint64_t const uCr0Fixed0 = fUnrestrictedGuest ? VMX_V_CR0_FIXED0_UX : VMX_V_CR0_FIXED0;
1909 if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
1910 {
1911 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
1912 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed0;
1913 return iemRaiseGeneralProtectionFault0(pVCpu);
1914 }
1915
1916 /* CR4 fixed bits. */
1917 if ((pVCpu->cpum.GstCtx.cr4 & VMX_V_CR4_FIXED0) != VMX_V_CR4_FIXED0)
1918 {
1919 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
1920 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed0;
1921 return iemRaiseGeneralProtectionFault0(pVCpu);
1922 }
1923
1924 /* Feature control MSR's LOCK and VMXON bits. */
1925 uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
1926 if (!(uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON)))
1927 {
1928 Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
1929 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_MsrFeatCtl;
1930 return iemRaiseGeneralProtectionFault0(pVCpu);
1931 }
1932
1933 /* Get the VMXON pointer from the location specified by the source memory operand. */
1934 RTGCPHYS GCPhysVmxon;
1935 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
1936 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1937 {
1938 Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
1939 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrMap;
1940 return rcStrict;
1941 }
1942
1943 /* VMXON region pointer alignment. */
1944 if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
1945 {
1946 Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
1947 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAlign;
1948 iemVmxVmFailInvalid(pVCpu);
1949 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1950 return VINF_SUCCESS;
1951 }
1952
1953 /* VMXON physical-address width limits. */
1954 if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
1955 {
1956 Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
1957 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrWidth;
1958 iemVmxVmFailInvalid(pVCpu);
1959 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1960 return VINF_SUCCESS;
1961 }
1962
1963 /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
1964 restriction imposed by our implementation. */
1965 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
1966 {
1967 Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
1968 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAbnormal;
1969 iemVmxVmFailInvalid(pVCpu);
1970 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1971 return VINF_SUCCESS;
1972 }
1973
1974 /* Read the VMCS revision ID from the VMXON region. */
1975 VMXVMCSREVID VmcsRevId;
1976 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
1977 if (RT_FAILURE(rc))
1978 {
1979 Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
1980 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrReadPhys;
1981 return rc;
1982 }
1983
1984 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
1985 if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
1986 {
1987 /* Revision ID mismatch. */
1988 if (!VmcsRevId.n.fIsShadowVmcs)
1989 {
1990 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
1991 VmcsRevId.n.u31RevisionId));
1992 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmcsRevId;
1993 iemVmxVmFailInvalid(pVCpu);
1994 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1995 return VINF_SUCCESS;
1996 }
1997
1998 /* Shadow VMCS disallowed. */
1999 Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
2000 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_ShadowVmcs;
2001 iemVmxVmFailInvalid(pVCpu);
2002 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2003 return VINF_SUCCESS;
2004 }
2005
2006 /*
2007 * Record that we're in VMX operation, block INIT, block and disable A20M.
2008 */
2009 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
2010 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
2011 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
2012
2013 /** @todo NSTVMX: clear address-range monitoring. */
2014 /** @todo NSTVMX: Intel PT. */
2015
2016 iemVmxVmSucceed(pVCpu);
2017 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2018# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
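 /* In the nested-hwvirt-only-in-IEM configuration, switch the EM execution policy so
    that all further guest execution goes through IEM (ring-3 only). */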
2019 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
2020# else
2021 return VINF_SUCCESS;
2022# endif
2023 }
2024 else if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
2025 {
2026 RT_NOREF(pExitInfo);
2027 /** @todo NSTVMX: intercept. */
2028 }
2029
2030 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
2031
2032 /* CPL. */
2033 if (pVCpu->iem.s.uCpl > 0)
2034 {
2035 Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
2036 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxRootCpl;
2037 return iemRaiseGeneralProtectionFault0(pVCpu);
2038 }
2039
2040 /* VMXON when already in VMX root mode. */
2041 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
2042 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxAlreadyRoot;
2043 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2044 return VINF_SUCCESS;
2045#endif
2046}
2047
2048
2049/**
2050 * Gets the instruction diagnostic for segment base checks during VM-entry of a
2051 * nested-guest.
2052 *
2053 * @param iSegReg The segment index (X86_SREG_XXX).
2054 */
2055IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBase(unsigned iSegReg)
2056{
2057 switch (iSegReg)
2058 {
2059 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseCs;
2060 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseDs;
2061 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseEs;
2062 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseFs;
2063 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseGs;
2064 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseSs;
2065 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
2066 }
2067}
2068
2069
2070/**
2071 * Gets the instruction diagnostic for segment base checks during VM-entry of a
2072 * nested-guest that is in Virtual-8086 mode.
2073 *
2074 * @param iSegReg The segment index (X86_SREG_XXX).
2075 */
2076IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBaseV86(unsigned iSegReg)
2077{
2078 switch (iSegReg)
2079 {
2080 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseV86Cs;
2081 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ds;
2082 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseV86Es;
2083 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseV86Fs;
2084 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseV86Gs;
2085 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ss;
2086 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
2087 }
2088}
2089
2090
2091/**
2092 * Gets the instruction diagnostic for segment limit checks during VM-entry of a
2093 * nested-guest that is in Virtual-8086 mode.
2094 *
2095 * @param iSegReg The segment index (X86_SREG_XXX).
2096 */
2097IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegLimitV86(unsigned iSegReg)
2098{
2099 switch (iSegReg)
2100 {
2101 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegLimitV86Cs;
2102 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ds;
2103 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegLimitV86Es;
2104 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegLimitV86Fs;
2105 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegLimitV86Gs;
2106 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ss;
2107 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
2108 }
2109}
2110
2111
2112/**
2113 * Gets the instruction diagnostic for segment attribute checks during VM-entry of a
2114 * nested-guest that is in Virtual-8086 mode.
2115 *
2116 * @param iSegReg The segment index (X86_SREG_XXX).
2117 */
2118IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrV86(unsigned iSegReg)
2119{
2120 switch (iSegReg)
2121 {
2122 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrV86Cs;
2123 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ds;
2124 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrV86Es;
2125 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrV86Fs;
2126 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrV86Gs;
2127 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ss;
2128 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
2129 }
2130}
2131
2132
2133/**
2134 * Gets the instruction diagnostic for segment attributes reserved bits failure
2135 * during VM-entry of a nested-guest.
2136 *
2137 * @param iSegReg The segment index (X86_SREG_XXX).
2138 */
2139IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrRsvd(unsigned iSegReg)
2140{
2141 switch (iSegReg)
2142 {
2143 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdCs;
2144 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdDs;
2145 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrRsvdEs;
2146 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdFs;
2147 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdGs;
2148 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdSs;
2149 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
2150 }
2151}
2152
2153
2154/**
2155 * Gets the instruction diagnostic for segment attributes descriptor-type
2156 * (code/segment or system) failure during VM-entry of a nested-guest.
2157 *
2158 * @param iSegReg The segment index (X86_SREG_XXX).
2159 */
2160IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDescType(unsigned iSegReg)
2161{
2162 switch (iSegReg)
2163 {
2164 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeCs;
2165 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeDs;
2166 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeEs;
2167 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeFs;
2168 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeGs;
2169 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeSs;
2170 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
2171 }
2172}
2173
2174
2175/**
2176 * Gets the instruction diagnostic for segment attribute present-bit (P) failure
2177 * during VM-entry of a nested-guest.
2178 *
2179 * @param iSegReg The segment index (X86_SREG_XXX).
2180 */
2181IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrPresent(unsigned iSegReg)
2182{
2183 switch (iSegReg)
2184 {
2185 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrPresentCs;
2186 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrPresentDs;
2187 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrPresentEs;
2188 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrPresentFs;
2189 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrPresentGs;
2190 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrPresentSs;
2191 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
2192 }
2193}
2194
2195
2196/**
2197 * Gets the instruction diagnostic for segment attribute granularity failure during
2198 * VM-entry of a nested-guest.
2199 *
2200 * @param iSegReg The segment index (X86_SREG_XXX).
2201 */
2202IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrGran(unsigned iSegReg)
2203{
2204 switch (iSegReg)
2205 {
2206 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrGranCs;
2207 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrGranDs;
2208 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrGranEs;
2209 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrGranFs;
2210 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrGranGs;
2211 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrGranSs;
2212 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
2213 }
2214}
2215
2216/**
2217 * Gets the instruction diagnostic for segment attribute DPL/RPL failure during
2218 * VM-entry of a nested-guest.
2219 *
2220 * @param iSegReg The segment index (X86_SREG_XXX).
2221 */
2222IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDplRpl(unsigned iSegReg)
2223{
2224 switch (iSegReg)
2225 {
2226 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplCs;
2227 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplDs;
2228 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDplRplEs;
2229 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplFs;
2230 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplGs;
2231 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplSs;
2232 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
2233 }
2234}
2235
2236
2237/**
2238 * Gets the instruction diagnostic for segment attribute type accessed failure
2239 * during VM-entry of a nested-guest.
2240 *
2241 * @param iSegReg The segment index (X86_SREG_XXX).
2242 */
2243IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrTypeAcc(unsigned iSegReg)
2244{
2245 switch (iSegReg)
2246 {
2247 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccCs;
2248 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccDs;
2249 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccEs;
2250 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccFs;
2251 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccGs;
2252 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccSs;
2253 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
2254 }
2255}
2256
2257
2258/**
2259 * Gets the instruction diagnostic for CR3 referenced PDPTE reserved bits failure
2260 * during VM-entry of a nested-guest.
2261 *
2262 * @param iPdpte The PDPTE entry index.
2263 */
2264IEM_STATIC VMXVDIAG iemVmxGetDiagVmentryPdpteRsvd(unsigned iPdpte)
2265{
2266 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
2267 switch (iPdpte)
2268 {
2269 case 0: return kVmxVDiag_Vmentry_GuestPdpte0Rsvd;
2270 case 1: return kVmxVDiag_Vmentry_GuestPdpte1Rsvd;
2271 case 2: return kVmxVDiag_Vmentry_GuestPdpte2Rsvd;
2272 case 3: return kVmxVDiag_Vmentry_GuestPdpte3Rsvd;
2273 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
2274 }
2275}
2276
2277
2278/**
2279 * Checks guest control registers, debug registers and MSRs as part of VM-entry.
2280 *
2281 * @param pVCpu The cross context virtual CPU structure.
2282 * @param pszInstr The VMX instruction name (for logging purposes).
2283 */
2284IEM_STATIC int iemVmxVmentryCheckGuestControlRegsMsrs(PVMCPU pVCpu, const char *pszInstr)
2285{
2286 /*
2287 * Guest Control Registers, Debug Registers, and MSRs.
2288 * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs".
2289 */
2290 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2291 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
2292 const char *const pszFailure = "VM-exit";
2293
2294 /* CR0 reserved bits. */
2295 {
2296 /* CR0 MB1 bits. */
2297 uint64_t u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
2298 Assert(u64Cr0Fixed0 & (X86_CR0_NW | X86_CR0_CD));
2299 if (fUnrestrictedGuest)
2300 u64Cr0Fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
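 /* With unrestricted guests, CR0.PE and CR0.PG need not be 1, so they are excluded
    from the fixed-0 check here. */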
2301 if (~pVmcs->u64GuestCr0.u & u64Cr0Fixed0)
2302 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed0);
2303
2304 /* CR0 MBZ bits. */
2305 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
2306 if (pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1)
2307 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed1);
2308
2309 /* Without unrestricted-guest support, VT-x does not support unpaged protected mode. */
2310 if ( !fUnrestrictedGuest
2311 && (pVmcs->u64GuestCr0.u & X86_CR0_PG)
2312 && !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
2313 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0PgPe);
2314 }
2315
2316 /* CR4 reserved bits. */
2317 {
2318 /* CR4 MB1 bits. */
2319 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
2320 if (~pVmcs->u64GuestCr4.u & u64Cr4Fixed0)
2321 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed0);
2322
2323 /* CR4 MBZ bits. */
2324 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
2325 if (pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1)
2326 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed1);
2327 }
2328
2329 /* DEBUGCTL MSR. */
2330 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
2331 && (pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
2332 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDebugCtl);
2333
2334 /* 64-bit CPU checks. */
2335 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
2336 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2337 {
2338 if (fGstInLongMode)
2339 {
2340 /* PAE must be set. */
2341 if ( (pVmcs->u64GuestCr0.u & X86_CR0_PG)
2342 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE))
2343 { /* likely */ }
2344 else
2345 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPae);
2346 }
2347 else
2348 {
2349 /* PCIDE should not be set. */
2350 if (!(pVmcs->u64GuestCr4.u & X86_CR4_PCIDE))
2351 { /* likely */ }
2352 else
2353 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPcide);
2354 }
2355
2356 /* CR3. */
2357 if (!(pVmcs->u64GuestCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
2358 { /* likely */ }
2359 else
2360 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr3);
2361
2362 /* DR7. */
2363 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
2364 && (pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
2365 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDr7);
2366
2367 /* SYSENTER ESP and SYSENTER EIP. */
2368 if ( X86_IS_CANONICAL(pVmcs->u64GuestSysenterEsp.u)
2369 && X86_IS_CANONICAL(pVmcs->u64GuestSysenterEip.u))
2370 { /* likely */ }
2371 else
2372 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSysenterEspEip);
2373 }
2374
2375 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)); /* We don't support loading IA32_PERF_GLOBAL_CTRL MSR yet. */
2376
2377 /* PAT MSR. */
2378 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
2379 && !CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
2380 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPatMsr);
2381
2382 /* EFER MSR. */
2383 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
2384 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
2385 && (pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
2386 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsrRsvd);
2387
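 /* EFER.LMA must match the "IA-32e mode guest" entry control; and if CR0.PG is set,
    EFER.LMA must also equal EFER.LME (see Intel spec. 26.3.1.1). */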
2388 bool const fGstLma = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LMA);
2389 bool const fGstLme = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LME);
2390 if ( fGstInLongMode == fGstLma
2391 && ( !(pVmcs->u64GuestCr0.u & X86_CR0_PG)
2392 || fGstLma == fGstLme))
2393 { /* likely */ }
2394 else
2395 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsr);
2396
2397 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR)); /* We don't support loading IA32_BNDCFGS MSR yet. */
2398
2399 NOREF(pszInstr);
2400 NOREF(pszFailure);
2401 return VINF_SUCCESS;
2402}
2403
2404
2405/**
2406 * Checks guest segment registers, LDTR and TR as part of VM-entry.
2407 *
2408 * @param pVCpu The cross context virtual CPU structure.
2409 * @param pszInstr The VMX instruction name (for logging purposes).
2410 */
2411IEM_STATIC int iemVmxVmentryCheckGuestSegRegs(PVMCPU pVCpu, const char *pszInstr)
2412{
2413 /*
2414 * Segment registers.
2415 * See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2416 */
2417 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2418 bool const fGstInV86Mode = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);
2419 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
2420 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
2421 const char *const pszFailure = "VM-exit";
2422
2423 /* Selectors. */
2424 if ( !fGstInV86Mode
2425 && !fUnrestrictedGuest
2426 && (pVmcs->GuestSs & X86_SEL_RPL) != (pVmcs->GuestCs & X86_SEL_RPL))
2427 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelCsSsRpl);
2428
2429 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
2430 {
2431 CPUMSELREG SelReg;
2432 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &SelReg);
2433 if (RT_LIKELY(rc == VINF_SUCCESS))
2434 { /* likely */ }
2435 else
2436 return rc;
2437
2438 /*
2439 * Virtual-8086 mode checks.
2440 */
2441 if (fGstInV86Mode)
2442 {
2443 /* Base address. */
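 /* In virtual-8086 mode the segment base must equal the selector shifted left by 4,
    e.g. selector 0x1234 implies base 0x12340. */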
2444 if (SelReg.u64Base == (uint64_t)SelReg.Sel << 4)
2445 { /* likely */ }
2446 else
2447 {
2448 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBaseV86(iSegReg);
2449 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2450 }
2451
2452 /* Limit. */
2453 if (SelReg.u32Limit == 0xffff)
2454 { /* likely */ }
2455 else
2456 {
2457 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegLimitV86(iSegReg);
2458 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2459 }
2460
2461 /* Attribute. */
2462 if (SelReg.Attr.u == 0xf3)
2463 { /* likely */ }
2464 else
2465 {
2466 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrV86(iSegReg);
2467 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2468 }
2469
2470 /* We're done; move to checking the next segment. */
2471 continue;
2472 }
2473
2474 /* Checks done by 64-bit CPUs. */
2475 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2476 {
2477 /* Base address. */
2478 if ( iSegReg == X86_SREG_FS
2479 || iSegReg == X86_SREG_GS)
2480 {
2481 if (X86_IS_CANONICAL(SelReg.u64Base))
2482 { /* likely */ }
2483 else
2484 {
2485 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
2486 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2487 }
2488 }
2489 else if (iSegReg == X86_SREG_CS)
2490 {
2491 if (!RT_HI_U32(SelReg.u64Base))
2492 { /* likely */ }
2493 else
2494 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseCs);
2495 }
2496 else
2497 {
2498 if ( SelReg.Attr.n.u1Unusable
2499 || !RT_HI_U32(SelReg.u64Base))
2500 { /* likely */ }
2501 else
2502 {
2503 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
2504 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2505 }
2506 }
2507 }
2508
2509 /*
2510 * Checks outside Virtual-8086 mode.
2511 */
2512 uint8_t const uSegType = SelReg.Attr.n.u4Type;
2513 uint8_t const fCodeDataSeg = SelReg.Attr.n.u1DescType;
2514 uint8_t const fUsable = !SelReg.Attr.n.u1Unusable;
2515 uint8_t const uDpl = SelReg.Attr.n.u2Dpl;
2516 uint8_t const fPresent = SelReg.Attr.n.u1Present;
2517 uint8_t const uGranularity = SelReg.Attr.n.u1Granularity;
2518 uint8_t const uDefBig = SelReg.Attr.n.u1DefBig;
2519 uint8_t const fSegLong = SelReg.Attr.n.u1Long;
2520
2521 /* Code or usable segment. */
2522 if ( iSegReg == X86_SREG_CS
2523 || fUsable)
2524 {
2525 /* Reserved bits (bits 31:17 and bits 11:8). */
2526 if (!(SelReg.Attr.u & 0xfffe0f00))
2527 { /* likely */ }
2528 else
2529 {
2530 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrRsvd(iSegReg);
2531 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2532 }
2533
2534 /* Descriptor type. */
2535 if (fCodeDataSeg)
2536 { /* likely */ }
2537 else
2538 {
2539 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDescType(iSegReg);
2540 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2541 }
2542
2543 /* Present. */
2544 if (fPresent)
2545 { /* likely */ }
2546 else
2547 {
2548 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrPresent(iSegReg);
2549 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2550 }
2551
2552 /* Granularity. */
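 /* If limit bits 11:0 are not all set, G must be 0; if any of limit bits 31:20 are set,
    G must be 1 (e.g. a byte-granular segment cannot have a limit above 0xfffff). */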
2553 if ( ((SelReg.u32Limit & 0x00000fff) == 0x00000fff || !uGranularity)
2554 && ((SelReg.u32Limit & 0xfff00000) == 0x00000000 || uGranularity))
2555 { /* likely */ }
2556 else
2557 {
2558 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrGran(iSegReg);
2559 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2560 }
2561 }
2562
2563 if (iSegReg == X86_SREG_CS)
2564 {
2565 /* Segment Type and DPL. */
2566 if ( uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
2567 && fUnrestrictedGuest)
2568 {
2569 if (uDpl == 0)
2570 { /* likely */ }
2571 else
2572 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplZero);
2573 }
2574 else if ( uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_ACCESSED)
2575 || uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
2576 {
2577 X86DESCATTR SsAttr; SsAttr.u = pVmcs->u32GuestSsAttr;
2578 if (uDpl == SsAttr.n.u2Dpl)
2579 { /* likely */ }
2580 else
2581 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplEqSs);
2582 }
2583 else if ((uSegType & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
2584 == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
2585 {
2586 X86DESCATTR SsAttr; SsAttr.u = pVmcs->u32GuestSsAttr;
2587 if (uDpl <= SsAttr.n.u2Dpl)
2588 { /* likely */ }
2589 else
2590 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplLtSs);
2591 }
2592 else
2593 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsType);
2594
2595 /* Def/Big. */
2596 if ( fGstInLongMode
2597 && fSegLong)
2598 {
2599 if (uDefBig == 0)
2600 { /* likely */ }
2601 else
2602 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDefBig);
2603 }
2604 }
2605 else if (iSegReg == X86_SREG_SS)
2606 {
2607 /* Segment Type. */
2608 if ( !fUsable
2609 || uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
2610 || uSegType == (X86_SEL_TYPE_DOWN | X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED))
2611 { /* likely */ }
2612 else
2613 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsType);
2614
2615 /* DPL. */
2616 if (fUnrestrictedGuest)
2617 {
2618 if (uDpl == (SelReg.Sel & X86_SEL_RPL))
2619 { /* likely */ }
2620 else
2621 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplEqRpl);
2622 }
2623 X86DESCATTR CsAttr; CsAttr.u = pVmcs->u32GuestCsAttr;
2624 if ( CsAttr.n.u4Type == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
2625 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
2626 {
2627 if (uDpl == 0)
2628 { /* likely */ }
2629 else
2630 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplZero);
2631 }
2632 }
2633 else
2634 {
2635 /* DS, ES, FS, GS. */
2636 if (fUsable)
2637 {
2638 /* Segment type. */
2639 if (uSegType & X86_SEL_TYPE_ACCESSED)
2640 { /* likely */ }
2641 else
2642 {
2643 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrTypeAcc(iSegReg);
2644 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2645 }
2646
2647 if ( !(uSegType & X86_SEL_TYPE_CODE)
2648 || (uSegType & X86_SEL_TYPE_READ))
2649 { /* likely */ }
2650 else
2651 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsTypeRead);
2652
2653 /* DPL. */
2654 if ( !fUnrestrictedGuest
2655 && uSegType <= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
2656 {
2657 if (uDpl >= (SelReg.Sel & X86_SEL_RPL))
2658 { /* likely */ }
2659 else
2660 {
2661 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDplRpl(iSegReg);
2662 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2663 }
2664 }
2665 }
2666 }
2667 }
2668
2669 /*
2670 * LDTR.
2671 */
2672 {
2673 CPUMSELREG Ldtr;
2674 Ldtr.Sel = pVmcs->GuestLdtr;
2675 Ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
2676 Ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
2677 Ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
2678
2679 if (!Ldtr.Attr.n.u1Unusable)
2680 {
2681 /* Selector. */
2682 if (!(Ldtr.Sel & X86_SEL_LDT))
2683 { /* likely */ }
2684 else
2685 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelLdtr);
2686
2687 /* Base. */
2688 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2689 {
2690 if (X86_IS_CANONICAL(Ldtr.u64Base))
2691 { /* likely */ }
2692 else
2693 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseLdtr);
2694 }
2695
2696 /* Attributes. */
2697 /* Reserved bits (bits 31:17 and bits 11:8). */
2698 if (!(Ldtr.Attr.u & 0xfffe0f00))
2699 { /* likely */ }
2700 else
2701 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrRsvd);
2702
2703 if (Ldtr.Attr.n.u4Type == X86_SEL_TYPE_SYS_LDT)
2704 { /* likely */ }
2705 else
2706 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrType);
2707
2708 if (!Ldtr.Attr.n.u1DescType)
2709 { /* likely */ }
2710 else
2711 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrDescType);
2712
2713 if (Ldtr.Attr.n.u1Present)
2714 { /* likely */ }
2715 else
2716 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrPresent);
2717
2718 if ( ((Ldtr.u32Limit & 0x00000fff) == 0x00000fff || !Ldtr.Attr.n.u1Granularity)
2719 && ((Ldtr.u32Limit & 0xfff00000) == 0x00000000 || Ldtr.Attr.n.u1Granularity))
2720 { /* likely */ }
2721 else
2722 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrGran);
2723 }
2724 }
2725
2726 /*
2727 * TR.
2728 */
2729 {
2730 CPUMSELREG Tr;
2731 Tr.Sel = pVmcs->GuestTr;
2732 Tr.u32Limit = pVmcs->u32GuestTrLimit;
2733 Tr.u64Base = pVmcs->u64GuestTrBase.u;
2734 Tr.Attr.u = pVmcs->u32GuestTrAttr;
2735
2736 /* Selector. */
2737 if (!(Tr.Sel & X86_SEL_LDT))
2738 { /* likely */ }
2739 else
2740 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelTr);
2741
2742 /* Base. */
2743 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2744 {
2745 if (X86_IS_CANONICAL(Tr.u64Base))
2746 { /* likely */ }
2747 else
2748 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseTr);
2749 }
2750
2751 /* Attributes. */
2752 /* Reserved bits (bits 31:17 and bits 11:8). */
2753 if (!(Tr.Attr.u & 0xfffe0f00))
2754 { /* likely */ }
2755 else
2756 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrRsvd);
2757
2758 if (!Tr.Attr.n.u1Unusable)
2759 { /* likely */ }
2760 else
2761 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrUnusable);
2762
2763 if ( Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY
2764 || ( !fGstInLongMode
2765 && Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY))
2766 { /* likely */ }
2767 else
2768 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrType);
2769
2770 if (!Tr.Attr.n.u1DescType)
2771 { /* likely */ }
2772 else
2773 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrDescType);
2774
2775 if (Tr.Attr.n.u1Present)
2776 { /* likely */ }
2777 else
2778 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrPresent);
2779
2780 if ( ((Tr.u32Limit & 0x00000fff) == 0x00000fff || !Tr.Attr.n.u1Granularity)
2781 && ((Tr.u32Limit & 0xfff00000) == 0x00000000 || Tr.Attr.n.u1Granularity))
2782 { /* likely */ }
2783 else
2784 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrGran);
2785 }
2786
2787 NOREF(pszInstr);
2788 NOREF(pszFailure);
2789 return VINF_SUCCESS;
2790}
2791
2792
2793/**
2794 * Checks guest GDTR and IDTR as part of VM-entry.
2795 *
2796 * @param pVCpu The cross context virtual CPU structure.
2797 * @param pszInstr The VMX instruction name (for logging purposes).
2798 */
2799IEM_STATIC int iemVmxVmentryCheckGuestGdtrIdtr(PVMCPU pVCpu, const char *pszInstr)
2800{
2801 /*
2802 * GDTR and IDTR.
2803 * See Intel spec. 26.3.1.3 "Checks on Guest Descriptor-Table Registers".
2804 */
2805 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2806 const char *const pszFailure = "VM-exit";
2807 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2808 {
2809 /* Base. */
2810 if (X86_IS_CANONICAL(pVmcs->u64GuestGdtrBase.u))
2811 { /* likely */ }
2812 else
2813 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrBase);
2814
2815 if (X86_IS_CANONICAL(pVmcs->u64GuestIdtrBase.u))
2816 { /* likely */ }
2817 else
2818 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrBase);
2819 }
2820
2821 /* Limit. */
2822 if (!RT_HI_U16(pVmcs->u32GuestGdtrLimit))
2823 { /* likely */ }
2824 else
2825 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrLimit);
2826
2827 if (!RT_HI_U16(pVmcs->u32GuestIdtrLimit))
2828 { /* likely */ }
2829 else
2830 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrLimit);
2831
2832 NOREF(pszInstr);
2833 NOREF(pszFailure);
2834 return VINF_SUCCESS;
2835}
2836
2837
2838/**
2839 * Checks guest RIP and RFLAGS as part of VM-entry.
2840 *
2841 * @param pVCpu The cross context virtual CPU structure.
2842 * @param pszInstr The VMX instruction name (for logging purposes).
2843 */
2844IEM_STATIC int iemVmxVmentryCheckGuestRipRFlags(PVMCPU pVCpu, const char *pszInstr)
2845{
2846 /*
2847 * RIP and RFLAGS.
2848 * See Intel spec. 26.3.1.4 "Checks on Guest RIP and RFLAGS".
2849 */
2850 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2851 const char *const pszFailure = "VM-exit";
2852 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
2853
2854 /* RIP. */
2855 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2856 {
2857 X86DESCATTR CsAttr; CsAttr.u = pVmcs->u32GuestCsAttr;
2858 if ( !fGstInLongMode
2859 || !CsAttr.n.u1Long)
2860 {
2861 if (!RT_HI_U32(pVmcs->u64GuestRip.u))
2862 { /* likely */ }
2863 else
2864 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRipRsvd);
2865 }
2866
2867 if ( fGstInLongMode
2868 && CsAttr.n.u1Long)
2869 {
2870 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth == 48); /* Canonical. */
2871 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth < 64
2872 && X86_IS_CANONICAL(pVmcs->u64GuestRip.u))
2873 { /* likely */ }
2874 else
2875 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRip);
2876 }
2877 }
2878
2879 /* RFLAGS (bits 63:22 (or 31:22), bits 15, 5, 3 are reserved, bit 1 MB1). */
2880 uint64_t const uGuestRFlags = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode ? pVmcs->u64GuestRFlags.u
2881 : pVmcs->u64GuestRFlags.s.Lo;
2882 if ( !(uGuestRFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK))
2883 && (uGuestRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK)
2884 { /* likely */ }
2885 else
2886 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsRsvd);
2887
2888 if ( fGstInLongMode
2889 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
2890 {
2891 if (!(uGuestRFlags & X86_EFL_VM))
2892 { /* likely */ }
2893 else
2894 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsVm);
2895 }
2896
2897 if ( VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo)
2898 && VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo) == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
2899 {
2900 if (uGuestRFlags & X86_EFL_IF)
2901 { /* likely */ }
2902 else
2903 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsIf);
2904 }
2905
2906 NOREF(pszInstr);
2907 NOREF(pszFailure);
2908 return VINF_SUCCESS;
2909}
2910
2911
2912/**
2913 * Checks guest non-register state as part of VM-entry.
2914 *
2915 * @param pVCpu The cross context virtual CPU structure.
2916 * @param pszInstr The VMX instruction name (for logging purposes).
2917 */
2918IEM_STATIC int iemVmxVmentryCheckGuestNonRegState(PVMCPU pVCpu, const char *pszInstr)
2919{
2920 /*
2921 * Guest non-register state.
2922 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2923 */
2924 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2925 const char *const pszFailure = "VM-exit";
2926
2927 /*
2928 * Activity state.
2929 */
2930 uint64_t const u64GuestVmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
2931 uint32_t const fActivityStateMask = RT_BF_GET(u64GuestVmxMiscMsr, VMX_BF_MISC_ACTIVITY_STATES);
2932 if (!(pVmcs->u32GuestActivityState & fActivityStateMask))
2933 { /* likely */ }
2934 else
2935 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateRsvd);
2936
2937 X86DESCATTR SsAttr; SsAttr.u = pVmcs->u32GuestSsAttr;
2938
2939 if ( !SsAttr.n.u2Dpl
2940 || pVmcs->u32GuestActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT)
2941 { /* likely */ }
2942 else
2943 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateSsDpl);
2944
2945 if ( pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
2946 || pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
2947 {
2948 if (pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE)
2949 { /* likely */ }
2950 else
2951 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateStiMovSs);
2952 }
2953
2954 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
2955 {
2956 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
2957 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(pVmcs->u32EntryIntInfo);
2958 AssertCompile(VMX_V_GUEST_ACTIVITY_STATE_MASK == (VMX_VMCS_GUEST_ACTIVITY_HLT | VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN));
2959 switch (pVmcs->u32GuestActivityState)
2960 {
2961 case VMX_VMCS_GUEST_ACTIVITY_HLT:
2962 {
2963 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT
2964 || uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
2965 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
2966 && ( uVector == X86_XCPT_DB
2967 || uVector == X86_XCPT_MC))
2968 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
2969 && uVector == 0))
2970 { /* likely */ }
2971 else
2972 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateHlt);
2973 break;
2974 }
2975
2976 case VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN:
2977 {
2978 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
2979 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
2980 && uVector == X86_XCPT_MC))
2981 { /* likely */ }
2982 else
2983 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateShutdown);
2984 break;
2985 }
2986
2987 case VMX_VMCS_GUEST_ACTIVITY_ACTIVE:
2988 default:
2989 break;
2990 }
2991 }
2992
2993 /*
2994 * Interruptibility state.
2995 */
2996 if (!(pVmcs->u32GuestIntrState & ~VMX_VMCS_GUEST_INT_STATE_MASK))
2997 { /* likely */ }
2998 else
2999 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRsvd);
3000
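/* Blocking by MOV SS and blocking by STI must not both be indicated. */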
3001 if ((pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3002 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3003 { /* likely */ }
3004 else
3005 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateStiMovSs);
3006
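/* If RFLAGS.IF is 0, blocking by STI must not be indicated. */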
3007 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_IF)
3008 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3009 { /* likely */ }
3010 else
3011 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRFlagsSti);
3012
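/* Injecting an external interrupt or an NMI places further restrictions on the interruptibility state. */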
3013 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
3014 {
3015 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
3016 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3017 {
3018 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
3019 { /* likely */ }
3020 else
3021 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateExtInt);
3022 }
3023 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
3024 {
3025 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
3026 { /* likely */ }
3027 else
3028 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateNmi);
3029
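/* With virtual-NMIs, injecting an NMI additionally requires that blocking by NMI is not indicated. */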
3030 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
3031 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI))
3032 { /* likely */ }
3033 else
3034 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateVirtNmi);
3035 }
3036 }
3037
3038 /* We don't support SMM yet. So blocking-by-SMIs must not be set. */
3039 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI))
3040 { /* likely */ }
3041 else
3042 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateSmi);
3043
3044 /* We don't support SGX yet. So enclave-interruption must not be set. */
3045 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_ENCLAVE))
3046 { /* likely */ }
3047 else
3048 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateEnclave);
3049
3050 /*
3051 * Pending debug exceptions.
3052 */
3053 uint64_t const uPendingDbgXcpt = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode
3054 ? pVmcs->u64GuestPendingDbgXcpt.u
3055 : pVmcs->u64GuestPendingDbgXcpt.s.Lo;
3056 if (!(uPendingDbgXcpt & ~VMX_VMCS_GUEST_PENDING_DEBUG_VALID_MASK))
3057 { /* likely */ }
3058 else
3059 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRsvd);
3060
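/* With blocking by STI/MOV SS or the HLT activity state, the BS bit of the pending debug exceptions must be consistent with RFLAGS.TF and IA32_DEBUGCTL.BTF. */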
3061 if ( (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3062 || pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
3063 {
3064 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_TF)
3065 && !(pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF)
3066 && !(uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
3067 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsTf);
3068
3069 if ( ( !(pVmcs->u64GuestRFlags.u & X86_EFL_TF)
3070 || (pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF))
3071 && (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
3072 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsNoTf);
3073 }
3074
3075 /* We don't support RTM (Restricted Transactional Memory) yet. */
3076 if (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_RTM)
3077 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRtm);
3078
3079 /*
3080 * VMCS link pointer.
3081 */
3082 if (pVmcs->u64VmcsLinkPtr.u != UINT64_C(0xffffffffffffffff))
3083 {
3084 /* We don't support SMM yet (so VMCS link pointer cannot be the current VMCS). */
3085 if (pVmcs->u64VmcsLinkPtr.u != IEM_VMX_GET_CURRENT_VMCS(pVCpu))
3086 { /* likely */ }
3087 else
3088 {
3089 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
3090 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrCurVmcs);
3091 }
3092
3093 /* Validate the address. */
3094 if ( (pVmcs->u64VmcsLinkPtr.u & X86_PAGE_4K_OFFSET_MASK)
3095 || (pVmcs->u64VmcsLinkPtr.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3096 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64VmcsLinkPtr.u))
3097 {
3098 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
3099 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmcsLinkPtr);
3100 }
3101
3102 /* Read the VMCS-link pointer from guest memory. */
3103 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs));
3104 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs),
3105 pVmcs->u64VmcsLinkPtr.u, VMX_V_VMCS_SIZE);
3106 if (RT_FAILURE(rc))
3107 {
3108 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
3109 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys);
3110 }
3111
3112 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
3113 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID)
3114 { /* likely */ }
3115 else
3116 {
3117 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
3118 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrRevId);
3119 }
3120
3121 /* Verify the shadow-VMCS indicator bit is set if VMCS shadowing is enabled. */
3122 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3123 || pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.fIsShadowVmcs)
3124 { /* likely */ }
3125 else
3126 {
3127 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
3128 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrShadow);
3129 }
3130 }
3131
3132 NOREF(pszInstr);
3133 NOREF(pszFailure);
3134 return VINF_SUCCESS;
3135}
3136
3137
3138/**
3139 * Checks if the PDPTEs referenced by the nested-guest CR3 are valid as part of
3140 * VM-entry.
3141 *
3142 * @returns VBox status code.
3143 * @param pVCpu The cross context virtual CPU structure.
3144 * @param pszInstr The VMX instruction name (for logging purposes).
3145 * @param pVmcs Pointer to the virtual VMCS.
3146 */
3147IEM_STATIC int iemVmxVmentryCheckGuestPdptesForCr3(PVMCPU pVCpu, const char *pszInstr, PVMXVVMCS pVmcs)
3148{
3149 /*
3150 * Check PDPTEs.
3151 * See Intel spec. 4.4.1 "PDPTE Registers".
3152 */
3153 uint64_t const uGuestCr3 = pVmcs->u64GuestCr3.u & X86_CR3_PAE_PAGE_MASK;
3154 const char *const pszFailure = "VM-exit";
3155
3156 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
3157 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uGuestCr3, sizeof(aPdptes));
3158 if (RT_SUCCESS(rc))
3159 {
3160 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
3161 {
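/* A PDPTE is acceptable if it is not present or if none of its reserved (MBZ) bits are set. */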
3162 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
3163 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
3164 { /* likely */ }
3165 else
3166 {
3167 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_PDPTE;
3168 VMXVDIAG const enmDiag = iemVmxGetDiagVmentryPdpteRsvd(iPdpte);
3169 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3170 }
3171 }
3172 }
3173 else
3174 {
3175 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_PDPTE;
3176 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys);
3177 }
3178
3179 NOREF(pszFailure);
3180 return rc;
3181}
3182
3183
3184/**
3185 * Checks guest PDPTEs as part of VM-entry.
3186 *
3187 * @param pVCpu The cross context virtual CPU structure.
3188 * @param pszInstr The VMX instruction name (for logging purposes).
3189 */
3190IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPU pVCpu, const char *pszInstr)
3191{
3192 /*
3193 * Guest PDPTEs.
3194 * See Intel spec. 26.3.1.5 "Checks on Guest Page-Directory-Pointer-Table Entries".
3195 */
3196 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3197 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
3198
3199 /* Check PDPTEs if the VM-entry is to a guest using PAE paging. */
3200 int rc;
3201 if ( !fGstInLongMode
3202 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE)
3203 && (pVmcs->u64GuestCr0.u & X86_CR0_PG))
3204 {
3205 /*
3206 * We don't support nested-paging for nested-guests yet.
3207 *
3208 * Without nested-paging for nested-guests, PDPTEs in the VMCS are not used;
3209 * instead we check the PDPTEs referenced by the guest CR3.
3210 */
3211 rc = iemVmxVmentryCheckGuestPdptesForCr3(pVCpu, pszInstr, pVmcs);
3212 }
3213 else
3214 rc = VINF_SUCCESS;
3215 return rc;
3216}
3217
3218
3219/**
3220 * Checks guest-state as part of VM-entry.
3221 *
3222 * @returns VBox status code.
3223 * @param pVCpu The cross context virtual CPU structure.
3224 * @param pszInstr The VMX instruction name (for logging purposes).
3225 */
3226IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPU pVCpu, const char *pszInstr)
3227{
3228 int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
3229 if (rc == VINF_SUCCESS)
3230 { /* likely */ }
3231 else
3232 return rc;
3233
3234 rc = iemVmxVmentryCheckGuestSegRegs(pVCpu, pszInstr);
3235 if (rc == VINF_SUCCESS)
3236 { /* likely */ }
3237 else
3238 return rc;
3239
3240 rc = iemVmxVmentryCheckGuestGdtrIdtr(pVCpu, pszInstr);
3241 if (rc == VINF_SUCCESS)
3242 { /* likely */ }
3243 else
3244 return rc;
3245
3246 rc = iemVmxVmentryCheckGuestRipRFlags(pVCpu, pszInstr);
3247 if (rc == VINF_SUCCESS)
3248 { /* likely */ }
3249 else
3250 return rc;
3251
3252 rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
3253 if (rc == VINF_SUCCESS)
3254 { /* likely */ }
3255 else
3256 return rc;
3257
3258 rc = iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
3259 if (rc == VINF_SUCCESS)
3260 { /* likely */ }
3261 else
3262 return rc;
3263
3264 return VINF_SUCCESS;
3265}
3266
3267
3268/**
3269 * Checks host-state as part of VM-entry.
3270 *
3271 * @returns VBox status code.
3272 * @param pVCpu The cross context virtual CPU structure.
3273 * @param pszInstr The VMX instruction name (for logging purposes).
3274 */
3275IEM_STATIC int iemVmxVmentryCheckHostState(PVMCPU pVCpu, const char *pszInstr)
3276{
3277 /*
3278 * Host Control Registers and MSRs.
3279 * See Intel spec. 26.2.2 "Checks on Host Control Registers and MSRs".
3280 */
3281 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3282 const char * const pszFailure = "VMFail";
3283
3284 /* CR0 reserved bits. */
3285 {
3286 /* CR0 MB1 bits. */
3287 uint64_t const u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
3288 if (~pVmcs->u64HostCr0.u & u64Cr0Fixed0)
3289 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed0);
3290
3291 /* CR0 MBZ bits. */
3292 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
3293 if (pVmcs->u64HostCr0.u & ~u64Cr0Fixed1)
3294 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed1);
3295 }
3296
3297 /* CR4 reserved bits. */
3298 {
3299 /* CR4 MB1 bits. */
3300 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
3301 if (~pVmcs->u64HostCr4.u & u64Cr4Fixed0)
3302 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed0);
3303
3304 /* CR4 MBZ bits. */
3305 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
3306 if (pVmcs->u64HostCr4.u & ~u64Cr4Fixed1)
3307 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed1);
3308 }
3309
3310 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3311 {
3312 /* CR3 reserved bits. */
3313 if (!(pVmcs->u64HostCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
3314 { /* likely */ }
3315 else
3316 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr3);
3317
3318 /* SYSENTER ESP and SYSENTER EIP. */
3319 if ( X86_IS_CANONICAL(pVmcs->u64HostSysenterEsp.u)
3320 && X86_IS_CANONICAL(pVmcs->u64HostSysenterEip.u))
3321 { /* likely */ }
3322 else
3323 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSysenterEspEip);
3324 }
3325
3326 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PERF_MSR)); /* We don't support loading IA32_PERF_GLOBAL_CTRL MSR yet. */
3327
3328 /* PAT MSR. */
3329 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
3330 || CPUMIsPatMsrValid(pVmcs->u64HostPatMsr.u))
3331 { /* likely */ }
3332 else
3333 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostPatMsr);
3334
3335 /* EFER MSR. */
3336 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
3337 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
3338 || !(pVmcs->u64HostEferMsr.u & ~uValidEferMask))
3339 { /* likely */ }
3340 else
3341 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsrRsvd);
3342
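/* Host EFER.LMA and EFER.LME must each equal the 'host address-space size' VM-exit control. */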
3343 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
3344 bool const fHostLma = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LMA);
3345 bool const fHostLme = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LME);
3346 if ( fHostInLongMode == fHostLma
3347 && fHostInLongMode == fHostLme)
3348 { /* likely */ }
3349 else
3350 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsr);
3351
3352 /*
3353 * Host Segment and Descriptor-Table Registers.
3354 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
3355 */
3356 /* Selector RPL and TI. */
3357 if ( !(pVmcs->HostCs & (X86_SEL_RPL | X86_SEL_LDT))
3358 && !(pVmcs->HostSs & (X86_SEL_RPL | X86_SEL_LDT))
3359 && !(pVmcs->HostDs & (X86_SEL_RPL | X86_SEL_LDT))
3360 && !(pVmcs->HostEs & (X86_SEL_RPL | X86_SEL_LDT))
3361 && !(pVmcs->HostFs & (X86_SEL_RPL | X86_SEL_LDT))
3362 && !(pVmcs->HostGs & (X86_SEL_RPL | X86_SEL_LDT))
3363 && !(pVmcs->HostTr & (X86_SEL_RPL | X86_SEL_LDT)))
3364 { /* likely */ }
3365 else
3366 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSel);
3367
3368 /* CS and TR selectors cannot be 0. */
3369 if ( pVmcs->HostCs
3370 && pVmcs->HostTr)
3371 { /* likely */ }
3372 else
3373 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCsTr);
3374
3375 /* SS cannot be 0 if 32-bit host. */
3376 if ( fHostInLongMode
3377 || pVmcs->HostSs)
3378 { /* likely */ }
3379 else
3380 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSs);
3381
3382 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3383 {
3384 /* FS, GS, GDTR, IDTR, TR base address. */
3385 if ( X86_IS_CANONICAL(pVmcs->u64HostFsBase.u)
3386 && X86_IS_CANONICAL(pVmcs->u64HostGsBase.u)
3387 && X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u)
3388 && X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u)
3389 && X86_IS_CANONICAL(pVmcs->u64HostTrBase.u))
3390 { /* likely */ }
3391 else
3392 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSegBase);
3393 }
3394
3395 /*
3396 * Host address-space size for 64-bit CPUs.
3397 * See Intel spec. 26.2.4 "Checks Related to Address-Space Size".
3398 */
3399 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
3400 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3401 {
3402 bool const fCpuInLongMode = CPUMIsGuestInLongMode(pVCpu);
3403
3404 /* Logical processor in IA-32e mode. */
3405 if (fCpuInLongMode)
3406 {
3407 if (fHostInLongMode)
3408 {
3409 /* PAE must be set. */
3410 if (pVmcs->u64HostCr4.u & X86_CR4_PAE)
3411 { /* likely */ }
3412 else
3413 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pae);
3414
3415 /* RIP must be canonical. */
3416 if (X86_IS_CANONICAL(pVmcs->u64HostRip.u))
3417 { /* likely */ }
3418 else
3419 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRip);
3420 }
3421 else
3422 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostLongMode);
3423 }
3424 else
3425 {
3426 /* Logical processor is outside IA-32e mode. */
3427 if ( !fGstInLongMode
3428 && !fHostInLongMode)
3429 {
3430 /* PCIDE should not be set. */
3431 if (!(pVmcs->u64HostCr4.u & X86_CR4_PCIDE))
3432 { /* likely */ }
3433 else
3434 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pcide);
3435
3436 /* The high 32-bits of RIP MBZ. */
3437 if (!pVmcs->u64HostRip.s.Hi)
3438 { /* likely */ }
3439 else
3440 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRipRsvd);
3441 }
3442 else
3443 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongMode);
3444 }
3445 }
3446 else
3447 {
3448 /* Host address-space size for 32-bit CPUs. */
3449 if ( !fGstInLongMode
3450 && !fHostInLongMode)
3451 { /* likely */ }
3452 else
3453 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongModeNoCpu);
3454 }
3455
3456 NOREF(pszInstr);
3457 NOREF(pszFailure);
3458 return VINF_SUCCESS;
3459}
3460
3461
3462/**
3463 * Checks VM-entry controls fields as part of VM-entry.
3464 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
3465 *
3466 * @returns VBox status code.
3467 * @param pVCpu The cross context virtual CPU structure.
3468 * @param pszInstr The VMX instruction name (for logging purposes).
3469 */
3470IEM_STATIC int iemVmxVmentryCheckEntryCtls(PVMCPU pVCpu, const char *pszInstr)
3471{
3472 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3473 const char * const pszFailure = "VMFail";
3474
3475 /* VM-entry controls. */
3476 VMXCTLSMSR EntryCtls;
3477 EntryCtls.u = CPUMGetGuestIa32VmxEntryCtls(pVCpu);
3478 if (~pVmcs->u32EntryCtls & EntryCtls.n.disallowed0)
3479 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);
3480
3481 if (pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1)
3482 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsAllowed1);
3483
3484 /* Event injection. */
3485 uint32_t const uIntInfo = pVmcs->u32EntryIntInfo;
3486 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VALID))
3487 {
3488 /* Type and vector. */
3489 uint8_t const uType = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_TYPE);
3490 uint8_t const uVector = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VECTOR);
3491 uint8_t const uRsvd = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_RSVD_12_30);
3492 if ( uRsvd == 0
3493 && HMVmxIsEntryIntInfoTypeValid(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxMonitorTrapFlag, uType)
3494 && HMVmxIsEntryIntInfoVectorValid(uVector, uType))
3495 { /* likely */ }
3496 else
3497 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoTypeVecRsvd);
3498
3499 /* Exception error code. */
3500 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID))
3501 {
3502 /* Delivery possible only in Unrestricted-guest mode when CR0.PE is set. */
3503 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
3504 || (pVmcs->u64GuestCr0.s.Lo & X86_CR0_PE))
3505 { /* likely */ }
3506 else
3507 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodePe);
3508
3509 /* Exceptions that provide an error code. */
3510 if ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
3511 && ( uVector == X86_XCPT_DF
3512 || uVector == X86_XCPT_TS
3513 || uVector == X86_XCPT_NP
3514 || uVector == X86_XCPT_SS
3515 || uVector == X86_XCPT_GP
3516 || uVector == X86_XCPT_PF
3517 || uVector == X86_XCPT_AC))
3518 { /* likely */ }
3519 else
3520 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodeVec);
3521
3522 /* Exception error-code reserved bits. */
3523 if (!(pVmcs->u32EntryXcptErrCode & ~VMX_ENTRY_INT_XCPT_ERR_CODE_VALID_MASK))
3524 { /* likely */ }
3525 else
3526 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryXcptErrCodeRsvd);
3527
3528 /* Injecting a software interrupt, software exception or privileged software exception. */
3529 if ( uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
3530 || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
3531 || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
3532 {
3533 /* Instruction length must be in the range 0-15. */
3534 if (pVmcs->u32EntryInstrLen <= VMX_ENTRY_INSTR_LEN_MAX)
3535 { /* likely */ }
3536 else
3537 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLen);
3538
3539 /* An instruction length of 0 is allowed only when the CPU advertises support for it. */
3540 if ( pVmcs->u32EntryInstrLen == 0
3541 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEntryInjectSoftInt)
3542 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLenZero);
3543 }
3544 }
3545 }
3546
3547 /* VM-entry MSR-load count and VM-entry MSR-load area address. */
3548 if (pVmcs->u32EntryMsrLoadCount)
3549 {
3550 if ( (pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
3551 || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3552 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
3553 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrEntryMsrLoad);
3554 }
3555
3556 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)); /* We don't support SMM yet. */
3557 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON)); /* We don't support dual-monitor treatment yet. */
3558
3559 NOREF(pszInstr);
3560 NOREF(pszFailure);
3561 return VINF_SUCCESS;
3562}
3563
3564
3565/**
3566 * Checks VM-exit controls fields as part of VM-entry.
3567 * See Intel spec. 26.2.1.2 "VM-Exit Control Fields".
3568 *
3569 * @returns VBox status code.
3570 * @param pVCpu The cross context virtual CPU structure.
3571 * @param pszInstr The VMX instruction name (for logging purposes).
3572 */
3573IEM_STATIC int iemVmxVmentryCheckExitCtls(PVMCPU pVCpu, const char *pszInstr)
3574{
3575 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3576 const char * const pszFailure = "VMFail";
3577
3578 /* VM-exit controls. */
3579 VMXCTLSMSR ExitCtls;
3580 ExitCtls.u = CPUMGetGuestIa32VmxExitCtls(pVCpu);
3581 if (~pVmcs->u32ExitCtls & ExitCtls.n.disallowed0)
3582 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);
3583
3584 if (pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1)
3585 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsAllowed1);
3586
3587 /* Save preemption timer without activating it. */
3588 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
3589 && (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
3590 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_SavePreemptTimer);
3591
3592 /* VM-exit MSR-store count and VM-exit MSR-store area address. */
3593 if (pVmcs->u32ExitMsrStoreCount)
3594 {
3595 if ( (pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
3596 || (pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3597 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
3598 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrStore);
3599 }
3600
3601 /* VM-exit MSR-load count and VM-exit MSR-load area address. */
3602 if (pVmcs->u32ExitMsrLoadCount)
3603 {
3604 if ( (pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
3605 || (pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3606 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
3607 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrLoad);
3608 }
3609
3610 NOREF(pszInstr);
3611 NOREF(pszFailure);
3612 return VINF_SUCCESS;
3613}
3614
3615
3616/**
3617 * Checks VM-execution controls fields as part of VM-entry.
3618 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
3619 *
3620 * @returns VBox status code.
3621 * @param pVCpu The cross context virtual CPU structure.
3622 * @param pszInstr The VMX instruction name (for logging purposes).
3623 *
3624 * @remarks This may update secondary-processor based VM-execution control fields
3625 * in the current VMCS if necessary.
3626 */
3627IEM_STATIC int iemVmxVmentryCheckExecCtls(PVMCPU pVCpu, const char *pszInstr)
3628{
3629 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3630 const char * const pszFailure = "VMFail";
3631
3632 /* Pin-based VM-execution controls. */
3633 {
3634 VMXCTLSMSR PinCtls;
3635 PinCtls.u = CPUMGetGuestIa32VmxPinbasedCtls(pVCpu);
3636 if (~pVmcs->u32PinCtls & PinCtls.n.disallowed0)
3637 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);
3638
3639 if (pVmcs->u32PinCtls & ~PinCtls.n.allowed1)
3640 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsAllowed1);
3641 }
3642
3643 /* Processor-based VM-execution controls. */
3644 {
3645 VMXCTLSMSR ProcCtls;
3646 ProcCtls.u = CPUMGetGuestIa32VmxProcbasedCtls(pVCpu);
3647 if (~pVmcs->u32ProcCtls & ProcCtls.n.disallowed0)
3648 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);
3649
3650 if (pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1)
3651 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsAllowed1);
3652 }
3653
3654 /* Secondary processor-based VM-execution controls. */
3655 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
3656 {
3657 VMXCTLSMSR ProcCtls2;
3658 ProcCtls2.u = CPUMGetGuestIa32VmxProcbasedCtls2(pVCpu);
3659 if (~pVmcs->u32ProcCtls2 & ProcCtls2.n.disallowed0)
3660 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);
3661
3662 if (pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1)
3663 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Allowed1);
3664 }
3665 else
3666 Assert(!pVmcs->u32ProcCtls2);
3667
3668 /* CR3-target count. */
3669 if (pVmcs->u32Cr3TargetCount <= VMX_V_CR3_TARGET_COUNT)
3670 { /* likely */ }
3671 else
3672 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Cr3TargetCount);
3673
3674 /* IO bitmaps physical addresses. */
3675 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
3676 {
3677 if ( (pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
3678 || (pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3679 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
3680 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapA);
3681
3682 if ( (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
3683 || (pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3684 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
3685 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
3686 }
3687
3688 /* MSR bitmap physical address. */
3689 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
3690 {
3691 if ( (pVmcs->u64AddrMsrBitmap.u & X86_PAGE_4K_OFFSET_MASK)
3692 || (pVmcs->u64AddrMsrBitmap.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3693 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrMsrBitmap.u))
3694 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);
3695 }
3696
3697 /* TPR shadow related controls. */
3698 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
3699 {
3700 /* Virtual-APIC page physical address. */
3701 RTGCPHYS GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
3702 if ( (GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
3703 || (GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3704 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
3705 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
3706
3707 /* Read the Virtual-APIC page. */
3708 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
3709 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage),
3710 GCPhysVirtApic, VMX_V_VIRT_APIC_PAGES);
3711 if (RT_FAILURE(rc))
3712 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
3713
3714 /* TPR threshold without virtual-interrupt delivery. */
3715 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
3716 && (pVmcs->u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK))
3717 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdRsvd);
3718
3719 /* TPR threshold and VTPR. */
3720 uint8_t const *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
3721 uint8_t const u8VTpr = *(pbVirtApic + XAPIC_OFF_TPR);
3722 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
3723 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
3724 && RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) > ((u8VTpr >> 4) & UINT32_C(0xf)) /* Bits 4:7 */)
3725 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
3726 }
3727 else
3728 {
3729 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
3730 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
3731 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
3732 { /* likely */ }
3733 else
3734 {
3735 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
3736 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicTprShadow);
3737 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
3738 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ApicRegVirt);
3739 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
3740 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtIntDelivery);
3741 }
3742 }
3743
3744 /* NMI exiting and virtual-NMIs. */
3745 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
3746 && (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
3747 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtNmi);
3748
3749 /* Virtual-NMIs and NMI-window exiting. */
3750 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
3751 && (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
3752 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_NmiWindowExit);
3753
3754 /* Virtualize APIC accesses. */
3755 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
3756 {
3757 /* APIC-access physical address. */
3758 RTGCPHYS GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
3759 if ( (GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
3760 || (GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3761 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
3762 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);
3763 }
3764
3765 /* Virtualize-x2APIC mode is mutually exclusive with virtualize-APIC accesses. */
3766 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
3767 && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
3768 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
3769
3770 /* Virtual-interrupt delivery requires external interrupt exiting. */
3771 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
3772 && !(pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
3773 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
3774
3775 /* VPID. */
3776 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VPID)
3777 || pVmcs->u16Vpid != 0)
3778 { /* likely */ }
3779 else
3780 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Vpid);
3781
3782 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_POSTED_INT)); /* We don't support posted interrupts yet. */
3783 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)); /* We don't support EPT yet. */
3784 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PML)); /* We don't support PML yet. */
3785 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)); /* We don't support Unrestricted-guests yet. */
3786 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMFUNC)); /* We don't support VM functions yet. */
3787 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT_VE)); /* We don't support EPT-violation #VE yet. */
3788
3789 /* VMCS shadowing. */
3790 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3791 {
3792 /* VMREAD-bitmap physical address. */
3793 RTGCPHYS GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
3794 if ( ( GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
3795 || ( GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3796 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
3797 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmreadBitmap);
3798
3799 /* VMWRITE-bitmap physical address. */
3800 RTGCPHYS GCPhysVmwriteBitmap = pVmcs->u64AddrVmwriteBitmap.u;
3801 if ( ( GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
3802 || ( GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3803 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
3804 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);
3805
3806 /* Read the VMREAD-bitmap. */
3807 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
3808 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap),
3809 GCPhysVmreadBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
3810 if (RT_FAILURE(rc))
3811 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys);
3812
3813 /* Read the VMWRITE-bitmap. */
3814 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap));
3815 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap),
3816 GCPhysVmwriteBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
3817 if (RT_FAILURE(rc))
3818 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys);
3819 }
3820
3821 NOREF(pszInstr);
3822 NOREF(pszFailure);
3823 return VINF_SUCCESS;
3824}
3825
3826
3827/**
3828 * VMLAUNCH/VMRESUME instruction execution worker.
3829 *
3830 * @param pVCpu The cross context virtual CPU structure.
3831 * @param cbInstr The instruction length.
3832 * @param uInstrId The instruction identity (either VMXINSTRID_VMLAUNCH or
3833 * VMXINSTRID_VMRESUME).
3834 * @param pExitInfo Pointer to the VM-exit instruction information struct.
3835 * Optional, can be NULL.
3836 *
3837 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
3838 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
3839 */
3840IEM_STATIC VBOXSTRICTRC iemVmxVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId, PCVMXVEXITINFO pExitInfo)
3841{
3842 Assert( uInstrId == VMXINSTRID_VMLAUNCH
3843 || uInstrId == VMXINSTRID_VMRESUME);
3844
3845 const char *pszInstr = uInstrId == VMXINSTRID_VMLAUNCH ? "vmlaunch" : "vmresume";
3846 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
3847 {
3848 RT_NOREF(pExitInfo);
3849 /** @todo NSTVMX: intercept. */
3850 }
3851 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
3852
3853 /* CPL. */
3854 if (pVCpu->iem.s.uCpl > 0)
3855 {
3856 Log(("%s: CPL %u -> #GP(0)\n", pszInstr, pVCpu->iem.s.uCpl));
3857 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_Cpl;
3858 return iemRaiseGeneralProtectionFault0(pVCpu);
3859 }
3860
3861 /* Current VMCS valid. */
3862 if (!IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
3863 {
3864 Log(("%s: VMCS pointer %#RGp invalid -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
3865 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrInvalid;
3866 iemVmxVmFailInvalid(pVCpu);
3867 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3868 return VINF_SUCCESS;
3869 }
3870
3871 /** @todo Distinguish block-by-MOV-SS from block-by-STI. Currently we
3872 * use block-by-STI here which is not quite correct. */
3873 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
3874 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
3875 {
3876 Log(("%s: VM entry with events blocked by MOV SS -> VMFail\n", pszInstr));
3877 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_BlocKMovSS;
3878 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_BLOCK_MOVSS);
3879 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3880 return VINF_SUCCESS;
3881 }
3882
3883 if (uInstrId == VMXINSTRID_VMLAUNCH)
3884 {
3885 /* VMLAUNCH with non-clear VMCS. */
3886 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState != VMX_V_VMCS_STATE_CLEAR)
3887 {
3888 Log(("vmlaunch: VMLAUNCH with non-clear VMCS -> VMFail\n"));
3889 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsClear;
3890 iemVmxVmFail(pVCpu, VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS);
3891 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3892 return VINF_SUCCESS;
3893 }
3894 }
3895 else
3896 {
3897 /* VMRESUME with non-launched VMCS. */
3898 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState != VMX_V_VMCS_STATE_LAUNCHED)
3899 {
3900 Log(("vmresume: VMRESUME with non-launched VMCS -> VMFail\n"));
3901 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsLaunch;
3902 iemVmxVmFail(pVCpu, VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS);
3903 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3904 return VINF_SUCCESS;
3905 }
3906 }
3907
3908 /*
3909 * Load the current VMCS.
3910 */
3911 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
3912 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs),
3913 IEM_VMX_GET_CURRENT_VMCS(pVCpu), VMX_V_VMCS_SIZE);
3914 if (RT_FAILURE(rc))
3915 {
3916 Log(("%s: Failed to read VMCS at %#RGp, rc=%Rrc\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu), rc));
3917 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrReadPhys;
3918 return rc;
3919 }
3920
3921 /*
3922 * Check VM-execution control fields.
3923 */
3924 rc = iemVmxVmentryCheckExecCtls(pVCpu, pszInstr);
3925 if (rc == VINF_SUCCESS)
3926 { /* likely */ }
3927 else
3928 {
3929 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
3930 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3931 return VINF_SUCCESS;
3932 }
3933
3934 /*
3935 * Check VM-exit control fields.
3936 */
3937 rc = iemVmxVmentryCheckExitCtls(pVCpu, pszInstr);
3938 if (rc == VINF_SUCCESS)
3939 { /* likely */ }
3940 else
3941 {
3942 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
3943 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3944 return VINF_SUCCESS;
3945 }
3946
3947 /*
3948 * Check VM-entry control fields.
3949 */
3950 rc = iemVmxVmentryCheckEntryCtls(pVCpu, pszInstr);
3951 if (rc == VINF_SUCCESS)
3952 { /* likely */ }
3953 else
3954 {
3955 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
3956 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3957 return VINF_SUCCESS;
3958 }
3959
3960 /*
3961 * Check host-state fields.
3962 */
3963 rc = iemVmxVmentryCheckHostState(pVCpu, pszInstr);
3964 if (rc == VINF_SUCCESS)
3965 { /* likely */ }
3966 else
3967 {
3968 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_HOST_STATE);
3969 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3970 return VINF_SUCCESS;
3971 }
3972
3973 /*
3974 * Check guest-state fields.
3975 */
3976 rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
3977 if (rc == VINF_SUCCESS)
3978 { /* likely */ }
3979 else
3980 {
3981 /** @todo NSTVMX: VMExit with VMX_EXIT_ERR_INVALID_GUEST_STATE and set
3982 * VMX_BF_EXIT_REASON_ENTRY_FAILED. */
3983 return VINF_SUCCESS;
3984 }
3985
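/* All checks passed. Note: actually performing the VM-entry (loading guest state, injecting events, etc.) appears not to be implemented at this revision; the informational-error return below is presumably a placeholder. */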
3986 iemVmxVmSucceed(pVCpu);
3987 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3988 return VERR_IEM_IPE_2;
3989}
3990
3991
3992/**
3993 * Implements 'VMXON'.
3994 */
3995IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
3996{
3997 return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
3998}
3999
4000
4001/**
4002 * Implements 'VMXOFF'.
4003 *
4004 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
4005 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
4006 */
4007IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
4008{
4009# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
4010 RT_NOREF2(pVCpu, cbInstr);
4011 return VINF_EM_RAW_EMULATE_INSTR;
4012# else
4013 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
4014 {
4015 /** @todo NSTVMX: intercept. */
4016 }
4017
4018 /* CPL. */
4019 if (pVCpu->iem.s.uCpl > 0)
4020 {
4021 Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
4022 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxoff_Cpl;
4023 return iemRaiseGeneralProtectionFault0(pVCpu);
4024 }
4025
4026 /* Dual monitor treatment of SMIs and SMM. */
4027 uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
4028 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
4029 {
4030 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
4031 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4032 return VINF_SUCCESS;
4033 }
4034
4035 /*
4036 * Record that we're no longer in VMX root operation, block INIT, block and disable A20M.
4037 */
4038 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
4039 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
4040
4041 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
4042 { /** @todo NSTVMX: Unblock SMI. */ }
4043
4044 /** @todo NSTVMX: Unblock and enable A20M. */
4045 /** @todo NSTVMX: Clear address-range monitoring. */
4046
4047 iemVmxVmSucceed(pVCpu);
4048 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4049# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
4050 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
4051# else
4052 return VINF_SUCCESS;
4053# endif
4054# endif
4055}
4056
4057
4058/**
4059 * Implements 'VMLAUNCH'.
4060 */
4061IEM_CIMPL_DEF_0(iemCImpl_vmlaunch)
4062{
4063 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMLAUNCH, NULL /* pExitInfo */);
4064}
4065
4066
4067/**
4068 * Implements 'VMRESUME'.
4069 */
4070IEM_CIMPL_DEF_0(iemCImpl_vmresume)
4071{
4072 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMRESUME, NULL /* pExitInfo */);
4073}
4074
4075
4076/**
4077 * Implements 'VMPTRLD'.
4078 */
4079IEM_CIMPL_DEF_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
4080{
4081 return iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
4082}
4083
4084
4085/**
4086 * Implements 'VMPTRST'.
4087 */
4088IEM_CIMPL_DEF_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
4089{
4090 return iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
4091}
4092
4093
4094/**
4095 * Implements 'VMCLEAR'.
4096 */
4097IEM_CIMPL_DEF_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
4098{
4099 return iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
4100}
4101
4102
4103/**
4104 * Implements 'VMWRITE' register.
4105 */
4106IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64FieldEnc)
4107{
4108 return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, IEMMODE_64BIT /* N/A */, u64Val, u64FieldEnc,
4109 NULL /* pExitInfo */);
4110}
4111
4112
4113/**
4114 * Implements 'VMWRITE' memory.
4115 */
4116IEM_CIMPL_DEF_4(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrVal, uint32_t, u64FieldEnc)
4117{
4118 return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrVal, u64FieldEnc, NULL /* pExitInfo */);
4119}
4120
4121
4122/**
4123 * Implements 'VMREAD' 64-bit register.
4124 */
4125IEM_CIMPL_DEF_2(iemCImpl_vmread64_reg, uint64_t *, pu64Dst, uint64_t, u64FieldEnc)
4126{
4127 return iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, NULL /* pExitInfo */);
4128}
4129
4130
4131/**
4132 * Implements 'VMREAD' 32-bit register.
4133 */
4134IEM_CIMPL_DEF_2(iemCImpl_vmread32_reg, uint32_t *, pu32Dst, uint32_t, u32FieldEnc)
4135{
4136 return iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u32FieldEnc, NULL /* pExitInfo */);
4137}
4138
4139
4140/**
4141 * Implements 'VMREAD' memory.
4142 */
4143IEM_CIMPL_DEF_4(iemCImpl_vmread_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrDst, uint32_t, u64FieldEnc)
4144{
4145 return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, u64FieldEnc, NULL /* pExitInfo */);
4146}
4147
4148#endif
4149