VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h@ 74155

Last change on this file: r74155, checked in by vboxsync on 2018-09-09

VMM: Nested VMX: bugref:9180 VMXVDIAG naming.

1/* $Id: IEMAllCImplVmxInstr.cpp.h 74155 2018-09-09 12:37:26Z vboxsync $ */
2/** @file
3 * IEM - VT-x instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/**
20 * Implements 'VMCALL'.
21 */
22IEM_CIMPL_DEF_0(iemCImpl_vmcall)
23{
24 /** @todo NSTVMX: intercept. */
25
26 /* Join forces with vmmcall. */
27 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
28}
29
30#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
31/**
32 * Map of VMCS field encodings to their virtual-VMCS structure offsets.
33 *
34 * The first array dimension is the VMCS field encoding's Width OR'ed with its Type, and the
35 * second dimension is the encoding's Index (see VMXVMCSFIELDENC). An illustrative lookup example follows the table.
36 */
37uint16_t const g_aoffVmcsMap[16][VMX_V_VMCS_MAX_INDEX + 1] =
38{
39 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
40 {
41 /* 0 */ RT_OFFSETOF(VMXVVMCS, u16Vpid),
42 /* 1 */ RT_OFFSETOF(VMXVVMCS, u16PostIntNotifyVector),
43 /* 2 */ RT_OFFSETOF(VMXVVMCS, u16EptpIndex),
44 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
45 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
46 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
47 },
48 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
49 {
50 /* 0-7 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
51 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
52 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
53 /* 24-25 */ UINT16_MAX, UINT16_MAX
54 },
55 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
56 {
57 /* 0 */ RT_OFFSETOF(VMXVVMCS, GuestEs),
58 /* 1 */ RT_OFFSETOF(VMXVVMCS, GuestCs),
59 /* 2 */ RT_OFFSETOF(VMXVVMCS, GuestSs),
60 /* 3 */ RT_OFFSETOF(VMXVVMCS, GuestDs),
61 /* 4 */ RT_OFFSETOF(VMXVVMCS, GuestFs),
62 /* 5 */ RT_OFFSETOF(VMXVVMCS, GuestGs),
63 /* 6 */ RT_OFFSETOF(VMXVVMCS, GuestLdtr),
64 /* 7 */ RT_OFFSETOF(VMXVVMCS, GuestTr),
65 /* 8 */ RT_OFFSETOF(VMXVVMCS, u16GuestIntStatus),
66 /* 9 */ RT_OFFSETOF(VMXVVMCS, u16PmlIndex),
67 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
68 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
69 },
70 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
71 {
72 /* 0 */ RT_OFFSETOF(VMXVVMCS, HostEs),
73 /* 1 */ RT_OFFSETOF(VMXVVMCS, HostCs),
74 /* 2 */ RT_OFFSETOF(VMXVVMCS, HostSs),
75 /* 3 */ RT_OFFSETOF(VMXVVMCS, HostDs),
76 /* 4 */ RT_OFFSETOF(VMXVVMCS, HostFs),
77 /* 5 */ RT_OFFSETOF(VMXVVMCS, HostGs),
78 /* 6 */ RT_OFFSETOF(VMXVVMCS, HostTr),
79 /* 7-14 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
80 /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
81 /* 23-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
82 },
83 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
84 {
85 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapA),
86 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapB),
87 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64AddrMsrBitmap),
88 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64AddrExitMsrStore),
89 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64AddrExitMsrLoad),
90 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64AddrEntryMsrLoad),
91 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64ExecVmcsPtr),
92 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64AddrPml),
93 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64TscOffset),
94 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64AddrVirtApic),
95 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64AddrApicAccess),
96 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64AddrPostedIntDesc),
97 /* 12 */ RT_OFFSETOF(VMXVVMCS, u64VmFuncCtls),
98 /* 13 */ RT_OFFSETOF(VMXVVMCS, u64EptpPtr),
99 /* 14 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap0),
100 /* 15 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap1),
101 /* 16 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap2),
102 /* 17 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap3),
103 /* 18 */ RT_OFFSETOF(VMXVVMCS, u64AddrEptpList),
104 /* 19 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmreadBitmap),
105 /* 20 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmwriteBitmap),
106 /* 21 */ RT_OFFSETOF(VMXVVMCS, u64AddrXcptVeInfo),
107 /* 22 */ RT_OFFSETOF(VMXVVMCS, u64AddrXssBitmap),
108 /* 23 */ RT_OFFSETOF(VMXVVMCS, u64AddrEnclsBitmap),
109 /* 24 */ UINT16_MAX,
110 /* 25 */ RT_OFFSETOF(VMXVVMCS, u64TscMultiplier)
111 },
112 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
113 {
114 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64GuestPhysAddr),
115 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
116 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
117 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
118 /* 25 */ UINT16_MAX
119 },
120 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
121 {
122 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64VmcsLinkPtr),
123 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64GuestDebugCtlMsr),
124 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64GuestPatMsr),
125 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEferMsr),
126 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
127 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte0),
128 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte1),
129 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte2),
130 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte3),
131 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64GuestBndcfgsMsr),
132 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
133 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
134 },
135 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
136 {
137 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64HostPatMsr),
138 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64HostEferMsr),
139 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64HostPerfGlobalCtlMsr),
140 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
141 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
142 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
143 },
144 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
145 {
146 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32PinCtls),
147 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls),
148 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32XcptBitmap),
149 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMask),
150 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMatch),
151 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32Cr3TargetCount),
152 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32ExitCtls),
153 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrStoreCount),
154 /* 8 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrLoadCount),
155 /* 9 */ RT_OFFSETOF(VMXVVMCS, u32EntryCtls),
156 /* 10 */ RT_OFFSETOF(VMXVVMCS, u32EntryMsrLoadCount),
157 /* 11 */ RT_OFFSETOF(VMXVVMCS, u32EntryIntInfo),
158 /* 12 */ RT_OFFSETOF(VMXVVMCS, u32EntryXcptErrCode),
159 /* 13 */ RT_OFFSETOF(VMXVVMCS, u32EntryInstrLen),
160 /* 14 */ RT_OFFSETOF(VMXVVMCS, u32TprThreshold),
161 /* 15 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls2),
162 /* 16 */ RT_OFFSETOF(VMXVVMCS, u32PleGap),
163 /* 17 */ RT_OFFSETOF(VMXVVMCS, u32PleWindow),
164 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
165 },
166 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
167 {
168 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32RoVmInstrError),
169 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32RoExitReason),
170 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32RoExitIntInfo),
171 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32RoExitErrCode),
172 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
173 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
174 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32RoExitInstrLen),
175 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32RoExitInstrInfo),
176 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
177 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
178 /* 24-25 */ UINT16_MAX, UINT16_MAX
179 },
180 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
181 {
182 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsLimit),
183 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsLimit),
184 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsLimit),
185 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsLimit),
186 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsLimit),
187 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsLimit),
188 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrLimit),
189 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrLimit),
190 /* 8 */ RT_OFFSETOF(VMXVVMCS, u32GuestGdtrLimit),
191 /* 9 */ RT_OFFSETOF(VMXVVMCS, u32GuestIdtrLimit),
192 /* 10 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsAttr),
193 /* 11 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsAttr),
194 /* 12 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsAttr),
195 /* 13 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsAttr),
196 /* 14 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsAttr),
197 /* 15 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsAttr),
198 /* 16 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrAttr),
199 /* 17 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrAttr),
200 /* 18 */ RT_OFFSETOF(VMXVVMCS, u32GuestIntrState),
201 /* 19 */ RT_OFFSETOF(VMXVVMCS, u32GuestActivityState),
202 /* 20 */ RT_OFFSETOF(VMXVVMCS, u32GuestSmBase),
203 /* 21 */ RT_OFFSETOF(VMXVVMCS, u32GuestSysenterCS),
204 /* 22 */ UINT16_MAX,
205 /* 23 */ RT_OFFSETOF(VMXVVMCS, u32PreemptTimer),
206 /* 24-25 */ UINT16_MAX, UINT16_MAX
207 },
208 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
209 {
210 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32HostSysenterCs),
211 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
212 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
213 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
214 /* 25 */ UINT16_MAX
215 },
216 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_CONTROL: */
217 {
218 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64Cr0Mask),
219 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64Cr4Mask),
220 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64Cr0ReadShadow),
221 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64Cr4ReadShadow),
222 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target0),
223 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target1),
224 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target2),
225 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target3),
226 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
227 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
228 /* 24-25 */ UINT16_MAX, UINT16_MAX
229 },
230 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
231 {
232 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64ExitQual),
233 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64IoRcx),
234 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64IoRsi),
235 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64IoRdi),
236 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64IoRip),
237 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestLinearAddr),
238 /* 6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
239 /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
240 /* 22-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
241 },
242 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
243 {
244 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr0),
245 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr3),
246 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr4),
247 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEsBase),
248 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64GuestCsBase),
249 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestSsBase),
250 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64GuestDsBase),
251 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64GuestFsBase),
252 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64GuestGsBase),
253 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64GuestLdtrBase),
254 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64GuestTrBase),
255 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64GuestGdtrBase),
256 /* 12 */ RT_OFFSETOF(VMXVVMCS, u64GuestIdtrBase),
257 /* 13 */ RT_OFFSETOF(VMXVVMCS, u64GuestDr7),
258 /* 14 */ RT_OFFSETOF(VMXVVMCS, u64GuestRsp),
259 /* 15 */ RT_OFFSETOF(VMXVVMCS, u64GuestRip),
260 /* 16 */ RT_OFFSETOF(VMXVVMCS, u64GuestRFlags),
261 /* 17 */ RT_OFFSETOF(VMXVVMCS, u64GuestPendingDbgXcpt),
262 /* 18 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEsp),
263 /* 19 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEip),
264 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
265 },
266 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_HOST_STATE: */
267 {
268 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64HostCr0),
269 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64HostCr3),
270 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64HostCr4),
271 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64HostFsBase),
272 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64HostGsBase),
273 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64HostTrBase),
274 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64HostGdtrBase),
275 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64HostIdtrBase),
276 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEsp),
277 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEip),
278 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64HostRsp),
279 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64HostRip),
280 /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
281 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
282 }
283};
284
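/**
 * Editor's note: illustrative example only, not part of the original file. It shows how a
 * VMCS field encoding is broken into Width, Type and Index and used to look up the
 * virtual-VMCS offset in g_aoffVmcsMap, mirroring what iemVmxVmreadCommon does further
 * down; VMX_VMCS32_GUEST_ES_LIMIT is merely a sample field.
 *
 * @code
 *     VMXVMCSFIELDENC FieldEnc;
 *     FieldEnc.u = VMX_VMCS32_GUEST_ES_LIMIT;                    // 32-bit, guest-state, index 0.
 *     uint8_t const  uWidthType = (FieldEnc.n.u2Width << 2) | FieldEnc.n.u2Type;
 *     Assert(FieldEnc.n.u8Index <= VMX_V_VMCS_MAX_INDEX);
 *     uint16_t const offField   = g_aoffVmcsMap[uWidthType][FieldEnc.n.u8Index];
 *     Assert(offField != UINT16_MAX);                            // UINT16_MAX marks unmapped slots.
 * @endcode
 */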
285
286/**
287 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
288 * relative offsets.
289 */
290# ifdef IEM_WITH_CODE_TLB
291# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) do { } while (0)
292# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) do { } while (0)
293# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
294# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
295# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
296# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
297# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
298# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
299# error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
300# else /* !IEM_WITH_CODE_TLB */
301# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
302 do \
303 { \
304 Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
305 (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
306 } while (0)
307
308# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
309
310# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
311 do \
312 { \
313 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
314 uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
315 uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
316 (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
317 } while (0)
318
319# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
320 do \
321 { \
322 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
323 (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
324 } while (0)
325
326# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
327 do \
328 { \
329 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
330 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
331 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
332 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
333 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
334 (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
335 } while (0)
336
337# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
338 do \
339 { \
340 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
341 (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
342 } while (0)
343
344# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
345 do \
346 { \
347 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
348 (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
349 } while (0)
350
351# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
352 do \
353 { \
354 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
355 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
356 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
357 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
358 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
359 (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
360 } while (0)
361# endif /* !IEM_WITH_CODE_TLB */
362
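/**
 * Editor's note: illustrative example only, not part of the original file. The plain
 * getters assemble the little-endian displacement bytes, whereas the S8_SX variants
 * sign-extend a single displacement byte; pVCpu and offDisp stand for the caller's
 * usual locals.
 *
 * @code
 *     uint32_t u32Disp;
 *     IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp);   // byte 0x80 -> 0xffffff80 (disp8, sign-extended)
 *     IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);         // bytes 80 00 00 00 -> 0x00000080 (disp32)
 * @endcode
 */
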
363/** Whether a shadow VMCS is present for the given VCPU. */
364#define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
365
366
367/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
368#define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->u64VmcsLinkPtr.u)
369
370/** Whether a current VMCS is present for the given VCPU. */
371#define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
372
373/** Gets the guest-physical address of the current VMCS for the given VCPU. */
374#define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
375
376/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
377#define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
378 do \
379 { \
380 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
381 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
382 } while (0)
383
384/** Clears any current VMCS for the given VCPU. */
385#define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
386 do \
387 { \
388 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
389 } while (0)
390
391/** Check the common VMX instruction preconditions.
392 * @note Any changes here, also check if IEMOP_HLP_VMX_INSTR needs updating.
393 */
394#define IEM_VMX_INSTR_CHECKS(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
395 do { \
396 if ( !IEM_IS_REAL_OR_V86_MODE(a_pVCpu) \
397 && ( !IEM_IS_LONG_MODE(a_pVCpu) \
398 || IEM_IS_64BIT_CODE(a_pVCpu))) \
399 { /* likely */ } \
400 else \
401 { \
402 if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
403 { \
404 Log((a_szInstr ": Real or v8086 mode -> #UD\n")); \
405 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
406 return iemRaiseUndefinedOpcode(a_pVCpu); \
407 } \
408 if (IEM_IS_LONG_MODE(a_pVCpu) && !IEM_IS_64BIT_CODE(a_pVCpu)) \
409 { \
410 Log((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
411 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
412 return iemRaiseUndefinedOpcode(a_pVCpu); \
413 } \
414 } \
415 } while (0)
416
417/** Check for VMX instructions that require the CPU to be in VMX operation.
418 * @note Any changes here, check if IEMOP_HLP_IN_VMX_OPERATION needs updating. */
419#define IEM_VMX_IN_VMX_OPERATION(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
420 do \
421 { \
422 if (IEM_IS_VMX_ROOT_MODE(a_pVCpu)) \
423 { /* likely */ } \
424 else \
425 { \
426 Log((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
427 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
428 return iemRaiseUndefinedOpcode(a_pVCpu); \
429 } \
430 } while (0)
431
432/** Marks a VM-entry failure with a diagnostic reason, logs and returns. */
433#define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_InsDiag) \
434 do \
435 { \
436 Log(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_InsDiag), \
437 HMVmxGetDiagDesc(a_InsDiag), (a_pszFailure))); \
438 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_InsDiag); \
439 return VERR_VMX_VMENTRY_FAILED; \
440 } while (0)
441
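/**
 * Editor's note: illustrative sketch only, not part of the original file. Typical use of
 * the failure macro from a VM-entry checking worker; the check, the failure string and
 * the kVmxVDiag_Xxx value are placeholders rather than names taken from this file.
 *
 * @code
 *     if (!fCheckPassed)   // hypothetical VM-entry check
 *         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, "some-failure", kVmxVDiag_Xxx);
 * @endcode
 */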
442
443/**
444 * Returns whether the given VMCS field is valid and supported by our emulation.
445 *
446 * @param pVCpu The cross context virtual CPU structure.
447 * @param u64FieldEnc The VMCS field encoding.
448 *
449 * @remarks This takes into account the CPU features exposed to the guest.
450 */
451IEM_STATIC bool iemVmxIsVmcsFieldValid(PVMCPU pVCpu, uint64_t u64FieldEnc)
452{
453 uint32_t const uFieldEncHi = RT_HI_U32(u64FieldEnc);
454 uint32_t const uFieldEncLo = RT_LO_U32(u64FieldEnc);
455 if (!uFieldEncHi)
456 { /* likely */ }
457 else
458 return false;
459
460 PCCPUMFEATURES pFeat = IEM_GET_GUEST_CPU_FEATURES(pVCpu);
461 switch (uFieldEncLo)
462 {
463 /*
464 * 16-bit fields.
465 */
466 /* Control fields. */
467 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
468 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
469 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
470
471 /* Guest-state fields. */
472 case VMX_VMCS16_GUEST_ES_SEL:
473 case VMX_VMCS16_GUEST_CS_SEL:
474 case VMX_VMCS16_GUEST_SS_SEL:
475 case VMX_VMCS16_GUEST_DS_SEL:
476 case VMX_VMCS16_GUEST_FS_SEL:
477 case VMX_VMCS16_GUEST_GS_SEL:
478 case VMX_VMCS16_GUEST_LDTR_SEL:
479 case VMX_VMCS16_GUEST_TR_SEL:
480 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
481 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
482
483 /* Host-state fields. */
484 case VMX_VMCS16_HOST_ES_SEL:
485 case VMX_VMCS16_HOST_CS_SEL:
486 case VMX_VMCS16_HOST_SS_SEL:
487 case VMX_VMCS16_HOST_DS_SEL:
488 case VMX_VMCS16_HOST_FS_SEL:
489 case VMX_VMCS16_HOST_GS_SEL:
490 case VMX_VMCS16_HOST_TR_SEL: return true;
491
492 /*
493 * 64-bit fields.
494 */
495 /* Control fields. */
496 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
497 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
498 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
499 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
500 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
501 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
502 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
503 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
504 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
505 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
506 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
507 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
508 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
509 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
510 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
511 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
512 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
513 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
514 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
515 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
516 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
517 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
518 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
519 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
520 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
521 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
522 case VMX_VMCS64_CTRL_EPTP_FULL:
523 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
524 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
525 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
526 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
527 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
528 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
529 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
530 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
531 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
532 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
533 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
534 {
535 uint64_t const uVmFuncMsr = CPUMGetGuestIa32VmxVmFunc(pVCpu);
536 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
537 }
538 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
539 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
540 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
541 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
542 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
543 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
544 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
545 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
546 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
547 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
548 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
549 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
550
551 /* Read-only data fields. */
552 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
553 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
554
555 /* Guest-state fields. */
556 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
557 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
558 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
559 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
560 case VMX_VMCS64_GUEST_PAT_FULL:
561 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
562 case VMX_VMCS64_GUEST_EFER_FULL:
563 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
564 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
565 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
566 case VMX_VMCS64_GUEST_PDPTE0_FULL:
567 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
568 case VMX_VMCS64_GUEST_PDPTE1_FULL:
569 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
570 case VMX_VMCS64_GUEST_PDPTE2_FULL:
571 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
572 case VMX_VMCS64_GUEST_PDPTE3_FULL:
573 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
574 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
575 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
576
577 /* Host-state fields. */
578 case VMX_VMCS64_HOST_PAT_FULL:
579 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
580 case VMX_VMCS64_HOST_EFER_FULL:
581 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
582 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
583 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
584
585 /*
586 * 32-bit fields.
587 */
588 /* Control fields. */
589 case VMX_VMCS32_CTRL_PIN_EXEC:
590 case VMX_VMCS32_CTRL_PROC_EXEC:
591 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
592 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
593 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
594 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
595 case VMX_VMCS32_CTRL_EXIT:
596 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
597 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
598 case VMX_VMCS32_CTRL_ENTRY:
599 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
600 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
601 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
602 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
603 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
604 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
605 case VMX_VMCS32_CTRL_PLE_GAP:
606 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
607
608 /* Read-only data fields. */
609 case VMX_VMCS32_RO_VM_INSTR_ERROR:
610 case VMX_VMCS32_RO_EXIT_REASON:
611 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
612 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
613 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
614 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
615 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
616 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
617
618 /* Guest-state fields. */
619 case VMX_VMCS32_GUEST_ES_LIMIT:
620 case VMX_VMCS32_GUEST_CS_LIMIT:
621 case VMX_VMCS32_GUEST_SS_LIMIT:
622 case VMX_VMCS32_GUEST_DS_LIMIT:
623 case VMX_VMCS32_GUEST_FS_LIMIT:
624 case VMX_VMCS32_GUEST_GS_LIMIT:
625 case VMX_VMCS32_GUEST_LDTR_LIMIT:
626 case VMX_VMCS32_GUEST_TR_LIMIT:
627 case VMX_VMCS32_GUEST_GDTR_LIMIT:
628 case VMX_VMCS32_GUEST_IDTR_LIMIT:
629 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
630 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
631 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
632 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
633 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
634 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
635 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
636 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
637 case VMX_VMCS32_GUEST_INT_STATE:
638 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
639 case VMX_VMCS32_GUEST_SMBASE:
640 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
641 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
642
643 /* Host-state fields. */
644 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
645
646 /*
647 * Natural-width fields.
648 */
649 /* Control fields. */
650 case VMX_VMCS_CTRL_CR0_MASK:
651 case VMX_VMCS_CTRL_CR4_MASK:
652 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
653 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
654 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
655 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
656 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
657 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
658
659 /* Read-only data fields. */
660 case VMX_VMCS_RO_EXIT_QUALIFICATION:
661 case VMX_VMCS_RO_IO_RCX:
662 case VMX_VMCS_RO_IO_RSI:
663 case VMX_VMCS_RO_IO_RDI:
664 case VMX_VMCS_RO_IO_RIP:
665 case VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR: return true;
666
667 /* Guest-state fields. */
668 case VMX_VMCS_GUEST_CR0:
669 case VMX_VMCS_GUEST_CR3:
670 case VMX_VMCS_GUEST_CR4:
671 case VMX_VMCS_GUEST_ES_BASE:
672 case VMX_VMCS_GUEST_CS_BASE:
673 case VMX_VMCS_GUEST_SS_BASE:
674 case VMX_VMCS_GUEST_DS_BASE:
675 case VMX_VMCS_GUEST_FS_BASE:
676 case VMX_VMCS_GUEST_GS_BASE:
677 case VMX_VMCS_GUEST_LDTR_BASE:
678 case VMX_VMCS_GUEST_TR_BASE:
679 case VMX_VMCS_GUEST_GDTR_BASE:
680 case VMX_VMCS_GUEST_IDTR_BASE:
681 case VMX_VMCS_GUEST_DR7:
682 case VMX_VMCS_GUEST_RSP:
683 case VMX_VMCS_GUEST_RIP:
684 case VMX_VMCS_GUEST_RFLAGS:
685 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
686 case VMX_VMCS_GUEST_SYSENTER_ESP:
687 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
688
689 /* Host-state fields. */
690 case VMX_VMCS_HOST_CR0:
691 case VMX_VMCS_HOST_CR3:
692 case VMX_VMCS_HOST_CR4:
693 case VMX_VMCS_HOST_FS_BASE:
694 case VMX_VMCS_HOST_GS_BASE:
695 case VMX_VMCS_HOST_TR_BASE:
696 case VMX_VMCS_HOST_GDTR_BASE:
697 case VMX_VMCS_HOST_IDTR_BASE:
698 case VMX_VMCS_HOST_SYSENTER_ESP:
699 case VMX_VMCS_HOST_SYSENTER_EIP:
700 case VMX_VMCS_HOST_RSP:
701 case VMX_VMCS_HOST_RIP: return true;
702 }
703
704 return false;
705}
706
707
708/**
709 * Gets a segment register from the VMCS given its index.
710 *
711 * @returns VBox status code.
712 * @param pVmcs Pointer to the virtual VMCS.
713 * @param iSegReg The index of the segment register (X86_SREG_XXX).
714 * @param pSelReg Where to store the segment register (only updated when
715 * VINF_SUCCESS is returned).
716 *
717 * @remarks Warning! This does not validate the contents of the retrieved segment
718 * register.
719 */
720IEM_STATIC int iemVmxVmcsGetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCPUMSELREG pSelReg)
721{
722 Assert(pSelReg);
723 Assert(iSegReg < X86_SREG_COUNT);
724
725 /* Selector. */
726 uint16_t u16Sel;
727 {
728 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
729 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
730 uint8_t const uWidthType = (uWidth << 2) | uType;
731 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
732 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
733 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
734 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
735 uint8_t const *pbField = pbVmcs + offField;
736 u16Sel = *(uint16_t *)pbField;
737 }
738
739 /* Limit. */
740 uint32_t u32Limit;
741 {
742 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
743 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
744 uint8_t const uWidthType = (uWidth << 2) | uType;
745 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
746 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
747 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
748 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
749 uint8_t const *pbField = pbVmcs + offField;
750 u32Limit = *(uint32_t *)pbField;
751 }
752
753 /* Base. */
754 uint64_t u64Base;
755 {
756 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
757 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
758 uint8_t const uWidthType = (uWidth << 2) | uType;
759 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
760 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
761 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
762 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
763 uint8_t const *pbField = pbVmcs + offField;
764 u64Base = *(uint64_t *)pbField;
765 /** @todo NSTVMX: Should we zero out high bits here for 32-bit virtual CPUs? */
766 }
767
768 /* Attributes. */
769 uint32_t u32Attr;
770 {
771 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
772 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
773 uint8_t const uWidthType = (uWidth << 2) | uType;
774 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
775 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
776 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
777 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
778 uint8_t const *pbField = pbVmcs + offField;
779 u32Attr = *(uint32_t *)pbField;
780 }
781
782 pSelReg->Sel = u16Sel;
783 pSelReg->u32Limit = u32Limit;
784 pSelReg->u64Base = u64Base;
785 pSelReg->Attr.u = u32Attr;
786 return VINF_SUCCESS;
787}
788
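/**
 * Editor's note: illustrative sketch only, not part of the original file. Reading the
 * guest CS register out of a virtual VMCS with the helper above; pVmcs stands for any
 * valid virtual-VMCS pointer the caller already holds.
 *
 * @code
 *     CPUMSELREG CsReg;
 *     int const rc = iemVmxVmcsGetGuestSegReg(pVmcs, X86_SREG_CS, &CsReg);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // CsReg.Sel, CsReg.u32Limit, CsReg.u64Base and CsReg.Attr.u are now filled,
 *         // but as the remark above notes, their contents are not validated here.
 *     }
 * @endcode
 */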
789
790/**
791 * Gets VM-exit instruction information along with any displacement for an
792 * instruction VM-exit.
793 *
794 * @returns The VM-exit instruction information.
795 * @param pVCpu The cross context virtual CPU structure.
796 * @param uExitReason The VM-exit reason.
797 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_XXX) if
798 * any. Pass VMXINSTRID_NONE otherwise.
799 * @param fPrimaryOpRead If the primary operand of the ModR/M byte (bits 0:3) is
800 * a read or write.
801 * @param pGCPtrDisp Where to store the displacement field. Optional, can be
802 * NULL.
803 */
804IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, bool fPrimaryOpRead,
805 PRTGCPTR pGCPtrDisp)
806{
807 RTGCPTR GCPtrDisp;
808 VMXEXITINSTRINFO ExitInstrInfo;
809 ExitInstrInfo.u = 0;
810
811 /*
812 * Get and parse the ModR/M byte from our decoded opcodes.
813 */
814 uint8_t bRm;
815 uint8_t const offModRm = pVCpu->iem.s.offModRm;
816 IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
817 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
818 {
819 /*
820 * ModR/M indicates register addressing.
821 *
822 * The primary/secondary register operands are reported in the iReg1 or iReg2
823 * fields depending on whether it is a read/write form.
824 */
825 uint8_t idxReg1;
826 uint8_t idxReg2;
827 if (fPrimaryOpRead)
828 {
829 idxReg1 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
830 idxReg2 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
831 }
832 else
833 {
834 idxReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
835 idxReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
836 }
837 ExitInstrInfo.All.u2Scaling = 0;
838 ExitInstrInfo.All.iReg1 = idxReg1;
839 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
840 ExitInstrInfo.All.fIsRegOperand = 1;
841 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
842 ExitInstrInfo.All.iSegReg = 0;
843 ExitInstrInfo.All.iIdxReg = 0;
844 ExitInstrInfo.All.fIdxRegInvalid = 1;
845 ExitInstrInfo.All.iBaseReg = 0;
846 ExitInstrInfo.All.fBaseRegInvalid = 1;
847 ExitInstrInfo.All.iReg2 = idxReg2;
848
849 /* Displacement not applicable for register addressing. */
850 GCPtrDisp = 0;
851 }
852 else
853 {
854 /*
855 * ModR/M indicates memory addressing.
856 */
857 uint8_t uScale = 0;
858 bool fBaseRegValid = false;
859 bool fIdxRegValid = false;
860 uint8_t iBaseReg = 0;
861 uint8_t iIdxReg = 0;
862 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
863 {
864 /*
865 * Parse the ModR/M, displacement for 16-bit addressing mode.
866 * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
867 */
868 uint16_t u16Disp = 0;
869 uint8_t const offDisp = offModRm + sizeof(bRm);
870 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
871 {
872 /* Displacement without any registers. */
873 IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
874 }
875 else
876 {
877 /* Register (index and base). */
878 switch (bRm & X86_MODRM_RM_MASK)
879 {
880 case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
881 case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
882 case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
883 case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
884 case 4: fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
885 case 5: fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
886 case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
887 case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
888 }
889
890 /* Register + displacement. */
891 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
892 {
893 case 0: break;
894 case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
895 case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp); break;
896 default:
897 {
898 /* Register addressing, handled at the beginning. */
899 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
900 break;
901 }
902 }
903 }
904
905 Assert(!uScale); /* There's no scaling/SIB byte for 16-bit addressing. */
906 GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
907 }
908 else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
909 {
910 /*
911 * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
912 * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
913 */
914 uint32_t u32Disp = 0;
915 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
916 {
917 /* Displacement without any registers. */
918 uint8_t const offDisp = offModRm + sizeof(bRm);
919 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
920 }
921 else
922 {
923 /* Register (and perhaps scale, index and base). */
924 uint8_t offDisp = offModRm + sizeof(bRm);
925 iBaseReg = (bRm & X86_MODRM_RM_MASK);
926 if (iBaseReg == 4)
927 {
928 /* An SIB byte follows the ModR/M byte, parse it. */
929 uint8_t bSib;
930 uint8_t const offSib = offModRm + sizeof(bRm);
931 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
932
933 /* A displacement may follow SIB, update its offset. */
934 offDisp += sizeof(bSib);
935
936 /* Get the scale. */
937 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
938
939 /* Get the index register. */
940 iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
941 fIdxRegValid = RT_BOOL(iIdxReg != 4);
942
943 /* Get the base register. */
944 iBaseReg = bSib & X86_SIB_BASE_MASK;
945 fBaseRegValid = true;
946 if (iBaseReg == 5)
947 {
948 if ((bRm & X86_MODRM_MOD_MASK) == 0)
949 {
950 /* Mod is 0 implies a 32-bit displacement with no base. */
951 fBaseRegValid = false;
952 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
953 }
954 else
955 {
956 /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
957 iBaseReg = X86_GREG_xBP;
958 }
959 }
960 }
961
962 /* Register + displacement. */
963 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
964 {
965 case 0: /* Handled above */ break;
966 case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
967 case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp); break;
968 default:
969 {
970 /* Register addressing, handled at the beginning. */
971 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
972 break;
973 }
974 }
975 }
976
977 GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
978 }
979 else
980 {
981 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
982
983 /*
984 * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
985 * See Intel instruction spec. 2.2 "IA-32e Mode".
986 */
987 uint64_t u64Disp = 0;
988 bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
989 if (fRipRelativeAddr)
990 {
991 /*
992 * RIP-relative addressing mode.
993 *
994 * The displacement is 32-bit signed, implying an offset range of +/-2G.
995 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
996 */
997 uint8_t const offDisp = offModRm + sizeof(bRm);
998 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
999 }
1000 else
1001 {
1002 uint8_t offDisp = offModRm + sizeof(bRm);
1003
1004 /*
1005 * Register (and perhaps scale, index and base).
1006 *
1007 * REX.B extends the most-significant bit of the base register. However, REX.B
1008 * is ignored while determining whether an SIB follows the opcode. Hence, we
1009 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
1010 *
1011 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
1012 */
1013 iBaseReg = (bRm & X86_MODRM_RM_MASK);
1014 if (iBaseReg == 4)
1015 {
1016 /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
1017 uint8_t bSib;
1018 uint8_t const offSib = offModRm + sizeof(bRm);
1019 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
1020
1021 /* Displacement may follow SIB, update its offset. */
1022 offDisp += sizeof(bSib);
1023
1024 /* Get the scale. */
1025 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
1026
1027 /* Get the index. */
1028 iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
1029 fIdxRegValid = RT_BOOL(iIdxReg != 4); /* R12 -can- be used as an index register. */
1030
1031 /* Get the base. */
1032 iBaseReg = (bSib & X86_SIB_BASE_MASK);
1033 fBaseRegValid = true;
1034 if (iBaseReg == 5)
1035 {
1036 if ((bRm & X86_MODRM_MOD_MASK) == 0)
1037 {
1038 /* Mod is 0 implies a signed 32-bit displacement with no base. */
1039 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
1040 }
1041 else
1042 {
1043 /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
1044 iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
1045 }
1046 }
1047 }
1048 iBaseReg |= pVCpu->iem.s.uRexB;
1049
1050 /* Register + displacement. */
1051 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
1052 {
1053 case 0: /* Handled above */ break;
1054 case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp); break;
1055 case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
1056 default:
1057 {
1058 /* Register addressing, handled at the beginning. */
1059 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
1060 break;
1061 }
1062 }
1063 }
1064
1065 GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
1066 }
1067
1068 /*
1069 * The primary or secondary register operand is reported in iReg2 depending
1070 * on whether the primary operand is in read/write form.
1071 */
1072 uint8_t idxReg2;
1073 if (fPrimaryOpRead)
1074 {
1075 idxReg2 = bRm & X86_MODRM_RM_MASK;
1076 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
1077 idxReg2 |= pVCpu->iem.s.uRexB;
1078 }
1079 else
1080 {
1081 idxReg2 = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
1082 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
1083 idxReg2 |= pVCpu->iem.s.uRexReg;
1084 }
1085 ExitInstrInfo.All.u2Scaling = uScale;
1086 ExitInstrInfo.All.iReg1 = 0; /* Not applicable for memory addressing. */
1087 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
1088 ExitInstrInfo.All.fIsRegOperand = 0;
1089 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
1090 ExitInstrInfo.All.iSegReg = pVCpu->iem.s.iEffSeg;
1091 ExitInstrInfo.All.iIdxReg = iIdxReg;
1092 ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
1093 ExitInstrInfo.All.iBaseReg = iBaseReg;
1094 ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
1095 ExitInstrInfo.All.iReg2 = idxReg2;
1096 }
1097
1098 /*
1099 * Handle exceptions to the norm for certain instructions.
1100 * (e.g. some instructions convey an instruction identity in place of iReg2).
1101 */
1102 switch (uExitReason)
1103 {
1104 case VMX_EXIT_GDTR_IDTR_ACCESS:
1105 {
1106 Assert(VMXINSTRID_IS_VALID(uInstrId));
1107 ExitInstrInfo.GdtIdt.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
1108 ExitInstrInfo.GdtIdt.u2Undef0 = 0;
1109 break;
1110 }
1111
1112 case VMX_EXIT_LDTR_TR_ACCESS:
1113 {
1114 Assert(VMXINSTRID_IS_VALID(uInstrId));
1115 ExitInstrInfo.LdtTr.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
1116 ExitInstrInfo.LdtTr.u2Undef0 = 0;
1117 break;
1118 }
1119
1120 case VMX_EXIT_RDRAND:
1121 case VMX_EXIT_RDSEED:
1122 {
1123 Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
1124 break;
1125 }
1126 }
1127
1128 /* Update displacement and return the constructed VM-exit instruction information field. */
1129 if (pGCPtrDisp)
1130 *pGCPtrDisp = GCPtrDisp;
1131 return ExitInstrInfo.u;
1132}
1133
1134
1135/**
1136 * Implements VMSucceed for VMX instruction success.
1137 *
1138 * @param pVCpu The cross context virtual CPU structure.
1139 */
1140DECL_FORCE_INLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
1141{
1142 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1143}
1144
1145
1146/**
1147 * Implements VMFailInvalid for VMX instruction failure.
1148 *
1149 * @param pVCpu The cross context virtual CPU structure.
1150 */
1151DECL_FORCE_INLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
1152{
1153 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1154 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_CF;
1155}
1156
1157
1158/**
1159 * Implements VMFailValid for VMX instruction failure.
1160 *
1161 * @param pVCpu The cross context virtual CPU structure.
1162 * @param enmInsErr The VM instruction error.
1163 */
1164DECL_FORCE_INLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1165{
1166 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1167 {
1168 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1169 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_ZF;
1170 /** @todo NSTVMX: VMWrite enmInsErr to VM-instruction error field. */
1171 RT_NOREF(enmInsErr);
1172 }
1173}
1174
1175
1176/**
1177 * Implements VMFail for VMX instruction failure.
1178 *
1179 * @param pVCpu The cross context virtual CPU structure.
1180 * @param enmInsErr The VM instruction error.
1181 */
1182DECL_FORCE_INLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1183{
1184 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1185 {
1186 iemVmxVmFailValid(pVCpu, enmInsErr);
1187 /** @todo Set VM-instruction error field in the current virtual-VMCS. */
1188 }
1189 else
1190 iemVmxVmFailInvalid(pVCpu);
1191}
1192
1193
1194/**
1195 * Flushes the current VMCS contents back to guest memory.
1196 *
1197 * @returns VBox status code.
1198 * @param pVCpu The cross context virtual CPU structure.
1199 */
1200DECL_FORCE_INLINE(int) iemVmxCommitCurrentVmcsToMemory(PVMCPU pVCpu)
1201{
1202 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1203 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
1204 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), sizeof(VMXVVMCS));
1205 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
1206 return rc;
1207}
1208
1209
1210/**
1211 * Implements VMSucceed for the VMREAD instruction and increments the guest RIP.
1212 *
1213 * @param pVCpu The cross context virtual CPU structure.
1214 */
1215DECL_FORCE_INLINE(void) iemVmxVmreadSuccess(PVMCPU pVCpu, uint8_t cbInstr)
1216{
1217 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_Success;
1218 iemVmxVmSucceed(pVCpu);
1219 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1220}
1221
1222
1223/**
1224 * VMREAD common (memory/register) instruction execution worker
1225 *
1226 * @param pVCpu The cross context virtual CPU structure.
1227 * @param cbInstr The instruction length.
1228 * @param pu64Dst Where to write the VMCS value (only updated when
1229 * VINF_SUCCESS is returned).
1230 * @param u64FieldEnc The VMCS field encoding.
1231 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1232 * be NULL.
1233 */
1234IEM_STATIC VBOXSTRICTRC iemVmxVmreadCommon(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
1235 PCVMXVEXITINFO pExitInfo)
1236{
1237 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1238 {
1239 RT_NOREF(pExitInfo); RT_NOREF(cbInstr);
1240 /** @todo NSTVMX: intercept. */
1241 /** @todo NSTVMX: VMCS shadowing intercept (VMREAD bitmap). */
1242 }
1243
1244 /* CPL. */
1245 if (pVCpu->iem.s.uCpl > 0)
1246 {
1247 Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1248 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_Cpl;
1249 return iemRaiseGeneralProtectionFault0(pVCpu);
1250 }
1251
1252 /* VMCS pointer in root mode. */
1253 if ( IEM_IS_VMX_ROOT_MODE(pVCpu)
1254 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1255 {
1256 Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
1257 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrInvalid;
1258 iemVmxVmFailInvalid(pVCpu);
1259 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1260 return VINF_SUCCESS;
1261 }
1262
1263 /* VMCS-link pointer in non-root mode. */
1264 if ( IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
1265 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
1266 {
1267 Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
1268 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_LinkPtrInvalid;
1269 iemVmxVmFailInvalid(pVCpu);
1270 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1271 return VINF_SUCCESS;
1272 }
1273
1274 /* Supported VMCS field. */
1275 if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
1276 {
1277 Log(("vmread: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
1278 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_FieldInvalid;
1279 iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
1280 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1281 return VINF_SUCCESS;
1282 }
1283
1284 /*
1285 * Setup reading from the current or shadow VMCS.
1286 */
1287 uint8_t *pbVmcs;
1288 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1289 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
1290 else
1291 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1292 Assert(pbVmcs);
1293
1294 VMXVMCSFIELDENC FieldEnc;
1295 FieldEnc.u = RT_LO_U32(u64FieldEnc);
1296 uint8_t const uWidth = FieldEnc.n.u2Width;
1297 uint8_t const uType = FieldEnc.n.u2Type;
1298 uint8_t const uWidthType = (uWidth << 2) | uType;
1299 uint8_t const uIndex = FieldEnc.n.u8Index;
1300 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
1301 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
1302
1303 /*
1304 * Read the VMCS component based on the field's effective width.
1305 *
1306 * The effective width is the field's width, with 64-bit fields adjusted to 32 bits
1307 * when the access type indicates the high dword (little endian).
1308 *
1309 * Note! The caller is responsible for trimming the result and updating registers
1310 * or memory locations as required. Here we just zero-extend to the largest
1311 * type (i.e. 64-bits).
1312 */
1313 uint8_t *pbField = pbVmcs + offField;
1314 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
1315 switch (uEffWidth)
1316 {
1317 case VMX_VMCS_ENC_WIDTH_64BIT:
1318 case VMX_VMCS_ENC_WIDTH_NATURAL: *pu64Dst = *(uint64_t *)pbField; break;
1319 case VMX_VMCS_ENC_WIDTH_32BIT: *pu64Dst = *(uint32_t *)pbField; break;
1320 case VMX_VMCS_ENC_WIDTH_16BIT: *pu64Dst = *(uint16_t *)pbField; break;
1321 }
1322 return VINF_SUCCESS;
1323}
1324
1325
1326/**
1327 * VMREAD (64-bit register) instruction execution worker.
1328 *
1329 * @param pVCpu The cross context virtual CPU structure.
1330 * @param cbInstr The instruction length.
1331 * @param pu64Dst Where to store the VMCS field's value.
1332 * @param u64FieldEnc The VMCS field encoding.
1333 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1334 * be NULL.
1335 */
1336IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg64(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
1337 PCVMXVEXITINFO pExitInfo)
1338{
1339 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
1340 if (rcStrict == VINF_SUCCESS)
1341 {
1342 iemVmxVmreadSuccess(pVCpu, cbInstr);
1343 return VINF_SUCCESS;
1344 }
1345
1346 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1347 return rcStrict;
1348}
1349
1350
1351/**
1352 * VMREAD (32-bit register) instruction execution worker.
1353 *
1354 * @param pVCpu The cross context virtual CPU structure.
1355 * @param cbInstr The instruction length.
1356 * @param pu32Dst Where to store the VMCS field's value.
1357 * @param u32FieldEnc The VMCS field encoding.
1358 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1359 * be NULL.
1360 */
1361IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg32(PVMCPU pVCpu, uint8_t cbInstr, uint32_t *pu32Dst, uint64_t u32FieldEnc,
1362 PCVMXVEXITINFO pExitInfo)
1363{
1364 uint64_t u64Dst;
1365 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u32FieldEnc, pExitInfo);
1366 if (rcStrict == VINF_SUCCESS)
1367 {
1368 *pu32Dst = u64Dst;
1369 iemVmxVmreadSuccess(pVCpu, cbInstr);
1370 return VINF_SUCCESS;
1371 }
1372
1373 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1374 return rcStrict;
1375}
1376
1377
1378/**
1379 * VMREAD (memory) instruction execution worker.
1380 *
1381 * @param pVCpu The cross context virtual CPU structure.
1382 * @param cbInstr The instruction length.
1383 * @param iEffSeg The effective segment register to use with @a u64Val.
1384 * Pass UINT8_MAX if it is a register access.
1385 * @param enmEffAddrMode The effective addressing mode (only used with memory
1386 * operand).
1387 * @param GCPtrDst The guest linear address to store the VMCS field's
1388 * value.
1389 * @param u64FieldEnc The VMCS field encoding.
1390 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1391 * be NULL.
1392 */
1393IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode,
1394 RTGCPTR GCPtrDst, uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
1395{
1396 uint64_t u64Dst;
1397 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u64FieldEnc, pExitInfo);
1398 if (rcStrict == VINF_SUCCESS)
1399 {
1400 /*
1401 * Write the VMCS field's value to the location specified in guest-memory.
1402 *
1403 * The pointer size depends on the address size (address-size prefix allowed).
1404 * The operand size depends on IA-32e mode (operand-size prefix not allowed).
1405 */
1406 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
1407 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
1408 GCPtrDst &= s_auAddrSizeMasks[enmEffAddrMode];
1409
1410 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1411 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
1412 else
1413 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
1414 if (rcStrict == VINF_SUCCESS)
1415 {
1416 iemVmxVmreadSuccess(pVCpu, cbInstr);
1417 return VINF_SUCCESS;
1418 }
1419
1420 Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
1421 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrMap;
1422 return rcStrict;
1423 }
1424
1425 Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1426 return rcStrict;
1427}
1428
1429
1430/**
1431 * VMWRITE instruction execution worker.
1432 *
1433 * @param pVCpu The cross context virtual CPU structure.
1434 * @param cbInstr The instruction length.
1435 * @param iEffSeg The effective segment register to use with @a u64Val.
1436 * Pass UINT8_MAX if it is a register access.
1437 * @param enmEffAddrMode The effective addressing mode (only used with memory
1438 * operand).
1439 * @param u64Val The value to write (or guest linear address to the
1440 * value), @a iEffSeg will indicate if it's a memory
1441 * operand.
1442 * @param u64FieldEnc The VMCS field encoding.
1443 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1444 * be NULL.
1445 */
1446IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode, uint64_t u64Val,
1447 uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
1448{
1449 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1450 {
1451 RT_NOREF(pExitInfo);
1452 /** @todo NSTVMX: intercept. */
1453 /** @todo NSTVMX: VMCS shadowing intercept (VMWRITE bitmap). */
1454 }
1455
1456 /* CPL. */
1457 if (pVCpu->iem.s.uCpl > 0)
1458 {
1459 Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1460 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_Cpl;
1461 return iemRaiseGeneralProtectionFault0(pVCpu);
1462 }
1463
1464 /* VMCS pointer in root mode. */
1465 if ( IEM_IS_VMX_ROOT_MODE(pVCpu)
1466 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1467 {
1468 Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
1469 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrInvalid;
1470 iemVmxVmFailInvalid(pVCpu);
1471 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1472 return VINF_SUCCESS;
1473 }
1474
1475 /* VMCS-link pointer in non-root mode. */
1476 if ( IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
1477 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
1478 {
1479 Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
1480 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_LinkPtrInvalid;
1481 iemVmxVmFailInvalid(pVCpu);
1482 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1483 return VINF_SUCCESS;
1484 }
1485
1486 /* If the VMWRITE instruction references memory, access the specified memory operand. */
1487 bool const fIsRegOperand = iEffSeg == UINT8_MAX;
1488 if (!fIsRegOperand)
1489 {
1490 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
1491 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
1492 RTGCPTR const GCPtrVal = u64Val & s_auAddrSizeMasks[enmEffAddrMode];
1493
1494 /* Read the value from the specified guest memory location. */
1495 VBOXSTRICTRC rcStrict;
1496 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1497 rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
1498 else
1499 {
1500 uint32_t u32Val;
1501 rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iEffSeg, GCPtrVal);
1502 u64Val = u32Val;
1503 }
1504 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1505 {
1506 Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
1507 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrMap;
1508 return rcStrict;
1509 }
1510 }
1511 else
1512 Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
1513
1514 /* Supported VMCS field. */
1515 if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
1516 {
1517 Log(("vmwrite: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
1518 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldInvalid;
1519 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
1520 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1521 return VINF_SUCCESS;
1522 }
1523
1524 /* Read-only VMCS field. */
1525 bool const fReadOnlyField = HMVmxIsVmcsFieldReadOnly(u64FieldEnc);
1526 if ( fReadOnlyField
1527 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
1528 {
1529 Log(("vmwrite: Write to read-only VMCS component %#RX64 -> VMFail\n", u64FieldEnc));
1530 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldRo;
1531 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
1532 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1533 return VINF_SUCCESS;
1534 }
1535
1536 /*
1537 * Setup writing to the current or shadow VMCS.
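     * In VMX non-root mode (when not intercepted) the write lands in the shadow VMCS
     * referenced by the VMCS-link pointer; otherwise it lands in the current VMCS.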
1538 */
1539 uint8_t *pbVmcs;
1540 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1541 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
1542 else
1543 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1544 Assert(pbVmcs);
1545
1546 VMXVMCSFIELDENC FieldEnc;
1547 FieldEnc.u = RT_LO_U32(u64FieldEnc);
1548 uint8_t const uWidth = FieldEnc.n.u2Width;
1549 uint8_t const uType = FieldEnc.n.u2Type;
1550 uint8_t const uWidthType = (uWidth << 2) | uType;
1551 uint8_t const uIndex = FieldEnc.n.u8Index;
1552    AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
1553 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
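    /* For illustration: the guest RFLAGS field (encoding 0x6820) decodes to width 3 (natural),
       type 2 (guest-state) and index 16, i.e. row (3 << 2) | 2 = 14 of g_aoffVmcsMap. */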
1554
1555 /*
1556 * Write the VMCS component based on the field's effective width.
1557 *
1558     * The effective width of a 64-bit field is adjusted to 32 bits if the access-type
1559     * indicates the high part of the field (little endian layout).
1560 */
1561 uint8_t *pbField = pbVmcs + offField;
1562 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
1563 switch (uEffWidth)
1564 {
1565 case VMX_VMCS_ENC_WIDTH_64BIT:
1566 case VMX_VMCS_ENC_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
1567 case VMX_VMCS_ENC_WIDTH_32BIT: *(uint32_t *)pbField = u64Val; break;
1568 case VMX_VMCS_ENC_WIDTH_16BIT: *(uint16_t *)pbField = u64Val; break;
1569 }
1570
1571 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_Success;
1572 iemVmxVmSucceed(pVCpu);
1573 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1574 return VINF_SUCCESS;
1575}
1576
1577
1578/**
1579 * VMCLEAR instruction execution worker.
1580 *
1581 * @param pVCpu The cross context virtual CPU structure.
1582 * @param cbInstr The instruction length.
1583 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
1584 * @param GCPtrVmcs The linear address of the VMCS pointer.
1585 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1586 * be NULL.
1587 *
1588 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
1589 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1590 */
1591IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
1592 PCVMXVEXITINFO pExitInfo)
1593{
1594 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1595 {
1596 RT_NOREF(pExitInfo);
1597 /** @todo NSTVMX: intercept. */
1598 }
1599 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1600
1601 /* CPL. */
1602 if (pVCpu->iem.s.uCpl > 0)
1603 {
1604 Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1605 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_Cpl;
1606 return iemRaiseGeneralProtectionFault0(pVCpu);
1607 }
1608
1609 /* Get the VMCS pointer from the location specified by the source memory operand. */
1610 RTGCPHYS GCPhysVmcs;
1611 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
1612 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1613 {
1614 Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
1615 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrMap;
1616 return rcStrict;
1617 }
1618
1619 /* VMCS pointer alignment. */
1620 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
1621 {
1622 Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
1623 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAlign;
1624 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1625 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1626 return VINF_SUCCESS;
1627 }
1628
1629 /* VMCS physical-address width limits. */
1630 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
1631 {
1632 Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
1633 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrWidth;
1634 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1635 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1636 return VINF_SUCCESS;
1637 }
1638
1639 /* VMCS is not the VMXON region. */
1640 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
1641 {
1642 Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
1643 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrVmxon;
1644 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
1645 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1646 return VINF_SUCCESS;
1647 }
1648
1649 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
1650 restriction imposed by our implementation. */
1651 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
1652 {
1653 Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
1654 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAbnormal;
1655 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1656 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1657 return VINF_SUCCESS;
1658 }
1659
1660 /*
1661 * VMCLEAR allows committing and clearing any valid VMCS pointer.
1662 *
1663 * If the current VMCS is the one being cleared, set its state to 'clear' and commit
1664 * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
1665 * to 'clear'.
1666 */
1667 uint8_t const fVmcsStateClear = VMX_V_VMCS_STATE_CLEAR;
1668 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
1669 {
1670 Assert(GCPhysVmcs != NIL_RTGCPHYS); /* Paranoia. */
1671 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
1672 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsStateClear;
1673 iemVmxCommitCurrentVmcsToMemory(pVCpu);
1674 Assert(!IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1675 }
1676 else
1677 {
1678        rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_OFFSETOF(VMXVVMCS, fVmcsState),
1679 (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
1680 }
1681
1682 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_Success;
1683 iemVmxVmSucceed(pVCpu);
1684 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1685 return rcStrict;
1686}
1687
1688
1689/**
1690 * VMPTRST instruction execution worker.
1691 *
1692 * @param pVCpu The cross context virtual CPU structure.
1693 * @param cbInstr The instruction length.
1694 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
1695 * @param GCPtrVmcs The linear address of where to store the current VMCS
1696 * pointer.
1697 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1698 * be NULL.
1699 *
1700 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
1701 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1702 */
1703IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
1704 PCVMXVEXITINFO pExitInfo)
1705{
1706 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1707 {
1708 RT_NOREF(pExitInfo);
1709 /** @todo NSTVMX: intercept. */
1710 }
1711 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1712
1713 /* CPL. */
1714 if (pVCpu->iem.s.uCpl > 0)
1715 {
1716 Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1717 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_Cpl;
1718 return iemRaiseGeneralProtectionFault0(pVCpu);
1719 }
1720
1721 /* Set the VMCS pointer to the location specified by the destination memory operand. */
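    /* If no VMCS is current, IEM_VMX_GET_CURRENT_VMCS yields NIL_RTGCPHYS; the AssertCompile
       below documents that this equals the all-ones value VMPTRST is expected to store. */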
1722 AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
1723 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
1724 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1725 {
1726 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_Success;
1727 iemVmxVmSucceed(pVCpu);
1728 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1729 return rcStrict;
1730 }
1731
1732 Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand %#Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1733 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_PtrMap;
1734 return rcStrict;
1735}
1736
1737
1738/**
1739 * VMPTRLD instruction execution worker.
1740 *
1741 * @param pVCpu The cross context virtual CPU structure.
1742 * @param cbInstr The instruction length.
1743 * @param   iEffSeg         The effective segment register to use with @a GCPtrVmcs.
 * @param   GCPtrVmcs       The linear address of the current VMCS pointer.
1744 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1745 * be NULL.
1746 *
1747 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
1748 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1749 */
1750IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
1751 PCVMXVEXITINFO pExitInfo)
1752{
1753 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1754 {
1755 RT_NOREF(pExitInfo);
1756 /** @todo NSTVMX: intercept. */
1757 }
1758 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1759
1760 /* CPL. */
1761 if (pVCpu->iem.s.uCpl > 0)
1762 {
1763 Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1764 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_Cpl;
1765 return iemRaiseGeneralProtectionFault0(pVCpu);
1766 }
1767
1768 /* Get the VMCS pointer from the location specified by the source memory operand. */
1769 RTGCPHYS GCPhysVmcs;
1770 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
1771 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1772 {
1773 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
1774 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrMap;
1775 return rcStrict;
1776 }
1777
1778 /* VMCS pointer alignment. */
1779 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
1780 {
1781 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
1782 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAlign;
1783 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1784 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1785 return VINF_SUCCESS;
1786 }
1787
1788 /* VMCS physical-address width limits. */
1789 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
1790 {
1791 Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
1792 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrWidth;
1793 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1794 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1795 return VINF_SUCCESS;
1796 }
1797
1798 /* VMCS is not the VMXON region. */
1799 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
1800 {
1801 Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
1802 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrVmxon;
1803 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
1804 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1805 return VINF_SUCCESS;
1806 }
1807
1808 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
1809 restriction imposed by our implementation. */
1810 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
1811 {
1812 Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
1813 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAbnormal;
1814 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1815 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1816 return VINF_SUCCESS;
1817 }
1818
1819 /* Read the VMCS revision ID from the VMCS. */
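    /* Bits 30:0 of the first VMCS dword hold the VMCS revision identifier, bit 31 is the
       shadow-VMCS indicator. See Intel spec. 24.2 "Format of the VMCS Region". */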
1820 VMXVMCSREVID VmcsRevId;
1821 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
1822 if (RT_FAILURE(rc))
1823 {
1824 Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
1825 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrReadPhys;
1826 return rc;
1827 }
1828
1829    /* Verify the VMCS revision specified by the guest matches what we reported to the guest;
1830       if it claims to be a shadow VMCS, also verify that VMCS shadowing is exposed to the guest. */
1831 if ( VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
1832 || ( VmcsRevId.n.fIsShadowVmcs
1833 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
1834 {
1835 if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
1836 {
1837 Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFail()\n", VMX_V_VMCS_REVISION_ID,
1838 VmcsRevId.n.u31RevisionId));
1839 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_VmcsRevId;
1840 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
1841 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1842 return VINF_SUCCESS;
1843 }
1844
1845 Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
1846 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_ShadowVmcs;
1847 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
1848 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1849 return VINF_SUCCESS;
1850 }
1851
1852 /*
1853     * We only maintain the current VMCS in our virtual CPU context (CPUMCTX). Therefore,
1854 * VMPTRLD shall always flush any existing current VMCS back to guest memory before loading
1855 * a new VMCS as current.
1856 */
1857 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
1858 {
1859 iemVmxCommitCurrentVmcsToMemory(pVCpu);
1860 IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
1861 }
1862 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_Success;
1863 iemVmxVmSucceed(pVCpu);
1864 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1865 return VINF_SUCCESS;
1866}
1867
1868
1869/**
1870 * VMXON instruction execution worker.
1871 *
1872 * @param pVCpu The cross context virtual CPU structure.
1873 * @param cbInstr The instruction length.
1874 * @param iEffSeg The effective segment register to use with @a
1875 * GCPtrVmxon.
1876 * @param GCPtrVmxon The linear address of the VMXON pointer.
1877 * @param pExitInfo Pointer to the VM-exit instruction information struct.
1878 * Optional, can be NULL.
1879 *
1880 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
1881 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1882 */
1883IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmxon,
1884 PCVMXVEXITINFO pExitInfo)
1885{
1886#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1887 RT_NOREF5(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
1888 return VINF_EM_RAW_EMULATE_INSTR;
1889#else
1890 if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
1891 {
1892 /* CPL. */
1893 if (pVCpu->iem.s.uCpl > 0)
1894 {
1895 Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1896 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cpl;
1897 return iemRaiseGeneralProtectionFault0(pVCpu);
1898 }
1899
1900 /* A20M (A20 Masked) mode. */
1901 if (!PGMPhysIsA20Enabled(pVCpu))
1902 {
1903 Log(("vmxon: A20M mode -> #GP(0)\n"));
1904 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_A20M;
1905 return iemRaiseGeneralProtectionFault0(pVCpu);
1906 }
1907
1908 /* CR0 fixed bits. */
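        /* Every bit set in the fixed0 value must also be set in CR0. With unrestricted guest
           exposed, PE and PG are exempt (presumably why the VMX_V_CR0_FIXED0_UX variant exists). */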
1909 bool const fUnrestrictedGuest = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxUnrestrictedGuest;
1910 uint64_t const uCr0Fixed0 = fUnrestrictedGuest ? VMX_V_CR0_FIXED0_UX : VMX_V_CR0_FIXED0;
1911 if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
1912 {
1913 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
1914 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed0;
1915 return iemRaiseGeneralProtectionFault0(pVCpu);
1916 }
1917
1918 /* CR4 fixed bits. */
1919 if ((pVCpu->cpum.GstCtx.cr4 & VMX_V_CR4_FIXED0) != VMX_V_CR4_FIXED0)
1920 {
1921 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
1922 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed0;
1923 return iemRaiseGeneralProtectionFault0(pVCpu);
1924 }
1925
1926 /* Feature control MSR's LOCK and VMXON bits. */
1927 uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
1928 if (!(uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON)))
1929 {
1930 Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
1931 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_MsrFeatCtl;
1932 return iemRaiseGeneralProtectionFault0(pVCpu);
1933 }
1934
1935 /* Get the VMXON pointer from the location specified by the source memory operand. */
1936 RTGCPHYS GCPhysVmxon;
1937 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
1938 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1939 {
1940 Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
1941 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrMap;
1942 return rcStrict;
1943 }
1944
1945 /* VMXON region pointer alignment. */
1946 if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
1947 {
1948 Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
1949 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAlign;
1950 iemVmxVmFailInvalid(pVCpu);
1951 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1952 return VINF_SUCCESS;
1953 }
1954
1955 /* VMXON physical-address width limits. */
1956 if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
1957 {
1958 Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
1959 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrWidth;
1960 iemVmxVmFailInvalid(pVCpu);
1961 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1962 return VINF_SUCCESS;
1963 }
1964
1965 /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
1966 restriction imposed by our implementation. */
1967 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
1968 {
1969 Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
1970 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAbnormal;
1971 iemVmxVmFailInvalid(pVCpu);
1972 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1973 return VINF_SUCCESS;
1974 }
1975
1976 /* Read the VMCS revision ID from the VMXON region. */
1977 VMXVMCSREVID VmcsRevId;
1978 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
1979 if (RT_FAILURE(rc))
1980 {
1981 Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
1982 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrReadPhys;
1983 return rc;
1984 }
1985
1986 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
1987 if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
1988 {
1989 /* Revision ID mismatch. */
1990 if (!VmcsRevId.n.fIsShadowVmcs)
1991 {
1992 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
1993 VmcsRevId.n.u31RevisionId));
1994 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmcsRevId;
1995 iemVmxVmFailInvalid(pVCpu);
1996 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1997 return VINF_SUCCESS;
1998 }
1999
2000 /* Shadow VMCS disallowed. */
2001 Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
2002 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_ShadowVmcs;
2003 iemVmxVmFailInvalid(pVCpu);
2004 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2005 return VINF_SUCCESS;
2006 }
2007
2008 /*
2009 * Record that we're in VMX operation, block INIT, block and disable A20M.
2010 */
2011 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
2012 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
2013 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
2014 /** @todo NSTVMX: clear address-range monitoring. */
2015 /** @todo NSTVMX: Intel PT. */
2016 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Success;
2017 iemVmxVmSucceed(pVCpu);
2018 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2019# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
2020 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
2021# else
2022 return VINF_SUCCESS;
2023# endif
2024 }
2025 else if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
2026 {
2027 RT_NOREF(pExitInfo);
2028 /** @todo NSTVMX: intercept. */
2029 }
2030
2031 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
2032
2033 /* CPL. */
2034 if (pVCpu->iem.s.uCpl > 0)
2035 {
2036 Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
2037 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxRootCpl;
2038 return iemRaiseGeneralProtectionFault0(pVCpu);
2039 }
2040
2041 /* VMXON when already in VMX root mode. */
2042 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
2043 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxAlreadyRoot;
2044 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2045 return VINF_SUCCESS;
2046#endif
2047}
2048
2049
2050/**
2051 * Gets the instruction diagnostic for segment base checks during VM-entry of a
2052 * nested-guest.
2053 *
2054 * @param iSegReg The segment index (X86_SREG_XXX).
2055 */
2056IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBase(unsigned iSegReg)
2057{
2058 switch (iSegReg)
2059 {
2060 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseCs;
2061 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseDs;
2062 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseEs;
2063 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseFs;
2064 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseGs;
2065 default: return kVmxVDiag_Vmentry_GuestSegBaseSs;
2066 }
2067}
2068
2069
2070/**
2071 * Gets the instruction diagnostic for segment base checks during VM-entry of a
2072 * nested-guest that is in Virtual-8086 mode.
2073 *
2074 * @param iSegReg The segment index (X86_SREG_XXX).
2075 */
2076IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBaseV86(unsigned iSegReg)
2077{
2078 switch (iSegReg)
2079 {
2080 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseV86Cs;
2081 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ds;
2082 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseV86Es;
2083 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseV86Fs;
2084 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseV86Gs;
2085 default:
2086 Assert(iSegReg == X86_SREG_SS);
2087 return kVmxVDiag_Vmentry_GuestSegBaseV86Ss;
2088 }
2089}
2090
2091
2092/**
2093 * Gets the instruction diagnostic for segment limit checks during VM-entry of a
2094 * nested-guest that is in Virtual-8086 mode.
2095 *
2096 * @param iSegReg The segment index (X86_SREG_XXX).
2097 */
2098IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegLimitV86(unsigned iSegReg)
2099{
2100 switch (iSegReg)
2101 {
2102 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegLimitV86Cs;
2103 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ds;
2104 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegLimitV86Es;
2105 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegLimitV86Fs;
2106 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegLimitV86Gs;
2107 default:
2108 Assert(iSegReg == X86_SREG_SS);
2109 return kVmxVDiag_Vmentry_GuestSegLimitV86Ss;
2110 }
2111}
2112
2113
2114/**
2115 * Gets the instruction diagnostic for segment attribute checks during VM-entry of a
2116 * nested-guest that is in Virtual-8086 mode.
2117 *
2118 * @param iSegReg The segment index (X86_SREG_XXX).
2119 */
2120IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrV86(unsigned iSegReg)
2121{
2122 switch (iSegReg)
2123 {
2124 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrV86Cs;
2125 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ds;
2126 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrV86Es;
2127 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrV86Fs;
2128 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrV86Gs;
2129 default:
2130 Assert(iSegReg == X86_SREG_SS);
2131 return kVmxVDiag_Vmentry_GuestSegAttrV86Ss;
2132 }
2133}
2134
2135
2136/**
2137 * Gets the instruction diagnostic for segment attributes reserved bits failure
2138 * during VM-entry of a nested-guest.
2139 *
2140 * @param iSegReg The segment index (X86_SREG_XXX).
2141 */
2142IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrRsvd(unsigned iSegReg)
2143{
2144 switch (iSegReg)
2145 {
2146 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdCs;
2147 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdDs;
2148 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrRsvdEs;
2149 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdFs;
2150 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdGs;
2151 default:
2152 Assert(iSegReg == X86_SREG_SS);
2153 return kVmxVDiag_Vmentry_GuestSegAttrRsvdSs;
2154 }
2155}
2156
2157
2158/**
2159 * Gets the instruction diagnostic for segment attributes descriptor-type
2160 * (code/data segment or system) failure during VM-entry of a nested-guest.
2161 *
2162 * @param iSegReg The segment index (X86_SREG_XXX).
2163 */
2164IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDescType(unsigned iSegReg)
2165{
2166 switch (iSegReg)
2167 {
2168 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeCs;
2169 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeDs;
2170 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeEs;
2171 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeFs;
2172 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeGs;
2173 default:
2174 Assert(iSegReg == X86_SREG_SS);
2175 return kVmxVDiag_Vmentry_GuestSegAttrDescTypeSs;
2176 }
2177}
2178
2179
2180/**
2181 * Gets the instruction diagnostic for segment attribute 'present' failure during
2182 * VM-entry of a nested-guest.
2183 *
2184 * @param iSegReg The segment index (X86_SREG_XXX).
2185 */
2186IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrPresent(unsigned iSegReg)
2187{
2188 switch (iSegReg)
2189 {
2190 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrPresentCs;
2191 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrPresentDs;
2192 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrPresentEs;
2193 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrPresentFs;
2194 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrPresentGs;
2195 default:
2196 Assert(iSegReg == X86_SREG_SS);
2197 return kVmxVDiag_Vmentry_GuestSegAttrPresentSs;
2198 }
2199}
2200
2201
2202/**
2203 * Gets the instruction diagnostic for segment attribute granularity failure during
2204 * VM-entry of a nested-guest.
2205 *
2206 * @param iSegReg The segment index (X86_SREG_XXX).
2207 */
2208IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrGran(unsigned iSegReg)
2209{
2210 switch (iSegReg)
2211 {
2212 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrGranCs;
2213 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrGranDs;
2214 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrGranEs;
2215 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrGranFs;
2216 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrGranGs;
2217 default:
2218 Assert(iSegReg == X86_SREG_SS);
2219 return kVmxVDiag_Vmentry_GuestSegAttrGranSs;
2220 }
2221}
2222
2223/**
2224 * Gets the instruction diagnostic for segment attribute DPL/RPL failure during
2225 * VM-entry of a nested-guest.
2226 *
2227 * @param iSegReg The segment index (X86_SREG_XXX).
2228 */
2229IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDplRpl(unsigned iSegReg)
2230{
2231 switch (iSegReg)
2232 {
2233 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplCs;
2234 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplDs;
2235 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDplRplEs;
2236 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplFs;
2237 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplGs;
2238 default:
2239 Assert(iSegReg == X86_SREG_SS);
2240 return kVmxVDiag_Vmentry_GuestSegAttrDplRplSs;
2241 }
2242}
2243
2244
2245/**
2246 * Gets the instruction diagnostic for segment attribute type accessed failure
2247 * during VM-entry of a nested-guest.
2248 *
2249 * @param iSegReg The segment index (X86_SREG_XXX).
2250 */
2251IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrTypeAcc(unsigned iSegReg)
2252{
2253 switch (iSegReg)
2254 {
2255 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccCs;
2256 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccDs;
2257 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccEs;
2258 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccFs;
2259 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccGs;
2260 default:
2261 Assert(iSegReg == X86_SREG_SS);
2262 return kVmxVDiag_Vmentry_GuestSegAttrTypeAccSs;
2263 }
2264}
2265
2266
2267/**
2268 * Checks guest control registers, debug registers and MSRs as part of VM-entry.
2269 *
2270 * @param pVCpu The cross context virtual CPU structure.
2271 * @param pszInstr The VMX instruction name (for logging purposes).
2272 */
2273IEM_STATIC int iemVmxVmentryCheckGuestControlRegsMsrs(PVMCPU pVCpu, const char *pszInstr)
2274{
2275 /*
2276 * Guest Control Registers, Debug Registers, and MSRs.
2277 * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs".
2278 */
2279 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2280 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
2281 const char *const pszFailure = "VM-exit";
2282
2283 /* CR0 reserved bits. */
2284 {
2285 /* CR0 MB1 bits. */
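        /* ~GuestCr0 & u64Cr0Fixed0 isolates any must-be-one bit that the guest CR0 value leaves clear. */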
2286 uint64_t u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
2287 Assert(u64Cr0Fixed0 & (X86_CR0_NW | X86_CR0_CD));
2288 if (fUnrestrictedGuest)
2289 u64Cr0Fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
2290 if (~pVmcs->u64GuestCr0.u & u64Cr0Fixed0)
2291 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed0);
2292
2293 /* CR0 MBZ bits. */
2294 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
2295 if (pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1)
2296 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed1);
2297
2298        /* Without unrestricted-guest support, VT-x does not support unpaged protected mode. */
2299 if ( !fUnrestrictedGuest
2300 && (pVmcs->u64GuestCr0.u & X86_CR0_PG)
2301 && !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
2302 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0PgPe);
2303 }
2304
2305 /* CR4 reserved bits. */
2306 {
2307 /* CR4 MB1 bits. */
2308 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
2309 if (~pVmcs->u64GuestCr4.u & u64Cr4Fixed0)
2310 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed0);
2311
2312 /* CR4 MBZ bits. */
2313 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
2314 if (pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1)
2315 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed1);
2316 }
2317
2318 /* DEBUGCTL MSR. */
2319 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
2320 && (pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
2321 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDebugCtl);
2322
2323 /* 64-bit CPU checks. */
2324 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
2325 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2326 {
2327 if (fGstInLongMode)
2328 {
2329 /* PAE must be set. */
2330 if ( (pVmcs->u64GuestCr0.u & X86_CR0_PG)
2331                && (pVmcs->u64GuestCr4.u & X86_CR4_PAE))
2332 { /* likely */ }
2333 else
2334 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPae);
2335 }
2336 else
2337 {
2338 /* PCIDE should not be set. */
2339 if (!(pVmcs->u64GuestCr4.u & X86_CR4_PCIDE))
2340 { /* likely */ }
2341 else
2342 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPcide);
2343 }
2344
2345 /* CR3. */
2346 if (!(pVmcs->u64GuestCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
2347 { /* likely */ }
2348 else
2349 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr3);
2350
2351 /* DR7. */
2352 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
2353 && (pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
2354 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDr7);
2355
2356 /* SYSENTER ESP and SYSENTER EIP. */
2357 if ( X86_IS_CANONICAL(pVmcs->u64GuestSysenterEsp.u)
2358 && X86_IS_CANONICAL(pVmcs->u64GuestSysenterEip.u))
2359 { /* likely */ }
2360 else
2361 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSysenterEspEip);
2362 }
2363
2364 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)); /* We don't support loading IA32_PERF_GLOBAL_CTRL MSR yet. */
2365
2366 /* PAT MSR. */
2367 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
2368 && !CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
2369 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPatMsr);
2370
2371 /* EFER MSR. */
2372 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
2373 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
2374 && (pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
2375 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsrRsvd);
2376
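    /* The guest EFER.LMA bit must match the "IA-32e mode guest" entry control; and if CR0.PG is
       set, EFER.LME must equal EFER.LMA. See Intel spec. 26.3.1.1. */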
2377    bool const fGstLma = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LMA);
2378    bool const fGstLme = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LME);
2379 if ( fGstInLongMode == fGstLma
2380 && ( !(pVmcs->u64GuestCr0.u & X86_CR0_PG)
2381 || fGstLma == fGstLme))
2382 { /* likely */ }
2383 else
2384 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsr);
2385
2386 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR)); /* We don't support loading IA32_BNDCFGS MSR yet. */
2387
2388 NOREF(pszInstr);
2389 NOREF(pszFailure);
2390 return VINF_SUCCESS;
2391}
2392
2393
2394/**
2395 * Checks guest segment registers, LDTR and TR as part of VM-entry.
2396 *
2397 * @param pVCpu The cross context virtual CPU structure.
2398 * @param pszInstr The VMX instruction name (for logging purposes).
2399 */
2400IEM_STATIC int iemVmxVmentryCheckGuestSegRegs(PVMCPU pVCpu, const char *pszInstr)
2401{
2402 /*
2403 * Segment registers.
2404 * See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2405 */
2406 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2407 bool const fGstInV86Mode = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);
2408 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
2409 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
2410 const char *const pszFailure = "VM-exit";
2411
2412 /* Selectors. */
2413 if ( !fGstInV86Mode
2414 && !fUnrestrictedGuest
2415 && (pVmcs->GuestSs & X86_SEL_RPL) != (pVmcs->GuestCs & X86_SEL_RPL))
2416 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelCsSsRpl);
2417
2418 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
2419 {
2420 CPUMSELREG SelReg;
2421 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &SelReg);
2422 if (RT_LIKELY(rc == VINF_SUCCESS))
2423 { /* likely */ }
2424 else
2425 return rc;
2426
2427 /*
2428 * Virtual-8086 mode checks.
2429 */
2430 if (fGstInV86Mode)
2431 {
2432 /* Base address. */
2433 if (SelReg.u64Base == (uint64_t)SelReg.Sel << 4)
2434 { /* likely */ }
2435 else
2436 {
2437 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBaseV86(iSegReg);
2438 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2439 }
2440
2441 /* Limit. */
2442 if (SelReg.u32Limit == 0xffff)
2443 { /* likely */ }
2444 else
2445 {
2446 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegLimitV86(iSegReg);
2447 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2448 }
2449
2450 /* Attribute. */
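            /* 0xf3 = present, DPL 3, code/data descriptor, read/write accessed data segment;
               the fixed attribute value required of all segments in virtual-8086 mode. */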
2451 if (SelReg.Attr.u == 0xf3)
2452 { /* likely */ }
2453 else
2454 {
2455 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrV86(iSegReg);
2456 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2457 }
2458
2459 /* We're done; move to checking the next segment. */
2460 continue;
2461 }
2462
2463 /* Checks done by 64-bit CPUs. */
2464 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2465 {
2466 /* Base address. */
2467 if ( iSegReg == X86_SREG_FS
2468 || iSegReg == X86_SREG_GS)
2469 {
2470 if (X86_IS_CANONICAL(SelReg.u64Base))
2471 { /* likely */ }
2472 else
2473 {
2474 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
2475 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2476 }
2477 }
2478 else if (iSegReg == X86_SREG_CS)
2479 {
2480 if (!RT_HI_U32(SelReg.u64Base))
2481 { /* likely */ }
2482 else
2483 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseCs);
2484 }
2485 else
2486 {
2487 if ( SelReg.Attr.n.u1Unusable
2488 || !RT_HI_U32(SelReg.u64Base))
2489 { /* likely */ }
2490 else
2491 {
2492 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
2493 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2494 }
2495 }
2496 }
2497
2498 /*
2499 * Checks outside Virtual-8086 mode.
2500 */
2501 uint8_t const uSegType = SelReg.Attr.n.u4Type;
2502 uint8_t const fCodeDataSeg = SelReg.Attr.n.u1DescType;
2503 uint8_t const fUsable = !SelReg.Attr.n.u1Unusable;
2504 uint8_t const uDpl = SelReg.Attr.n.u2Dpl;
2505 uint8_t const fPresent = SelReg.Attr.n.u1Present;
2506 uint8_t const uGranularity = SelReg.Attr.n.u1Granularity;
2507 uint8_t const uDefBig = SelReg.Attr.n.u1DefBig;
2508 uint8_t const fSegLong = SelReg.Attr.n.u1Long;
2509
2510 /* Code or usable segment. */
2511 if ( iSegReg == X86_SREG_CS
2512 || fUsable)
2513 {
2514 /* Reserved bits (bits 31:17 and bits 11:8). */
2515 if (!(SelReg.Attr.u & 0xfffe0f00))
2516 { /* likely */ }
2517 else
2518 {
2519 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrRsvd(iSegReg);
2520 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2521 }
2522
2523 /* Descriptor type. */
2524 if (fCodeDataSeg)
2525 { /* likely */ }
2526 else
2527 {
2528 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDescType(iSegReg);
2529 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2530 }
2531
2532 /* Present. */
2533 if (fPresent)
2534 { /* likely */ }
2535 else
2536 {
2537 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrPresent(iSegReg);
2538 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2539 }
2540
2541 /* Granularity. */
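            /* If any bit in limit[11:0] is 0, the granularity bit must be 0; if any bit in
               limit[31:20] is 1, the granularity bit must be 1. */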
2542 if ( ((SelReg.u32Limit & 0x00000fff) == 0x00000fff || !uGranularity)
2543 && ((SelReg.u32Limit & 0xfff00000) == 0x00000000 || uGranularity))
2544 { /* likely */ }
2545 else
2546 {
2547 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrGran(iSegReg);
2548 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2549 }
2550 }
2551
2552 if (iSegReg == X86_SREG_CS)
2553 {
2554 /* Segment Type and DPL. */
2555 if ( uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
2556 && fUnrestrictedGuest)
2557 {
2558 if (uDpl == 0)
2559 { /* likely */ }
2560 else
2561 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplZero);
2562 }
2563 else if ( uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_ACCESSED)
2564 || uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
2565 {
2566 X86DESCATTR SsAttr; SsAttr.u = pVmcs->u32GuestSsAttr;
2567 if (uDpl == SsAttr.n.u2Dpl)
2568 { /* likely */ }
2569 else
2570 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplEqSs);
2571 }
2572 else if ((uSegType & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
2573 == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
2574 {
2575 X86DESCATTR SsAttr; SsAttr.u = pVmcs->u32GuestSsAttr;
2576 if (uDpl <= SsAttr.n.u2Dpl)
2577 { /* likely */ }
2578 else
2579 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplLtSs);
2580 }
2581 else
2582 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsType);
2583
2584 /* Def/Big. */
2585 if ( fGstInLongMode
2586 && fSegLong)
2587 {
2588 if (uDefBig == 0)
2589 { /* likely */ }
2590 else
2591 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDefBig);
2592 }
2593 }
2594 else if (iSegReg == X86_SREG_SS)
2595 {
2596 /* Segment Type. */
2597 if ( !fUsable
2598 || uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
2599 || uSegType == (X86_SEL_TYPE_DOWN | X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED))
2600 { /* likely */ }
2601 else
2602 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsType);
2603
2604 /* DPL. */
2605 if (fUnrestrictedGuest)
2606 {
2607 if (uDpl == (SelReg.Sel & X86_SEL_RPL))
2608 { /* likely */ }
2609 else
2610 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplEqRpl);
2611 }
2612 X86DESCATTR CsAttr; CsAttr.u = pVmcs->u32GuestCsAttr;
2613 if ( CsAttr.n.u4Type == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
2614                || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
2615 {
2616 if (uDpl == 0)
2617 { /* likely */ }
2618 else
2619 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplZero);
2620 }
2621 }
2622 else
2623 {
2624 /* DS, ES, FS, GS. */
2625 if (fUsable)
2626 {
2627 /* Segment type. */
2628 if (uSegType & X86_SEL_TYPE_ACCESSED)
2629 { /* likely */ }
2630 else
2631 {
2632 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrTypeAcc(iSegReg);
2633 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2634 }
2635
2636 if ( !(uSegType & X86_SEL_TYPE_CODE)
2637 || (uSegType & X86_SEL_TYPE_READ))
2638 { /* likely */ }
2639 else
2640 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsTypeRead);
2641
2642 /* DPL. */
2643 if ( !fUnrestrictedGuest
2644 && uSegType <= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
2645 {
2646 if (uDpl >= (SelReg.Sel & X86_SEL_RPL))
2647 { /* likely */ }
2648 else
2649 {
2650 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDplRpl(iSegReg);
2651 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2652 }
2653 }
2654 }
2655 }
2656 }
2657
2658 /*
2659 * LDTR.
2660 */
2661 {
2662 CPUMSELREG Ldtr;
2663 Ldtr.Sel = pVmcs->GuestLdtr;
2664 Ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
2665 Ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
2666        Ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
2667
2668 if (!Ldtr.Attr.n.u1Unusable)
2669 {
2670 /* Selector. */
2671 if (!(Ldtr.Sel & X86_SEL_LDT))
2672 { /* likely */ }
2673 else
2674 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelLdtr);
2675
2676 /* Base. */
2677 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2678 {
2679 if (X86_IS_CANONICAL(Ldtr.u64Base))
2680 { /* likely */ }
2681 else
2682 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseLdtr);
2683 }
2684
2685 /* Attributes. */
2686 /* Reserved bits (bits 31:17 and bits 11:8). */
2687 if (!(Ldtr.Attr.u & 0xfffe0f00))
2688 { /* likely */ }
2689 else
2690 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrRsvd);
2691
2692 if (Ldtr.Attr.n.u4Type == X86_SEL_TYPE_SYS_LDT)
2693 { /* likely */ }
2694 else
2695 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrType);
2696
2697 if (!Ldtr.Attr.n.u1DescType)
2698 { /* likely */ }
2699 else
2700 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrDescType);
2701
2702 if (Ldtr.Attr.n.u1Present)
2703 { /* likely */ }
2704 else
2705 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrPresent);
2706
2707 if ( ((Ldtr.u32Limit & 0x00000fff) == 0x00000fff || !Ldtr.Attr.n.u1Granularity)
2708 && ((Ldtr.u32Limit & 0xfff00000) == 0x00000000 || Ldtr.Attr.n.u1Granularity))
2709 { /* likely */ }
2710 else
2711 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrGran);
2712 }
2713 }
2714
2715 /*
2716 * TR.
2717 */
2718 {
2719 CPUMSELREG Tr;
2720 Tr.Sel = pVmcs->GuestTr;
2721 Tr.u32Limit = pVmcs->u32GuestTrLimit;
2722 Tr.u64Base = pVmcs->u64GuestTrBase.u;
2723        Tr.Attr.u = pVmcs->u32GuestTrAttr;
2724
2725 /* Selector. */
2726 if (!(Tr.Sel & X86_SEL_LDT))
2727 { /* likely */ }
2728 else
2729 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelTr);
2730
2731 /* Base. */
2732 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2733 {
2734 if (X86_IS_CANONICAL(Tr.u64Base))
2735 { /* likely */ }
2736 else
2737 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseTr);
2738 }
2739
2740 /* Attributes. */
2741 /* Reserved bits (bits 31:17 and bits 11:8). */
2742 if (!(Tr.Attr.u & 0xfffe0f00))
2743 { /* likely */ }
2744 else
2745 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrRsvd);
2746
2747 if (!Tr.Attr.n.u1Unusable)
2748 { /* likely */ }
2749 else
2750 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrUnusable);
2751
2752 if ( Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY
2753 || ( !fGstInLongMode
2754 && Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY))
2755 { /* likely */ }
2756 else
2757 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrType);
2758
2759 if (!Tr.Attr.n.u1DescType)
2760 { /* likely */ }
2761 else
2762 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrDescType);
2763
2764 if (Tr.Attr.n.u1Present)
2765 { /* likely */ }
2766 else
2767 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrPresent);
2768
2769 if ( ((Tr.u32Limit & 0x00000fff) == 0x00000fff || !Tr.Attr.n.u1Granularity)
2770 && ((Tr.u32Limit & 0xfff00000) == 0x00000000 || Tr.Attr.n.u1Granularity))
2771 { /* likely */ }
2772 else
2773 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrGran);
2774 }
2775
2776 NOREF(pszInstr);
2777 NOREF(pszFailure);
2778 return VINF_SUCCESS;
2779}
2780
2781
2782/**
2783 * Checks guest GDTR and IDTR as part of VM-entry.
2784 *
2785 * @param pVCpu The cross context virtual CPU structure.
2786 * @param pszInstr The VMX instruction name (for logging purposes).
2787 */
2788IEM_STATIC int iemVmxVmentryCheckGuestGdtrIdtr(PVMCPU pVCpu, const char *pszInstr)
2789{
2790 /*
2791 * GDTR and IDTR.
2792 * See Intel spec. 26.3.1.3 "Checks on Guest Descriptor-Table Registers".
2793 */
2794 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2795 const char *const pszFailure = "VM-exit";
2796 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2797 {
2798 /* Base. */
2799 if (X86_IS_CANONICAL(pVmcs->u64GuestGdtrBase.u))
2800 { /* likely */ }
2801 else
2802 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrBase);
2803
2804 if (X86_IS_CANONICAL(pVmcs->u64GuestIdtrBase.u))
2805 { /* likely */ }
2806 else
2807 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrBase);
2808 }
2809
2810 /* Limit. */
2811 if (!RT_HI_U16(pVmcs->u32GuestGdtrLimit))
2812 { /* likely */ }
2813 else
2814 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrLimit);
2815
2816 if (!RT_HI_U16(pVmcs->u32GuestIdtrLimit))
2817 { /* likely */ }
2818 else
2819 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrLimit);
2820
2821 NOREF(pszInstr);
2822 NOREF(pszFailure);
2823 return VINF_SUCCESS;
2824}
2825
2826
2827/**
2828 * Checks guest-state as part of VM-entry.
2829 *
2830 * @returns VBox status code.
2831 * @param pVCpu The cross context virtual CPU structure.
2832 * @param pszInstr The VMX instruction name (for logging purposes).
2833 */
2834IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPU pVCpu, const char *pszInstr)
2835{
2836 int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
2837 if (rc == VINF_SUCCESS)
2838 { /* likely */ }
2839 else
2840 return rc;
2841
2842 rc = iemVmxVmentryCheckGuestSegRegs(pVCpu, pszInstr);
2843 if (rc == VINF_SUCCESS)
2844 { /* likely */ }
2845 else
2846 return rc;
2847
2848 rc = iemVmxVmentryCheckGuestGdtrIdtr(pVCpu, pszInstr);
2849 if (rc == VINF_SUCCESS)
2850 { /* likely */ }
2851 else
2852 return rc;
2853
2854
2855 return VINF_SUCCESS;
2856}
2857
2858
2859/**
2860 * Checks host-state as part of VM-entry.
2861 *
2862 * @returns VBox status code.
2863 * @param pVCpu The cross context virtual CPU structure.
2864 * @param pszInstr The VMX instruction name (for logging purposes).
2865 */
2866IEM_STATIC int iemVmxVmentryCheckHostState(PVMCPU pVCpu, const char *pszInstr)
2867{
2868 /*
2869 * Host Control Registers and MSRs.
2870 * See Intel spec. 26.2.2 "Checks on Host Control Registers and MSRs".
2871 */
2872 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2873 const char * const pszFailure = "VMFail";
2874
2875 /* CR0 reserved bits. */
2876 {
2877 /* CR0 MB1 bits. */
2878 uint64_t const u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
2879 if (~pVmcs->u64HostCr0.u & u64Cr0Fixed0)
2880 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed0);
2881
2882 /* CR0 MBZ bits. */
2883 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
2884 if (pVmcs->u64HostCr0.u & ~u64Cr0Fixed1)
2885 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed1);
2886 }
2887
2888 /* CR4 reserved bits. */
2889 {
2890 /* CR4 MB1 bits. */
2891 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
2892 if (~pVmcs->u64HostCr4.u & u64Cr4Fixed0)
2893 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed0);
2894
2895 /* CR4 MBZ bits. */
2896 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
2897 if (pVmcs->u64HostCr4.u & ~u64Cr4Fixed1)
2898 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed1);
2899 }
2900
2901 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2902 {
2903 /* CR3 reserved bits. */
2904 if (!(pVmcs->u64HostCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
2905 { /* likely */ }
2906 else
2907 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr3);
2908
2909 /* SYSENTER ESP and SYSENTER EIP. */
2910 if ( X86_IS_CANONICAL(pVmcs->u64HostSysenterEsp.u)
2911 && X86_IS_CANONICAL(pVmcs->u64HostSysenterEip.u))
2912 { /* likely */ }
2913 else
2914 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSysenterEspEip);
2915 }
2916
2917 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PERF_MSR)); /* We don't support loading IA32_PERF_GLOBAL_CTRL MSR yet. */
2918
2919 /* PAT MSR. */
2920 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
2921 || CPUMIsPatMsrValid(pVmcs->u64HostPatMsr.u))
2922 { /* likely */ }
2923 else
2924 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostPatMsr);
2925
2926 /* EFER MSR. */
2927 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
2928 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
2929 || !(pVmcs->u64HostEferMsr.u & ~uValidEferMask))
2930 { /* likely */ }
2931 else
2932 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsrRsvd);
2933
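    /* The "host address-space size" VM-exit control must agree with both LMA and LME in the
       host EFER value. See Intel spec. 26.2.2. */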
2934 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2935 bool const fHostLma = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LMA);
2936 bool const fHostLme = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LME);
2937 if ( fHostInLongMode == fHostLma
2938 && fHostInLongMode == fHostLme)
2939 { /* likely */ }
2940 else
2941 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsr);
2942
2943 /*
2944 * Host Segment and Descriptor-Table Registers.
2945 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
2946 */
2947 /* Selector RPL and TI. */
2948 if ( !(pVmcs->HostCs & (X86_SEL_RPL | X86_SEL_LDT))
2949 && !(pVmcs->HostSs & (X86_SEL_RPL | X86_SEL_LDT))
2950 && !(pVmcs->HostDs & (X86_SEL_RPL | X86_SEL_LDT))
2951 && !(pVmcs->HostEs & (X86_SEL_RPL | X86_SEL_LDT))
2952 && !(pVmcs->HostFs & (X86_SEL_RPL | X86_SEL_LDT))
2953 && !(pVmcs->HostGs & (X86_SEL_RPL | X86_SEL_LDT))
2954 && !(pVmcs->HostTr & (X86_SEL_RPL | X86_SEL_LDT)))
2955 { /* likely */ }
2956 else
2957 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSel);
2958
2959 /* CS and TR selectors cannot be 0. */
2960 if ( pVmcs->HostCs
2961 && pVmcs->HostTr)
2962 { /* likely */ }
2963 else
2964 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCsTr);
2965
2966 /* SS cannot be 0 if 32-bit host. */
2967 if ( fHostInLongMode
2968 || pVmcs->HostSs)
2969 { /* likely */ }
2970 else
2971 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSs);
2972
2973 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2974 {
2975 /* FS, GS, GDTR, IDTR, TR base address. */
2976 if ( X86_IS_CANONICAL(pVmcs->u64HostFsBase.u)
2977 && X86_IS_CANONICAL(pVmcs->u64HostGsBase.u)
2978 && X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u)
2979 && X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u)
2980 && X86_IS_CANONICAL(pVmcs->u64HostTrBase.u))
2981 { /* likely */ }
2982 else
2983 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSegBase);
2984 }
2985
2986 /*
2987 * Host address-space size for 64-bit CPUs.
2988 * See Intel spec. 26.2.4 "Checks Related to Address-Space Size".
2989 */
2990 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
2991 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2992 {
2993 bool const fCpuInLongMode = CPUMIsGuestInLongMode(pVCpu);
2994
2995 /* Logical processor in IA-32e mode. */
2996 if (fCpuInLongMode)
2997 {
2998 if (fHostInLongMode)
2999 {
3000 /* PAE must be set. */
3001 if (pVmcs->u64HostCr4.u & X86_CR4_PAE)
3002 { /* likely */ }
3003 else
3004 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pae);
3005
3006 /* RIP must be canonical. */
3007 if (X86_IS_CANONICAL(pVmcs->u64HostRip.u))
3008 { /* likely */ }
3009 else
3010 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRip);
3011 }
3012 else
3013 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostLongMode);
3014 }
3015 else
3016 {
3017 /* Logical processor is outside IA-32e mode. */
3018 if ( !fGstInLongMode
3019 && !fHostInLongMode)
3020 {
3021 /* PCIDE should not be set. */
3022 if (!(pVmcs->u64HostCr4.u & X86_CR4_PCIDE))
3023 { /* likely */ }
3024 else
3025 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pcide);
3026
3027 /* The high 32-bits of RIP MBZ. */
3028 if (!pVmcs->u64HostRip.s.Hi)
3029 { /* likely */ }
3030 else
3031 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRipRsvd);
3032 }
3033 else
3034 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongMode);
3035 }
3036 }
3037 else
3038 {
3039 /* Host address-space size for 32-bit CPUs. */
3040 if ( !fGstInLongMode
3041 && !fHostInLongMode)
3042 { /* likely */ }
3043 else
3044 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongModeNoCpu);
3045 }
3046
3047 NOREF(pszInstr);
3048 NOREF(pszFailure);
3049 return VINF_SUCCESS;
3050}
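
/*
 * Editorial sketch (not part of the original sources): the CR0/CR4 "fixed bits" and
 * canonical-address checks in iemVmxVmentryCheckHostState() above reduce to the two
 * standalone predicates below. The names are made up for illustration; the real code
 * uses the CPUMGetGuestIa32VmxCr{0,4}Fixed{0,1} values and X86_IS_CANONICAL directly.
 */
static bool vmxSketchIsCrValid(uint64_t uCr, uint64_t uFixed0, uint64_t uFixed1)
{
    if (~uCr & uFixed0)     /* A bit that must be 1 (set in the FIXED0 MSR) is clear. */
        return false;
    if (uCr & ~uFixed1)     /* A bit that must be 0 (clear in the FIXED1 MSR) is set. */
        return false;
    return true;
}

static bool vmxSketchIsCanonical(uint64_t uAddr)
{
    /* Canonical 48-bit linear addresses: bits 63:48 must be copies of bit 47. */
    return uAddr + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000);
}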
3051
3052
3053/**
3054 * Checks VM-entry controls fields as part of VM-entry.
3055 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
3056 *
3057 * @returns VBox status code.
3058 * @param pVCpu The cross context virtual CPU structure.
3059 * @param pszInstr The VMX instruction name (for logging purposes).
3060 */
3061IEM_STATIC int iemVmxVmentryCheckEntryCtls(PVMCPU pVCpu, const char *pszInstr)
3062{
3063 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3064 const char * const pszFailure = "VMFail";
3065
3066 /* VM-entry controls. */
3067 VMXCTLSMSR EntryCtls;
3068 EntryCtls.u = CPUMGetGuestIa32VmxEntryCtls(pVCpu);
3069 if (~pVmcs->u32EntryCtls & EntryCtls.n.disallowed0)
3070 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);
3071
3072 if (pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1)
3073 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsAllowed1);
3074
3075 /* Event injection. */
3076 uint32_t const uIntInfo = pVmcs->u32EntryIntInfo;
3077 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VALID))
3078 {
3079 /* Type and vector. */
3080 uint8_t const uType = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_TYPE);
3081 uint8_t const uVector = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VECTOR);
3082 uint8_t const uRsvd = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_RSVD_12_30);
3083 if ( uRsvd == 0
3084 && HMVmxIsEntryIntInfoTypeValid(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxMonitorTrapFlag, uType)
3085 && HMVmxIsEntryIntInfoVectorValid(uVector, uType))
3086 { /* likely */ }
3087 else
3088 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoTypeVecRsvd);
3089
3090 /* Exception error code. */
3091 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID))
3092 {
3093 /* Delivery possible only in Unrestricted-guest mode when CR0.PE is set. */
3094 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
3095 || (pVmcs->u64GuestCr0.s.Lo & X86_CR0_PE))
3096 { /* likely */ }
3097 else
3098 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodePe);
3099
3100 /* Exceptions that provide an error code. */
3101 if ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
3102 && ( uVector == X86_XCPT_DF
3103 || uVector == X86_XCPT_TS
3104 || uVector == X86_XCPT_NP
3105 || uVector == X86_XCPT_SS
3106 || uVector == X86_XCPT_GP
3107 || uVector == X86_XCPT_PF
3108 || uVector == X86_XCPT_AC))
3109 { /* likely */ }
3110 else
3111 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodeVec);
3112
3113 /* Exception error-code reserved bits. */
3114 if (!(pVmcs->u32EntryXcptErrCode & ~VMX_ENTRY_INT_XCPT_ERR_CODE_VALID_MASK))
3115 { /* likely */ }
3116 else
3117 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryXcptErrCodeRsvd);
3118
3119 /* Injecting a software interrupt, software exception or privileged software exception. */
3120 if ( uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
3121 || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
3122 || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
3123 {
3124 /* Instruction length must be in the range 0-15. */
3125 if (pVmcs->u32EntryInstrLen <= VMX_ENTRY_INSTR_LEN_MAX)
3126 { /* likely */ }
3127 else
3128 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLen);
3129
3130 /* An instruction length of 0 is allowed only if the CPU supports it (fVmxEntryInjectSoftInt). */
3131 if ( pVmcs->u32EntryInstrLen == 0
3132 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEntryInjectSoftInt)
3133 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLenZero);
3134 }
3135 }
3136 }
3137
3138 /* VM-entry MSR-load count and VM-entry MSR-load area address. */
3139 if (pVmcs->u32EntryMsrLoadCount)
3140 {
3141 if ( (pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
3142 || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3143 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
3144 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrEntryMsrLoad);
3145 }
3146
3147 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)); /* We don't support SMM yet. */
3148 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON)); /* We don't support dual-monitor treatment yet. */
3149
3150 NOREF(pszInstr);
3151 NOREF(pszFailure);
3152 return VINF_SUCCESS;
3153}
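
/*
 * Editorial sketch (not part of the original sources): the layout of the VM-entry
 * interruption-information field decoded by the event-injection checks in
 * iemVmxVmentryCheckEntryCtls() above. Bits 7:0 = vector, bits 10:8 = type, bit 11 =
 * deliver-error-code, bits 30:12 = reserved (MBZ), bit 31 = valid. The helper name is
 * made up for illustration; the real code uses the VMX_BF_ENTRY_INT_INFO_XXX getters.
 */
static void vmxSketchDecodeEntryIntInfo(uint32_t uIntInfo, uint8_t *puVector, uint8_t *puType,
                                        bool *pfErrCodeValid, bool *pfValid)
{
    *puVector       = (uint8_t)(uIntInfo & 0xff);         /* Bits 7:0.  */
    *puType         = (uint8_t)((uIntInfo >> 8) & 0x7);   /* Bits 10:8. */
    *pfErrCodeValid = RT_BOOL(uIntInfo & RT_BIT_32(11));  /* Bit 11.    */
    *pfValid        = RT_BOOL(uIntInfo & RT_BIT_32(31));  /* Bit 31.    */
}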
3154
3155
3156/**
3157 * Checks VM-exit controls fields as part of VM-entry.
3158 * See Intel spec. 26.2.1.2 "VM-Exit Control Fields".
3159 *
3160 * @returns VBox status code.
3161 * @param pVCpu The cross context virtual CPU structure.
3162 * @param pszInstr The VMX instruction name (for logging purposes).
3163 */
3164IEM_STATIC int iemVmxVmentryCheckExitCtls(PVMCPU pVCpu, const char *pszInstr)
3165{
3166 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3167 const char * const pszFailure = "VMFail";
3168
3169 /* VM-exit controls. */
3170 VMXCTLSMSR ExitCtls;
3171 ExitCtls.u = CPUMGetGuestIa32VmxExitCtls(pVCpu);
3172 if (~pVmcs->u32ExitCtls & ExitCtls.n.disallowed0)
3173 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);
3174
3175 if (pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1)
3176 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsAllowed1);
3177
3178 /* Save preemption timer without activating it. */
3179 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
3180 && (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
3181 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_SavePreemptTimer);
3182
3183 /* VM-exit MSR-store count and VM-exit MSR-store area address. */
3184 if (pVmcs->u32ExitMsrStoreCount)
3185 {
3186 if ( (pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
3187 || (pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3188 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
3189 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrStore);
3190 }
3191
3192 /* VM-exit MSR-load count and VM-exit MSR-load area address. */
3193 if (pVmcs->u32ExitMsrLoadCount)
3194 {
3195 if ( (pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
3196 || (pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3197 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
3198 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrLoad);
3199 }
3200
3201 NOREF(pszInstr);
3202 NOREF(pszFailure);
3203 return VINF_SUCCESS;
3204}
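
/*
 * Editorial sketch (not part of the original sources): the pin-based, processor-based,
 * VM-entry and VM-exit control checks all follow the same pattern against a VMX
 * capability MSR whose low 32 bits report the allowed-0 settings ("disallowed0" in
 * VMXCTLSMSR) and whose high 32 bits report the allowed-1 settings. The helper name is
 * made up for illustration.
 */
static bool vmxSketchAreCtlsValid(uint32_t uCtls, uint64_t uCapMsr)
{
    uint32_t const fDisallowed0 = (uint32_t)uCapMsr;          /* Bits that must be 1. */
    uint32_t const fAllowed1    = (uint32_t)(uCapMsr >> 32);  /* Bits that may be 1.  */
    if (~uCtls & fDisallowed0)
        return false;
    if (uCtls & ~fAllowed1)
        return false;
    return true;
}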
3205
3206
3207/**
3208 * Checks VM-execution controls fields as part of VM-entry.
3209 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
3210 *
3211 * @returns VBox status code.
3212 * @param pVCpu The cross context virtual CPU structure.
3213 * @param pszInstr The VMX instruction name (for logging purposes).
3214 *
3215 * @remarks This may update secondary processor-based VM-execution control fields
3216 * in the current VMCS if necessary.
3217 */
3218IEM_STATIC int iemVmxVmentryCheckExecCtls(PVMCPU pVCpu, const char *pszInstr)
3219{
3220 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3221 const char * const pszFailure = "VMFail";
3222
3223 /* Pin-based VM-execution controls. */
3224 {
3225 VMXCTLSMSR PinCtls;
3226 PinCtls.u = CPUMGetGuestIa32VmxPinbasedCtls(pVCpu);
3227 if (~pVmcs->u32PinCtls & PinCtls.n.disallowed0)
3228 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);
3229
3230 if (pVmcs->u32PinCtls & ~PinCtls.n.allowed1)
3231 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsAllowed1);
3232 }
3233
3234 /* Processor-based VM-execution controls. */
3235 {
3236 VMXCTLSMSR ProcCtls;
3237 ProcCtls.u = CPUMGetGuestIa32VmxProcbasedCtls(pVCpu);
3238 if (~pVmcs->u32ProcCtls & ProcCtls.n.disallowed0)
3239 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);
3240
3241 if (pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1)
3242 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsAllowed1);
3243 }
3244
3245 /* Secondary processor-based VM-execution controls. */
3246 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
3247 {
3248 VMXCTLSMSR ProcCtls2;
3249 ProcCtls2.u = CPUMGetGuestIa32VmxProcbasedCtls2(pVCpu);
3250 if (~pVmcs->u32ProcCtls2 & ProcCtls2.n.disallowed0)
3251 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);
3252
3253 if (pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1)
3254 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Allowed1);
3255 }
3256 else
3257 Assert(!pVmcs->u32ProcCtls2);
3258
3259 /* CR3-target count. */
3260 if (pVmcs->u32Cr3TargetCount <= VMX_V_CR3_TARGET_COUNT)
3261 { /* likely */ }
3262 else
3263 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Cr3TargetCount);
3264
3265 /* IO bitmaps physical addresses. */
3266 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
3267 {
3268 if ( (pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
3269 || (pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3270 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
3271 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapA);
3272
3273 if ( (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
3274 || (pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3275 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
3276 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
3277 }
3278
3279 /* MSR bitmap physical address. */
3280 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
3281 {
3282 if ( (pVmcs->u64AddrMsrBitmap.u & X86_PAGE_4K_OFFSET_MASK)
3283 || (pVmcs->u64AddrMsrBitmap.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3284 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrMsrBitmap.u))
3285 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);
3286 }
3287
3288 /* TPR shadow related controls. */
3289 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
3290 {
3291 /* Virtual-APIC page physical address. */
3292 RTGCPHYS GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
3293 if ( (GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
3294 || (GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3295 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
3296 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
3297
3298 /* Read the Virtual-APIC page. */
3299 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
3300 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage),
3301 GCPhysVirtApic, VMX_V_VIRT_APIC_PAGES);
3302 if (RT_FAILURE(rc))
3303 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
3304
3305 /* TPR threshold without virtual-interrupt delivery. */
3306 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
3307 && (pVmcs->u32TprThreshold & VMX_TPR_THRESHOLD_MASK))
3308 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThreshold);
3309
3310 /* TPR threshold and VTPR. */
3311 uint8_t const *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
3312 uint8_t const u8VTpr = *(pbVirtApic + XAPIC_OFF_TPR);
3313 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
3314 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
3315 && RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) > ((u8VTpr >> 4) & UINT32_C(0xf)) /* Bits 4:7 */)
3316 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
3317 }
3318 else
3319 {
3320 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
3321 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
3322 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
3323 { /* likely */ }
3324 else
3325 {
3326 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
3327 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicTprShadow);
3328 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
3329 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ApicRegVirt);
3330 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
3331 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtIntDelivery);
3332 }
3333 }
3334
3335 /* NMI exiting and virtual-NMIs. */
3336 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
3337 && (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
3338 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtNmi);
3339
3340 /* Virtual-NMIs and NMI-window exiting. */
3341 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
3342 && (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
3343 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_NmiWindowExit);
3344
3345 /* Virtualize APIC accesses. */
3346 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
3347 {
3348 /* APIC-access physical address. */
3349 RTGCPHYS GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
3350 if ( (GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
3351 || (GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3352 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
3353 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);
3354 }
3355
3356 /* Virtualize-x2APIC mode is mutually exclusive with virtualize-APIC accesses. */
3357 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
3358 && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
3359 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
3360
3361 /* Virtual-interrupt delivery requires external interrupt exiting. */
3362 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
3363 && !(pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
3364 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
3365
3366 /* VPID. */
3367 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VPID)
3368 || pVmcs->u16Vpid != 0)
3369 { /* likely */ }
3370 else
3371 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Vpid);
3372
3373 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_POSTED_INT)); /* We don't support posted interrupts yet. */
3374 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)); /* We don't support EPT yet. */
3375 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PML)); /* We don't support PML yet. */
3376 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)); /* We don't support Unrestricted-guests yet. */
3377 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMFUNC)); /* We don't support VM functions yet. */
3378 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT_VE)); /* We don't support EPT-violation #VE yet. */
3379
3380 /* VMCS shadowing. */
3381 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3382 {
3383 /* VMREAD-bitmap physical address. */
3384 RTGCPHYS GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
3385 if ( ( GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
3386 || ( GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3387 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
3388 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmreadBitmap);
3389
3390 /* VMWRITE-bitmap physical address. */
3391 RTGCPHYS GCPhysVmwriteBitmap = pVmcs->u64AddrVmwriteBitmap.u;
3392 if ( ( GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
3393 || ( GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3394 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
3395 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);
3396
3397 /* Read the VMREAD-bitmap. */
3398 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
3399 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap),
3400 GCPhysVmreadBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
3401 if (RT_FAILURE(rc))
3402 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys);
3403
3404 /* Read the VMWRITE-bitmap. */
3405 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap));
3406 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap),
3407 GCPhysVmwriteBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
3408 if (RT_FAILURE(rc))
3409 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys);
3410 }
3411
3412 NOREF(pszInstr);
3413 NOREF(pszFailure);
3414 return VINF_SUCCESS;
3415}
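
/*
 * Editorial sketch (not part of the original sources): two recurring checks from
 * iemVmxVmentryCheckExecCtls() above. The first shows the shape of the physical
 * address validation applied to the I/O bitmaps, MSR bitmap, virtual-APIC page,
 * APIC-access page and VMREAD/VMWRITE bitmaps (4K alignment, no bits beyond the
 * supported physical-address width); the real code additionally requires
 * PGMPhysIsGCPhysNormal() to succeed. The second shows the TPR-threshold comparison
 * against bits 7:4 of the VTPR byte. Names and the cPhysAddrBits parameter are made
 * up for illustration.
 */
static bool vmxSketchIsVmcsPagePtrValid(uint64_t uGCPhys, uint8_t cPhysAddrBits)
{
    if (uGCPhys & X86_PAGE_4K_OFFSET_MASK)                    /* Must be 4K aligned. */
        return false;
    if (cPhysAddrBits < 64 && (uGCPhys >> cPhysAddrBits))     /* No bits beyond the supported width. */
        return false;
    return true;
}

static bool vmxSketchIsTprThresholdValid(uint32_t uTprThreshold, uint8_t bVTpr)
{
    uint8_t const uThreshold = (uint8_t)(uTprThreshold & 0xf); /* Bits 3:0 of the control. */
    uint8_t const uVTpr74    = (uint8_t)((bVTpr >> 4) & 0xf);  /* Bits 7:4 of VTPR. */
    return uThreshold <= uVTpr74;
}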
3416
3417
3418/**
3419 * VMLAUNCH/VMRESUME instruction execution worker.
3420 *
3421 * @param pVCpu The cross context virtual CPU structure.
3422 * @param cbInstr The instruction length.
3423 * @param uInstrId The instruction identity (either VMXINSTRID_VMLAUNCH or
3424 * VMXINSTRID_VMRESUME).
3425 * @param pExitInfo Pointer to the VM-exit instruction information struct.
3426 * Optional, can be NULL.
3427 *
3428 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
3429 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
3430 */
3431IEM_STATIC VBOXSTRICTRC iemVmxVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId, PCVMXVEXITINFO pExitInfo)
3432{
3433 Assert( uInstrId == VMXINSTRID_VMLAUNCH
3434 || uInstrId == VMXINSTRID_VMRESUME);
3435
3436 const char *pszInstr = uInstrId == VMXINSTRID_VMLAUNCH ? "vmlaunch" : "vmresume";
3437 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
3438 {
3439 RT_NOREF(pExitInfo);
3440 /** @todo NSTVMX: intercept. */
3441 }
3442 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
3443
3444 /* CPL. */
3445 if (pVCpu->iem.s.uCpl > 0)
3446 {
3447 Log(("%s: CPL %u -> #GP(0)\n", pszInstr, pVCpu->iem.s.uCpl));
3448 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_Cpl;
3449 return iemRaiseGeneralProtectionFault0(pVCpu);
3450 }
3451
3452 /* Current VMCS valid. */
3453 if (!IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
3454 {
3455 Log(("%s: VMCS pointer %#RGp invalid -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
3456 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrInvalid;
3457 iemVmxVmFailInvalid(pVCpu);
3458 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3459 return VINF_SUCCESS;
3460 }
3461
3462 /** @todo Distinguish block-by-MOV-SS from block-by-STI. Currently we
3463 * use block-by-STI here which is not quite correct. */
3464 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
3465 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
3466 {
3467 Log(("%s: VM entry with events blocked by MOV SS -> VMFail\n", pszInstr));
3468 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_BlocKMovSS;
3469 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_BLOCK_MOVSS);
3470 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3471 return VINF_SUCCESS;
3472 }
3473
3474 if (uInstrId == VMXINSTRID_VMLAUNCH)
3475 {
3476 /* VMLAUNCH with non-clear VMCS. */
3477 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState != VMX_V_VMCS_STATE_CLEAR)
3478 {
3479 Log(("vmlaunch: VMLAUNCH with non-clear VMCS -> VMFail\n"));
3480 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsClear;
3481 iemVmxVmFail(pVCpu, VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS);
3482 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3483 return VINF_SUCCESS;
3484 }
3485 }
3486 else
3487 {
3488 /* VMRESUME with non-launched VMCS. */
3489 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState != VMX_V_VMCS_STATE_LAUNCHED)
3490 {
3491 Log(("vmresume: VMRESUME with non-launched VMCS -> VMFail\n"));
3492 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsLaunch;
3493 iemVmxVmFail(pVCpu, VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS);
3494 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3495 return VINF_SUCCESS;
3496 }
3497 }
3498
3499 /*
3500 * Load the current VMCS.
3501 */
3502 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
3503 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs),
3504 IEM_VMX_GET_CURRENT_VMCS(pVCpu), VMX_V_VMCS_SIZE);
3505 if (RT_FAILURE(rc))
3506 {
3507 Log(("%s: Failed to read VMCS at %#RGp, rc=%Rrc\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu), rc));
3508 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrReadPhys;
3509 return rc;
3510 }
3511
3512 /*
3513 * Check VM-execution control fields.
3514 */
3515 rc = iemVmxVmentryCheckExecCtls(pVCpu, pszInstr);
3516 if (rc == VINF_SUCCESS)
3517 { /* likely */ }
3518 else
3519 {
3520 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
3521 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3522 return VINF_SUCCESS;
3523 }
3524
3525 /*
3526 * Check VM-exit control fields.
3527 */
3528 rc = iemVmxVmentryCheckExitCtls(pVCpu, pszInstr);
3529 if (rc == VINF_SUCCESS)
3530 { /* likely */ }
3531 else
3532 {
3533 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
3534 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3535 return VINF_SUCCESS;
3536 }
3537
3538 /*
3539 * Check VM-entry control fields.
3540 */
3541 rc = iemVmxVmentryCheckEntryCtls(pVCpu, pszInstr);
3542 if (rc == VINF_SUCCESS)
3543 { /* likely */ }
3544 else
3545 {
3546 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
3547 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3548 return VINF_SUCCESS;
3549 }
3550
3551 /*
3552 * Check host-state fields.
3553 */
3554 rc = iemVmxVmentryCheckHostState(pVCpu, pszInstr);
3555 if (rc == VINF_SUCCESS)
3556 { /* likely */ }
3557 else
3558 {
3559 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_HOST_STATE);
3560 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3561 return VINF_SUCCESS;
3562 }
3563
3564 /*
3565 * Check guest-state fields.
3566 */
3567 rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
3568 if (rc == VINF_SUCCESS)
3569 { /* likely */ }
3570 else
3571 {
3572 /** @todo NSTVMX: VM-exit due to failed guest-state checks (VM-entry failure). */
3573 return VINF_SUCCESS;
3574 }
3575
3576
3577 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_Success;
3578 iemVmxVmSucceed(pVCpu);
3579 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3580 return VERR_IEM_IPE_2;
3581}
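
/*
 * Editorial sketch (not part of the original sources): the architectural RFLAGS
 * conventions behind the iemVmxVmFailInvalid/iemVmxVmFail calls in the worker above.
 * VMfailInvalid sets CF and clears the other status flags; VMfailValid sets ZF, clears
 * the other status flags and stores a VMXINSTRERR_XXX value in the VM-instruction
 * error field. The helper names are made up for illustration.
 */
static uint32_t vmxSketchVmFailInvalidEFlags(uint32_t fEFlags)
{
    fEFlags &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
    return fEFlags | X86_EFL_CF;
}

static uint32_t vmxSketchVmFailValidEFlags(uint32_t fEFlags)
{
    fEFlags &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF);
    return fEFlags | X86_EFL_ZF;
}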
3582
3583
3584/**
3585 * Implements 'VMXON'.
3586 */
3587IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
3588{
3589 return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
3590}
3591
3592
3593/**
3594 * Implements 'VMXOFF'.
3595 *
3596 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
3597 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
3598 */
3599IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
3600{
3601# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
3602 RT_NOREF2(pVCpu, cbInstr);
3603 return VINF_EM_RAW_EMULATE_INSTR;
3604# else
3605 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
3606 {
3607 /** @todo NSTVMX: intercept. */
3608 }
3609
3610 /* CPL. */
3611 if (pVCpu->iem.s.uCpl > 0)
3612 {
3613 Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
3614 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxoff_Cpl;
3615 return iemRaiseGeneralProtectionFault0(pVCpu);
3616 }
3617
3618 /* Dual monitor treatment of SMIs and SMM. */
3619 uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
3620 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
3621 {
3622 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
3623 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3624 return VINF_SUCCESS;
3625 }
3626
3627 /*
3628 * Record that we're no longer in VMX root operation, block INIT, block and disable A20M.
3629 */
3630 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
3631 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
3632
3633 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
3634 { /** @todo NSTVMX: Unblock SMI. */ }
3635 /** @todo NSTVMX: Unblock and enable A20M. */
3636 /** @todo NSTVMX: Clear address-range monitoring. */
3637
3638 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxoff_Success;
3639 iemVmxVmSucceed(pVCpu);
3640 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3641# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
3642 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
3643# else
3644 return VINF_SUCCESS;
3645# endif
3646# endif
3647}
3648
3649
3650/**
3651 * Implements 'VMLAUNCH'.
3652 */
3653IEM_CIMPL_DEF_0(iemCImpl_vmlaunch)
3654{
3655 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMLAUNCH, NULL /* pExitInfo */);
3656}
3657
3658
3659/**
3660 * Implements 'VMRESUME'.
3661 */
3662IEM_CIMPL_DEF_0(iemCImpl_vmresume)
3663{
3664 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMRESUME, NULL /* pExitInfo */);
3665}
3666
3667
3668/**
3669 * Implements 'VMPTRLD'.
3670 */
3671IEM_CIMPL_DEF_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
3672{
3673 return iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
3674}
3675
3676
3677/**
3678 * Implements 'VMPTRST'.
3679 */
3680IEM_CIMPL_DEF_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
3681{
3682 return iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
3683}
3684
3685
3686/**
3687 * Implements 'VMCLEAR'.
3688 */
3689IEM_CIMPL_DEF_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
3690{
3691 return iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
3692}
3693
3694
3695/**
3696 * Implements 'VMWRITE' register.
3697 */
3698IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64FieldEnc)
3699{
3700 return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, IEMMODE_64BIT /* N/A */, u64Val, u64FieldEnc,
3701 NULL /* pExitInfo */);
3702}
3703
3704
3705/**
3706 * Implements 'VMWRITE' memory.
3707 */
3708IEM_CIMPL_DEF_4(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrVal, uint32_t, u64FieldEnc)
3709{
3710 return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrVal, u64FieldEnc, NULL /* pExitInfo */);
3711}
3712
3713
3714/**
3715 * Implements 'VMREAD' 64-bit register.
3716 */
3717IEM_CIMPL_DEF_2(iemCImpl_vmread64_reg, uint64_t *, pu64Dst, uint64_t, u64FieldEnc)
3718{
3719 return iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, NULL /* pExitInfo */);
3720}
3721
3722
3723/**
3724 * Implements 'VMREAD' 32-bit register.
3725 */
3726IEM_CIMPL_DEF_2(iemCImpl_vmread32_reg, uint32_t *, pu32Dst, uint32_t, u32FieldEnc)
3727{
3728 return iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u32FieldEnc, NULL /* pExitInfo */);
3729}
3730
3731
3732/**
3733 * Implements 'VMREAD' memory.
3734 */
3735IEM_CIMPL_DEF_4(iemCImpl_vmread_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrDst, uint32_t, u64FieldEnc)
3736{
3737 return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, u64FieldEnc, NULL /* pExitInfo */);
3738}
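
/*
 * Editorial sketch (not part of the original sources): how a VMCS field encoding, as
 * passed to the VMREAD/VMWRITE implementations above, breaks down architecturally.
 * Bit 0 = access type (full/high), bits 9:1 = index, bits 11:10 = type (control,
 * VM-exit information, guest state, host state), bit 12 = reserved, bits 14:13 =
 * width (16-bit, 64-bit, 32-bit, natural). The helper name is made up for illustration.
 */
static void vmxSketchDecodeVmcsFieldEnc(uint32_t uFieldEnc, uint8_t *puWidth, uint8_t *puType,
                                        uint16_t *puIndex, bool *pfHighAccess)
{
    *pfHighAccess = RT_BOOL(uFieldEnc & RT_BIT_32(0));     /* Bit 0.      */
    *puIndex      = (uint16_t)((uFieldEnc >> 1) & 0x1ff);  /* Bits 9:1.   */
    *puType       = (uint8_t)((uFieldEnc >> 10) & 0x3);    /* Bits 11:10. */
    *puWidth      = (uint8_t)((uFieldEnc >> 13) & 0x3);    /* Bits 14:13. */
}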
3739
3740#endif
3741