VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h@ 74185

Last change on this file since 74185 was 74185, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 vmlaunch/vmresume bits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 164.3 KB
1/* $Id: IEMAllCImplVmxInstr.cpp.h 74185 2018-09-11 04:38:52Z vboxsync $ */
2/** @file
3 * IEM - VT-x instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/**
20 * Implements 'VMCALL'.
21 */
22IEM_CIMPL_DEF_0(iemCImpl_vmcall)
23{
24 /** @todo NSTVMX: intercept. */
25
26 /* Join forces with vmmcall. */
27 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
28}
29
30#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
31/**
32 * Map of VMCS field encodings to their virtual-VMCS structure offsets.
33 *
34 * The first array dimension is the VMCS field encoding's Width shifted left by two and
35 * OR'ed with its Type, the second dimension is the Index; see VMXVMCSFIELDENC.
36 */
37uint16_t const g_aoffVmcsMap[16][VMX_V_VMCS_MAX_INDEX + 1] =
38{
39 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
40 {
41 /* 0 */ RT_OFFSETOF(VMXVVMCS, u16Vpid),
42 /* 1 */ RT_OFFSETOF(VMXVVMCS, u16PostIntNotifyVector),
43 /* 2 */ RT_OFFSETOF(VMXVVMCS, u16EptpIndex),
44 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
45 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
46 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
47 },
48 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
49 {
50 /* 0-7 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
51 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
52 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
53 /* 24-25 */ UINT16_MAX, UINT16_MAX
54 },
55 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
56 {
57 /* 0 */ RT_OFFSETOF(VMXVVMCS, GuestEs),
58 /* 1 */ RT_OFFSETOF(VMXVVMCS, GuestCs),
59 /* 2 */ RT_OFFSETOF(VMXVVMCS, GuestSs),
60 /* 3 */ RT_OFFSETOF(VMXVVMCS, GuestDs),
61 /* 4 */ RT_OFFSETOF(VMXVVMCS, GuestFs),
62 /* 5 */ RT_OFFSETOF(VMXVVMCS, GuestGs),
63 /* 6 */ RT_OFFSETOF(VMXVVMCS, GuestLdtr),
64 /* 7 */ RT_OFFSETOF(VMXVVMCS, GuestTr),
65 /* 8 */ RT_OFFSETOF(VMXVVMCS, u16GuestIntStatus),
66 /* 9 */ RT_OFFSETOF(VMXVVMCS, u16PmlIndex),
67 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
68 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
69 },
70 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
71 {
72 /* 0 */ RT_OFFSETOF(VMXVVMCS, HostEs),
73 /* 1 */ RT_OFFSETOF(VMXVVMCS, HostCs),
74 /* 2 */ RT_OFFSETOF(VMXVVMCS, HostSs),
75 /* 3 */ RT_OFFSETOF(VMXVVMCS, HostDs),
76 /* 4 */ RT_OFFSETOF(VMXVVMCS, HostFs),
77 /* 5 */ RT_OFFSETOF(VMXVVMCS, HostGs),
78 /* 6 */ RT_OFFSETOF(VMXVVMCS, HostTr),
79 /* 7-14 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
80 /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
81 /* 23-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
82 },
83 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
84 {
85 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapA),
86 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapB),
87 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64AddrMsrBitmap),
88 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64AddrExitMsrStore),
89 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64AddrExitMsrLoad),
90 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64AddrEntryMsrLoad),
91 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64ExecVmcsPtr),
92 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64AddrPml),
93 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64TscOffset),
94 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64AddrVirtApic),
95 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64AddrApicAccess),
96 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64AddrPostedIntDesc),
97 /* 12 */ RT_OFFSETOF(VMXVVMCS, u64VmFuncCtls),
98 /* 13 */ RT_OFFSETOF(VMXVVMCS, u64EptpPtr),
99 /* 14 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap0),
100 /* 15 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap1),
101 /* 16 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap2),
102 /* 17 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap3),
103 /* 18 */ RT_OFFSETOF(VMXVVMCS, u64AddrEptpList),
104 /* 19 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmreadBitmap),
105 /* 20 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmwriteBitmap),
106 /* 21 */ RT_OFFSETOF(VMXVVMCS, u64AddrXcptVeInfo),
107 /* 22 */ RT_OFFSETOF(VMXVVMCS, u64AddrXssBitmap),
108 /* 23 */ RT_OFFSETOF(VMXVVMCS, u64AddrEnclsBitmap),
109 /* 24 */ UINT16_MAX,
110 /* 25 */ RT_OFFSETOF(VMXVVMCS, u64TscMultiplier)
111 },
112 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
113 {
114 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64GuestPhysAddr),
115 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
116 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
117 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
118 /* 25 */ UINT16_MAX
119 },
120 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
121 {
122 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64VmcsLinkPtr),
123 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64GuestDebugCtlMsr),
124 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64GuestPatMsr),
125 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEferMsr),
126 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
127 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte0),
128 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte1),
129 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte2),
130 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte3),
131 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64GuestBndcfgsMsr),
132 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
133 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
134 },
135 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
136 {
137 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64HostPatMsr),
138 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64HostEferMsr),
139 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64HostPerfGlobalCtlMsr),
140 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
141 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
142 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
143 },
144 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
145 {
146 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32PinCtls),
147 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls),
148 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32XcptBitmap),
149 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMask),
150 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMatch),
151 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32Cr3TargetCount),
152 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32ExitCtls),
153 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrStoreCount),
154 /* 8 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrLoadCount),
155 /* 9 */ RT_OFFSETOF(VMXVVMCS, u32EntryCtls),
156 /* 10 */ RT_OFFSETOF(VMXVVMCS, u32EntryMsrLoadCount),
157 /* 11 */ RT_OFFSETOF(VMXVVMCS, u32EntryIntInfo),
158 /* 12 */ RT_OFFSETOF(VMXVVMCS, u32EntryXcptErrCode),
159 /* 13 */ RT_OFFSETOF(VMXVVMCS, u32EntryInstrLen),
160 /* 14 */ RT_OFFSETOF(VMXVVMCS, u32TprThreshold),
161 /* 15 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls2),
162 /* 16 */ RT_OFFSETOF(VMXVVMCS, u32PleGap),
163 /* 17 */ RT_OFFSETOF(VMXVVMCS, u32PleWindow),
164 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
165 },
166 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
167 {
168 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32RoVmInstrError),
169 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32RoExitReason),
170 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32RoExitIntInfo),
171 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32RoExitErrCode),
172 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
173 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
174 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32RoExitInstrLen),
175 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32RoExitInstrInfo),
176 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
177 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
178 /* 24-25 */ UINT16_MAX, UINT16_MAX
179 },
180 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
181 {
182 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsLimit),
183 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsLimit),
184 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsLimit),
185 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsLimit),
186 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsLimit),
187 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsLimit),
188 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrLimit),
189 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrLimit),
190 /* 8 */ RT_OFFSETOF(VMXVVMCS, u32GuestGdtrLimit),
191 /* 9 */ RT_OFFSETOF(VMXVVMCS, u32GuestIdtrLimit),
192 /* 10 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsAttr),
193 /* 11 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsAttr),
194 /* 12 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsAttr),
195 /* 13 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsAttr),
196 /* 14 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsAttr),
197 /* 15 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsAttr),
198 /* 16 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrAttr),
199 /* 17 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrAttr),
200 /* 18 */ RT_OFFSETOF(VMXVVMCS, u32GuestIntrState),
201 /* 19 */ RT_OFFSETOF(VMXVVMCS, u32GuestActivityState),
202 /* 20 */ RT_OFFSETOF(VMXVVMCS, u32GuestSmBase),
203 /* 21 */ RT_OFFSETOF(VMXVVMCS, u32GuestSysenterCS),
204 /* 22 */ UINT16_MAX,
205 /* 23 */ RT_OFFSETOF(VMXVVMCS, u32PreemptTimer),
206 /* 24-25 */ UINT16_MAX, UINT16_MAX
207 },
208 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
209 {
210 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32HostSysenterCs),
211 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
212 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
213 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
214 /* 25 */ UINT16_MAX
215 },
216 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_CONTROL: */
217 {
218 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64Cr0Mask),
219 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64Cr4Mask),
220 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64Cr0ReadShadow),
221 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64Cr4ReadShadow),
222 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target0),
223 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target1),
224 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target2),
225 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target3),
226 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
227 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
228 /* 24-25 */ UINT16_MAX, UINT16_MAX
229 },
230 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
231 {
232 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64ExitQual),
233 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64IoRcx),
234 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64IoRsi),
235 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64IoRdi),
236 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64IoRip),
237 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestLinearAddr),
238 /* 6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
239 /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
240 /* 22-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
241 },
242 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
243 {
244 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr0),
245 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr3),
246 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr4),
247 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEsBase),
248 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64GuestCsBase),
249 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestSsBase),
250 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64GuestDsBase),
251 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64GuestFsBase),
252 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64GuestGsBase),
253 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64GuestLdtrBase),
254 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64GuestTrBase),
255 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64GuestGdtrBase),
256 /* 12 */ RT_OFFSETOF(VMXVVMCS, u64GuestIdtrBase),
257 /* 13 */ RT_OFFSETOF(VMXVVMCS, u64GuestDr7),
258 /* 14 */ RT_OFFSETOF(VMXVVMCS, u64GuestRsp),
259 /* 15 */ RT_OFFSETOF(VMXVVMCS, u64GuestRip),
260 /* 16 */ RT_OFFSETOF(VMXVVMCS, u64GuestRFlags),
261 /* 17 */ RT_OFFSETOF(VMXVVMCS, u64GuestPendingDbgXcpt),
262 /* 18 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEsp),
263 /* 19 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEip),
264 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
265 },
266 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_HOST_STATE: */
267 {
268 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64HostCr0),
269 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64HostCr3),
270 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64HostCr4),
271 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64HostFsBase),
272 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64HostGsBase),
273 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64HostTrBase),
274 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64HostGdtrBase),
275 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64HostIdtrBase),
276 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEsp),
277 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEip),
278 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64HostRsp),
279 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64HostRip),
280 /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
281 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
282 }
283};
284
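/*
 * A minimal sketch of how a VMCS field encoding selects an entry in the map above,
 * following the same convention used by iemVmxVmreadCommon further down
 * ((width << 2) | type picks the row, the encoding's index picks the column).
 * The helper name is hypothetical; the file itself inlines this computation at
 * each use site.
 *
 * @code
 *   static uint16_t iemVmxVmcsFieldOffset(uint32_t uFieldEncLo)
 *   {
 *       VMXVMCSFIELDENC FieldEnc;
 *       FieldEnc.u = uFieldEncLo;
 *       uint8_t const uWidthType = (FieldEnc.n.u2Width << 2) | FieldEnc.n.u2Type;
 *       uint8_t const uIndex     = FieldEnc.n.u8Index;
 *       if (uIndex <= VMX_V_VMCS_MAX_INDEX)
 *           return g_aoffVmcsMap[uWidthType][uIndex]; // UINT16_MAX marks unmapped fields.
 *       return UINT16_MAX;
 *   }
 * @endcode
 */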
285
286/**
287 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
288 * relative offsets.
289 */
290# ifdef IEM_WITH_CODE_TLB
291# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) do { } while (0)
292# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) do { } while (0)
293# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
294# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
295# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
296# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
297# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
298# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
299# error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
300# else /* !IEM_WITH_CODE_TLB */
301# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
302 do \
303 { \
304 Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
305 (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
306 } while (0)
307
308# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
309
310# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
311 do \
312 { \
313 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
314 uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
315 uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
316 (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
317 } while (0)
318
319# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
320 do \
321 { \
322 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
323 (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
324 } while (0)
325
326# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
327 do \
328 { \
329 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
330 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
331 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
332 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
333 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
334 (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
335 } while (0)
336
337# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
338 do \
339 { \
340 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
341 (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
342 } while (0)
343
344# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
345 do \
346 { \
347 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
348 (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
349 } while (0)
350
351# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
352 do \
353 { \
354 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
355 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
356 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
357 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
358 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
359 (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
360 } while (0)
361# endif /* !IEM_WITH_CODE_TLB */
362
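/*
 * A minimal sketch of the byte math performed by the displacement macros above
 * (standalone, no IEM state; the variable names are illustrative):
 *
 * @code
 *   uint8_t const abOpcode[] = { 0x34, 0x12, 0xf0 };
 *   uint16_t u16Disp = RT_MAKE_U16(abOpcode[0], abOpcode[1]); // 0x1234, little endian.
 *   uint64_t u64Disp = (int8_t)abOpcode[2];                   // 8-bit disp sign-extended: 0xfffffffffffffff0.
 * @endcode
 */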
363/** Whether a shadow VMCS is present for the given VCPU. */
364#define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
365
366/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
367#define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->u64VmcsLinkPtr.u)
368
369/** Gets the VMXON region pointer. */
370#define IEM_VMX_GET_VMXON_PTR(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
371
372/** Whether a current VMCS is present for the given VCPU. */
373#define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
374
375/** Gets the guest-physical address of the current VMCS for the given VCPU. */
376#define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
377
378/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
379#define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
380 do \
381 { \
382 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
383 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
384 } while (0)
385
386/** Clears any current VMCS for the given VCPU. */
387#define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
388 do \
389 { \
390 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
391 } while (0)
392
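/*
 * A minimal sketch of how the current-VMCS macros above chain together; the
 * guest-physical address used here is purely illustrative:
 *
 * @code
 *   RTGCPHYS const GCPhysVmcs = UINT64_C(0x1000);
 *   IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);   // Asserts the address is not NIL_RTGCPHYS.
 *   Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
 *   IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);             // Back to "no current VMCS" (NIL_RTGCPHYS).
 * @endcode
 */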
393/** Check the common VMX instruction preconditions.
394 * @note Any changes here, also check if IEMOP_HLP_VMX_INSTR needs updating.
395 */
396#define IEM_VMX_INSTR_CHECKS(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
397 do { \
398 if ( !IEM_IS_REAL_OR_V86_MODE(a_pVCpu) \
399 && ( !IEM_IS_LONG_MODE(a_pVCpu) \
400 || IEM_IS_64BIT_CODE(a_pVCpu))) \
401 { /* likely */ } \
402 else \
403 { \
404 if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
405 { \
406 Log((a_szInstr ": Real or v8086 mode -> #UD\n")); \
407 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
408 return iemRaiseUndefinedOpcode(a_pVCpu); \
409 } \
410 if (IEM_IS_LONG_MODE(a_pVCpu) && !IEM_IS_64BIT_CODE(a_pVCpu)) \
411 { \
412 Log((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
413 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
414 return iemRaiseUndefinedOpcode(a_pVCpu); \
415 } \
416 } \
417 } while (0)
418
419/** Check for VMX instructions that require being in VMX operation.
420 * @note Any changes here, also check if IEMOP_HLP_IN_VMX_OPERATION needs updating. */
421#define IEM_VMX_IN_VMX_OPERATION(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
422 do \
423 { \
424 if (IEM_IS_VMX_ROOT_MODE(a_pVCpu)) \
425 { /* likely */ } \
426 else \
427 { \
428 Log((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
429 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
430 return iemRaiseUndefinedOpcode(a_pVCpu); \
431 } \
432 } while (0)
433
434/** Marks a VM-entry failure with a diagnostic reason, logs and returns. */
435#define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_InsDiag) \
436 do \
437 { \
438 Log(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_InsDiag), \
439 HMVmxGetDiagDesc(a_InsDiag), (a_pszFailure))); \
440 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_InsDiag); \
441 return VERR_VMX_VMENTRY_FAILED; \
442 } while (0)
443
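/*
 * A minimal sketch of how the check macros above are intended to open a VMX
 * instruction worker; the worker name is hypothetical and the diagnostic prefix
 * merely has to match an existing kVmxVDiag_Xxx enum family (the VMREAD prefix
 * is borrowed here for illustration):
 *
 * @code
 *   IEM_CIMPL_DEF_0(iemCImpl_SomeVmxInstr)
 *   {
 *       IEM_VMX_INSTR_CHECKS(pVCpu, "someinstr", kVmxVDiag_Vmread);     // #UD outside usable CPU modes.
 *       IEM_VMX_IN_VMX_OPERATION(pVCpu, "someinstr", kVmxVDiag_Vmread); // #UD unless in VMX operation.
 *       // Instruction-specific work would follow here.
 *       return VINF_SUCCESS;
 *   }
 * @endcode
 */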
444
445/**
446 * Returns whether the given VMCS field is valid and supported by our emulation.
447 *
448 * @param pVCpu The cross context virtual CPU structure.
449 * @param u64FieldEnc The VMCS field encoding.
450 *
451 * @remarks This takes into account the CPU features exposed to the guest.
452 */
453IEM_STATIC bool iemVmxIsVmcsFieldValid(PVMCPU pVCpu, uint64_t u64FieldEnc)
454{
455 uint32_t const uFieldEncHi = RT_HI_U32(u64FieldEnc);
456 uint32_t const uFieldEncLo = RT_LO_U32(u64FieldEnc);
457 if (!uFieldEncHi)
458 { /* likely */ }
459 else
460 return false;
461
462 PCCPUMFEATURES pFeat = IEM_GET_GUEST_CPU_FEATURES(pVCpu);
463 switch (uFieldEncLo)
464 {
465 /*
466 * 16-bit fields.
467 */
468 /* Control fields. */
469 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
470 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
471 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
472
473 /* Guest-state fields. */
474 case VMX_VMCS16_GUEST_ES_SEL:
475 case VMX_VMCS16_GUEST_CS_SEL:
476 case VMX_VMCS16_GUEST_SS_SEL:
477 case VMX_VMCS16_GUEST_DS_SEL:
478 case VMX_VMCS16_GUEST_FS_SEL:
479 case VMX_VMCS16_GUEST_GS_SEL:
480 case VMX_VMCS16_GUEST_LDTR_SEL:
481 case VMX_VMCS16_GUEST_TR_SEL:
482 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
483 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
484
485 /* Host-state fields. */
486 case VMX_VMCS16_HOST_ES_SEL:
487 case VMX_VMCS16_HOST_CS_SEL:
488 case VMX_VMCS16_HOST_SS_SEL:
489 case VMX_VMCS16_HOST_DS_SEL:
490 case VMX_VMCS16_HOST_FS_SEL:
491 case VMX_VMCS16_HOST_GS_SEL:
492 case VMX_VMCS16_HOST_TR_SEL: return true;
493
494 /*
495 * 64-bit fields.
496 */
497 /* Control fields. */
498 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
499 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
500 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
501 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
502 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
503 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
504 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
505 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
506 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
507 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
508 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
509 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
510 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
511 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
512 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
513 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
514 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
515 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
516 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
517 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
518 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
519 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
520 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
521 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
522 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
523 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
524 case VMX_VMCS64_CTRL_EPTP_FULL:
525 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
526 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
527 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
528 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
529 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
530 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
531 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
532 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
533 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
534 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
535 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
536 {
537 uint64_t const uVmFuncMsr = CPUMGetGuestIa32VmxVmFunc(pVCpu);
538 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
539 }
540 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
541 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
542 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
543 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
544 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
545 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
546 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
547 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
548 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
549 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
550 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
551 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
552
553 /* Read-only data fields. */
554 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
555 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
556
557 /* Guest-state fields. */
558 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
559 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
560 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
561 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
562 case VMX_VMCS64_GUEST_PAT_FULL:
563 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
564 case VMX_VMCS64_GUEST_EFER_FULL:
565 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
566 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
567 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
568 case VMX_VMCS64_GUEST_PDPTE0_FULL:
569 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
570 case VMX_VMCS64_GUEST_PDPTE1_FULL:
571 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
572 case VMX_VMCS64_GUEST_PDPTE2_FULL:
573 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
574 case VMX_VMCS64_GUEST_PDPTE3_FULL:
575 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
576 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
577 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
578
579 /* Host-state fields. */
580 case VMX_VMCS64_HOST_PAT_FULL:
581 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
582 case VMX_VMCS64_HOST_EFER_FULL:
583 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
584 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
585 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
586
587 /*
588 * 32-bit fields.
589 */
590 /* Control fields. */
591 case VMX_VMCS32_CTRL_PIN_EXEC:
592 case VMX_VMCS32_CTRL_PROC_EXEC:
593 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
594 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
595 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
596 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
597 case VMX_VMCS32_CTRL_EXIT:
598 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
599 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
600 case VMX_VMCS32_CTRL_ENTRY:
601 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
602 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
603 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
604 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
605 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
606 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
607 case VMX_VMCS32_CTRL_PLE_GAP:
608 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
609
610 /* Read-only data fields. */
611 case VMX_VMCS32_RO_VM_INSTR_ERROR:
612 case VMX_VMCS32_RO_EXIT_REASON:
613 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
614 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
615 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
616 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
617 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
618 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
619
620 /* Guest-state fields. */
621 case VMX_VMCS32_GUEST_ES_LIMIT:
622 case VMX_VMCS32_GUEST_CS_LIMIT:
623 case VMX_VMCS32_GUEST_SS_LIMIT:
624 case VMX_VMCS32_GUEST_DS_LIMIT:
625 case VMX_VMCS32_GUEST_FS_LIMIT:
626 case VMX_VMCS32_GUEST_GS_LIMIT:
627 case VMX_VMCS32_GUEST_LDTR_LIMIT:
628 case VMX_VMCS32_GUEST_TR_LIMIT:
629 case VMX_VMCS32_GUEST_GDTR_LIMIT:
630 case VMX_VMCS32_GUEST_IDTR_LIMIT:
631 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
632 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
633 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
634 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
635 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
636 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
637 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
638 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
639 case VMX_VMCS32_GUEST_INT_STATE:
640 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
641 case VMX_VMCS32_GUEST_SMBASE:
642 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
643 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
644
645 /* Host-state fields. */
646 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
647
648 /*
649 * Natural-width fields.
650 */
651 /* Control fields. */
652 case VMX_VMCS_CTRL_CR0_MASK:
653 case VMX_VMCS_CTRL_CR4_MASK:
654 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
655 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
656 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
657 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
658 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
659 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
660
661 /* Read-only data fields. */
662 case VMX_VMCS_RO_EXIT_QUALIFICATION:
663 case VMX_VMCS_RO_IO_RCX:
664 case VMX_VMCS_RO_IO_RSI:
665 case VMX_VMCS_RO_IO_RDI:
666 case VMX_VMCS_RO_IO_RIP:
667 case VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR: return true;
668
669 /* Guest-state fields. */
670 case VMX_VMCS_GUEST_CR0:
671 case VMX_VMCS_GUEST_CR3:
672 case VMX_VMCS_GUEST_CR4:
673 case VMX_VMCS_GUEST_ES_BASE:
674 case VMX_VMCS_GUEST_CS_BASE:
675 case VMX_VMCS_GUEST_SS_BASE:
676 case VMX_VMCS_GUEST_DS_BASE:
677 case VMX_VMCS_GUEST_FS_BASE:
678 case VMX_VMCS_GUEST_GS_BASE:
679 case VMX_VMCS_GUEST_LDTR_BASE:
680 case VMX_VMCS_GUEST_TR_BASE:
681 case VMX_VMCS_GUEST_GDTR_BASE:
682 case VMX_VMCS_GUEST_IDTR_BASE:
683 case VMX_VMCS_GUEST_DR7:
684 case VMX_VMCS_GUEST_RSP:
685 case VMX_VMCS_GUEST_RIP:
686 case VMX_VMCS_GUEST_RFLAGS:
687 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
688 case VMX_VMCS_GUEST_SYSENTER_ESP:
689 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
690
691 /* Host-state fields. */
692 case VMX_VMCS_HOST_CR0:
693 case VMX_VMCS_HOST_CR3:
694 case VMX_VMCS_HOST_CR4:
695 case VMX_VMCS_HOST_FS_BASE:
696 case VMX_VMCS_HOST_GS_BASE:
697 case VMX_VMCS_HOST_TR_BASE:
698 case VMX_VMCS_HOST_GDTR_BASE:
699 case VMX_VMCS_HOST_IDTR_BASE:
700 case VMX_VMCS_HOST_SYSENTER_ESP:
701 case VMX_VMCS_HOST_SYSENTER_EIP:
702 case VMX_VMCS_HOST_RSP:
703 case VMX_VMCS_HOST_RIP: return true;
704 }
705
706 return false;
707}
708
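/*
 * A minimal usage sketch for the validity check above: the high dword of the
 * 64-bit encoding must be zero, so plain 32-bit encoding constants can be
 * passed directly (results depend on the features exposed to the guest):
 *
 * @code
 *   bool const fRipOk  = iemVmxIsVmcsFieldValid(pVCpu, VMX_VMCS_GUEST_RIP);    // Always supported.
 *   bool const fHiBits = iemVmxIsVmcsFieldValid(pVCpu, UINT64_C(0x100000000)); // false, high dword set.
 * @endcode
 */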
709
710/**
711 * Gets a segment register from the VMCS given its index.
712 *
713 * @returns VBox status code.
714 * @param pVmcs Pointer to the virtual VMCS.
715 * @param iSegReg The index of the segment register (X86_SREG_XXX).
716 * @param pSelReg Where to store the segment register (only updated when
717 * VINF_SUCCESS is returned).
718 *
719 * @remarks Warning! This does not validate the contents of the retrieved segment
720 * register.
721 */
722IEM_STATIC int iemVmxVmcsGetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCPUMSELREG pSelReg)
723{
724 Assert(pSelReg);
725 Assert(iSegReg < X86_SREG_COUNT);
726
727 /* Selector. */
728 uint16_t u16Sel;
729 {
730 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
731 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
732 uint8_t const uWidthType = (uWidth << 2) | uType;
733 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
734 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
735 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
736 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
737 uint8_t const *pbField = pbVmcs + offField;
738 u16Sel = *(uint16_t *)pbField;
739 }
740
741 /* Limit. */
742 uint32_t u32Limit;
743 {
744 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
745 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
746 uint8_t const uWidthType = (uWidth << 2) | uType;
747 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
748 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
749 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
750 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
751 uint8_t const *pbField = pbVmcs + offField;
752 u32Limit = *(uint32_t *)pbField;
753 }
754
755 /* Base. */
756 uint64_t u64Base;
757 {
758 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
759 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
760 uint8_t const uWidthType = (uWidth << 2) | uType;
761 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
762 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
763 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
764 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
765 uint8_t const *pbField = pbVmcs + offField;
766 u64Base = *(uint64_t *)pbField;
767 /** @todo NSTVMX: Should we zero out high bits here for 32-bit virtual CPUs? */
768 }
769
770 /* Attributes. */
771 uint32_t u32Attr;
772 {
773 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
774 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
775 uint8_t const uWidthType = (uWidth << 2) | uType;
776 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
777 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
778 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
779 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
780 uint8_t const *pbField = pbVmcs + offField;
781 u32Attr = *(uint32_t *)pbField;
782 }
783
784 pSelReg->Sel = u16Sel;
785 pSelReg->u32Limit = u32Limit;
786 pSelReg->u64Base = u64Base;
787 pSelReg->Attr.u = u32Attr;
788 return VINF_SUCCESS;
789}
790
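/*
 * A minimal usage sketch for the segment-register getter above; the local names
 * are illustrative and, as noted, no validation of the returned hidden fields is
 * implied:
 *
 * @code
 *   CPUMSELREG CsReg;
 *   int rc = iemVmxVmcsGetGuestSegReg(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), X86_SREG_CS, &CsReg);
 *   if (RT_SUCCESS(rc))
 *       Log(("Guest CS: sel=%#x base=%#RX64 limit=%#x attr=%#x\n",
 *            CsReg.Sel, CsReg.u64Base, CsReg.u32Limit, CsReg.Attr.u));
 * @endcode
 */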
791
792/**
793 * Gets VM-exit instruction information along with any displacement for an
794 * instruction VM-exit.
795 *
796 * @returns The VM-exit instruction information.
797 * @param pVCpu The cross context virtual CPU structure.
798 * @param uExitReason The VM-exit reason.
799 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_XXX) if
800 * any. Pass VMXINSTRID_NONE otherwise.
801 * @param fPrimaryOpRead If the primary operand of the ModR/M byte (bits 0:3) is
802 * a read or write.
803 * @param pGCPtrDisp Where to store the displacement field. Optional, can be
804 * NULL.
805 */
806IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, bool fPrimaryOpRead,
807 PRTGCPTR pGCPtrDisp)
808{
809 RTGCPTR GCPtrDisp;
810 VMXEXITINSTRINFO ExitInstrInfo;
811 ExitInstrInfo.u = 0;
812
813 /*
814 * Get and parse the ModR/M byte from our decoded opcodes.
815 */
816 uint8_t bRm;
817 uint8_t const offModRm = pVCpu->iem.s.offModRm;
818 IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
819 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
820 {
821 /*
822 * ModR/M indicates register addressing.
823 *
824 * The primary and secondary register operands are reported in the iReg1 and iReg2
825 * fields depending on whether the primary operand is in read or write form.
826 */
827 uint8_t idxReg1;
828 uint8_t idxReg2;
829 if (fPrimaryOpRead)
830 {
831 idxReg1 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
832 idxReg2 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
833 }
834 else
835 {
836 idxReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
837 idxReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
838 }
839 ExitInstrInfo.All.u2Scaling = 0;
840 ExitInstrInfo.All.iReg1 = idxReg1;
841 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
842 ExitInstrInfo.All.fIsRegOperand = 1;
843 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
844 ExitInstrInfo.All.iSegReg = 0;
845 ExitInstrInfo.All.iIdxReg = 0;
846 ExitInstrInfo.All.fIdxRegInvalid = 1;
847 ExitInstrInfo.All.iBaseReg = 0;
848 ExitInstrInfo.All.fBaseRegInvalid = 1;
849 ExitInstrInfo.All.iReg2 = idxReg2;
850
851 /* Displacement not applicable for register addressing. */
852 GCPtrDisp = 0;
853 }
854 else
855 {
856 /*
857 * ModR/M indicates memory addressing.
858 */
859 uint8_t uScale = 0;
860 bool fBaseRegValid = false;
861 bool fIdxRegValid = false;
862 uint8_t iBaseReg = 0;
863 uint8_t iIdxReg = 0;
864 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
865 {
866 /*
867 * Parse the ModR/M, displacement for 16-bit addressing mode.
868 * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
869 */
870 uint16_t u16Disp = 0;
871 uint8_t const offDisp = offModRm + sizeof(bRm);
872 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
873 {
874 /* Displacement without any registers. */
875 IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
876 }
877 else
878 {
879 /* Register (index and base). */
880 switch (bRm & X86_MODRM_RM_MASK)
881 {
882 case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
883 case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
884 case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
885 case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
886 case 4: fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
887 case 5: fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
888 case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
889 case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
890 }
891
892 /* Register + displacement. */
893 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
894 {
895 case 0: break;
896 case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
897 case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp); break;
898 default:
899 {
900 /* Register addressing, handled at the beginning. */
901 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
902 break;
903 }
904 }
905 }
906
907 Assert(!uScale); /* There's no scaling/SIB byte for 16-bit addressing. */
908 GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
909 }
910 else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
911 {
912 /*
913 * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
914 * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
915 */
916 uint32_t u32Disp = 0;
917 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
918 {
919 /* Displacement without any registers. */
920 uint8_t const offDisp = offModRm + sizeof(bRm);
921 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
922 }
923 else
924 {
925 /* Register (and perhaps scale, index and base). */
926 uint8_t offDisp = offModRm + sizeof(bRm);
927 iBaseReg = (bRm & X86_MODRM_RM_MASK);
928 if (iBaseReg == 4)
929 {
930 /* An SIB byte follows the ModR/M byte, parse it. */
931 uint8_t bSib;
932 uint8_t const offSib = offModRm + sizeof(bRm);
933 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
934
935 /* A displacement may follow SIB, update its offset. */
936 offDisp += sizeof(bSib);
937
938 /* Get the scale. */
939 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
940
941 /* Get the index register. */
942 iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
943 fIdxRegValid = RT_BOOL(iIdxReg != 4);
944
945 /* Get the base register. */
946 iBaseReg = bSib & X86_SIB_BASE_MASK;
947 fBaseRegValid = true;
948 if (iBaseReg == 5)
949 {
950 if ((bRm & X86_MODRM_MOD_MASK) == 0)
951 {
952 /* Mod is 0 implies a 32-bit displacement with no base. */
953 fBaseRegValid = false;
954 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
955 }
956 else
957 {
958 /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
959 iBaseReg = X86_GREG_xBP;
960 }
961 }
962 }
963
964 /* Register + displacement. */
965 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
966 {
967 case 0: /* Handled above */ break;
968 case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
969 case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp); break;
970 default:
971 {
972 /* Register addressing, handled at the beginning. */
973 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
974 break;
975 }
976 }
977 }
978
979 GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
980 }
981 else
982 {
983 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
984
985 /*
986 * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
987 * See Intel instruction spec. 2.2 "IA-32e Mode".
988 */
989 uint64_t u64Disp = 0;
990 bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
991 if (fRipRelativeAddr)
992 {
993 /*
994 * RIP-relative addressing mode.
995 *
996 * The displacement is 32-bit signed, implying an offset range of +/-2G.
997 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
998 */
999 uint8_t const offDisp = offModRm + sizeof(bRm);
1000 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
1001 }
1002 else
1003 {
1004 uint8_t offDisp = offModRm + sizeof(bRm);
1005
1006 /*
1007 * Register (and perhaps scale, index and base).
1008 *
1009 * REX.B extends the most-significant bit of the base register. However, REX.B
1010 * is ignored while determining whether an SIB follows the opcode. Hence, we
1011 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
1012 *
1013 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
1014 */
1015 iBaseReg = (bRm & X86_MODRM_RM_MASK);
1016 if (iBaseReg == 4)
1017 {
1018 /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
1019 uint8_t bSib;
1020 uint8_t const offSib = offModRm + sizeof(bRm);
1021 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
1022
1023 /* Displacement may follow SIB, update its offset. */
1024 offDisp += sizeof(bSib);
1025
1026 /* Get the scale. */
1027 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
1028
1029 /* Get the index. */
1030 iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
1031 fIdxRegValid = RT_BOOL(iIdxReg != 4); /* R12 -can- be used as an index register. */
1032
1033 /* Get the base. */
1034 iBaseReg = (bSib & X86_SIB_BASE_MASK);
1035 fBaseRegValid = true;
1036 if (iBaseReg == 5)
1037 {
1038 if ((bRm & X86_MODRM_MOD_MASK) == 0)
1039 {
1040 /* Mod is 0 implies a signed 32-bit displacement with no base. */
1041 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
1042 }
1043 else
1044 {
1045 /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
1046 iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
1047 }
1048 }
1049 }
1050 iBaseReg |= pVCpu->iem.s.uRexB;
1051
1052 /* Register + displacement. */
1053 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
1054 {
1055 case 0: /* Handled above */ break;
1056 case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp); break;
1057 case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
1058 default:
1059 {
1060 /* Register addressing, handled at the beginning. */
1061 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
1062 break;
1063 }
1064 }
1065 }
1066
1067 GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
1068 }
1069
1070 /*
1071 * The primary or secondary register operand is reported in iReg2 depending
1072 * on whether the primary operand is in read/write form.
1073 */
1074 uint8_t idxReg2;
1075 if (fPrimaryOpRead)
1076 {
1077 idxReg2 = bRm & X86_MODRM_RM_MASK;
1078 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
1079 idxReg2 |= pVCpu->iem.s.uRexB;
1080 }
1081 else
1082 {
1083 idxReg2 = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
1084 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
1085 idxReg2 |= pVCpu->iem.s.uRexReg;
1086 }
1087 ExitInstrInfo.All.u2Scaling = uScale;
1088 ExitInstrInfo.All.iReg1 = 0; /* Not applicable for memory addressing. */
1089 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
1090 ExitInstrInfo.All.fIsRegOperand = 0;
1091 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
1092 ExitInstrInfo.All.iSegReg = pVCpu->iem.s.iEffSeg;
1093 ExitInstrInfo.All.iIdxReg = iIdxReg;
1094 ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
1095 ExitInstrInfo.All.iBaseReg = iBaseReg;
1096 ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
1097 ExitInstrInfo.All.iReg2 = idxReg2;
1098 }
1099
1100 /*
1101 * Handle exceptions to the norm for certain instructions.
1102 * (e.g. some instructions convey an instruction identity in place of iReg2).
1103 */
1104 switch (uExitReason)
1105 {
1106 case VMX_EXIT_GDTR_IDTR_ACCESS:
1107 {
1108 Assert(VMXINSTRID_IS_VALID(uInstrId));
1109 ExitInstrInfo.GdtIdt.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
1110 ExitInstrInfo.GdtIdt.u2Undef0 = 0;
1111 break;
1112 }
1113
1114 case VMX_EXIT_LDTR_TR_ACCESS:
1115 {
1116 Assert(VMXINSTRID_IS_VALID(uInstrId));
1117 ExitInstrInfo.LdtTr.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
1118 ExitInstrInfo.LdtTr.u2Undef0 = 0;
1119 break;
1120 }
1121
1122 case VMX_EXIT_RDRAND:
1123 case VMX_EXIT_RDSEED:
1124 {
1125 Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
1126 break;
1127 }
1128 }
1129
1130 /* Update displacement and return the constructed VM-exit instruction information field. */
1131 if (pGCPtrDisp)
1132 *pGCPtrDisp = GCPtrDisp;
1133 return ExitInstrInfo.u;
1134}
1135
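/*
 * A minimal sketch of how an instruction VM-exit path might use the helper
 * above; the exit reason and the read/write flag shown are examples only:
 *
 * @code
 *   RTGCPTR        GCPtrDisp;
 *   bool const     fPrimaryOpRead = false; // Example; see the fPrimaryOpRead parameter description above.
 *   uint32_t const uInstrInfo     = iemVmxGetExitInstrInfo(pVCpu, VMX_EXIT_VMREAD, VMXINSTRID_NONE,
 *                                                          fPrimaryOpRead, &GCPtrDisp);
 * @endcode
 */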
1136
1137/**
1138 * Implements VMSucceed for VMX instruction success.
1139 *
1140 * @param pVCpu The cross context virtual CPU structure.
1141 */
1142DECL_FORCE_INLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
1143{
1144 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1145}
1146
1147
1148/**
1149 * Implements VMFailInvalid for VMX instruction failure.
1150 *
1151 * @param pVCpu The cross context virtual CPU structure.
1152 */
1153DECL_FORCE_INLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
1154{
1155 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1156 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_CF;
1157}
1158
1159
1160/**
1161 * Implements VMFailValid for VMX instruction failure.
1162 *
1163 * @param pVCpu The cross context virtual CPU structure.
1164 * @param enmInsErr The VM instruction error.
1165 */
1166DECL_FORCE_INLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1167{
1168 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1169 {
1170 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1171 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_ZF;
1172 /** @todo NSTVMX: VMWrite enmInsErr to VM-instruction error field. */
1173 RT_NOREF(enmInsErr);
1174 }
1175}
1176
1177
1178/**
1179 * Implements VMFail for VMX instruction failure.
1180 *
1181 * @param pVCpu The cross context virtual CPU structure.
1182 * @param enmInsErr The VM instruction error.
1183 */
1184DECL_FORCE_INLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1185{
1186 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1187 {
1188 iemVmxVmFailValid(pVCpu, enmInsErr);
1189 /** @todo Set VM-instruction error field in the current virtual-VMCS. */
1190 }
1191 else
1192 iemVmxVmFailInvalid(pVCpu);
1193}
1194
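/*
 * The three helpers above follow the architectural VMsucceed / VMfailInvalid /
 * VMfailValid conventions. A quick reference sketch of the resulting RFLAGS
 * (all other arithmetic flags are cleared in every case):
 *
 * @code
 *   iemVmxVmSucceed(pVCpu);                                          // CF=0, ZF=0.
 *   iemVmxVmFailInvalid(pVCpu);                                      // CF=1 (no current VMCS).
 *   iemVmxVmFailValid(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);  // ZF=1 + VM-instruction error.
 * @endcode
 */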
1195
1196/**
1197 * Flushes the current VMCS contents back to guest memory.
1198 *
1199 * @returns VBox status code.
1200 * @param pVCpu The cross context virtual CPU structure.
1201 */
1202DECL_FORCE_INLINE(int) iemVmxCommitCurrentVmcsToMemory(PVMCPU pVCpu)
1203{
1204 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1205 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
1206 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), sizeof(VMXVVMCS));
1207 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
1208 return rc;
1209}
1210
1211
1212/**
1213 * Implements VMSucceed for the VMREAD instruction and increments the guest RIP.
1214 *
1215 * @param pVCpu The cross context virtual CPU structure.
1216 */
1217DECL_FORCE_INLINE(void) iemVmxVmreadSuccess(PVMCPU pVCpu, uint8_t cbInstr)
1218{
1219 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_Success;
1220 iemVmxVmSucceed(pVCpu);
1221 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1222}
1223
1224
1225/**
1226 * VMREAD common (memory/register) instruction execution worker
1227 *
1228 * @param pVCpu The cross context virtual CPU structure.
1229 * @param cbInstr The instruction length.
1230 * @param pu64Dst Where to write the VMCS value (only updated when
1231 * VINF_SUCCESS is returned).
1232 * @param u64FieldEnc The VMCS field encoding.
1233 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1234 * be NULL.
1235 */
1236IEM_STATIC VBOXSTRICTRC iemVmxVmreadCommon(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
1237 PCVMXVEXITINFO pExitInfo)
1238{
1239 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1240 {
1241 RT_NOREF(pExitInfo); RT_NOREF(cbInstr);
1242 /** @todo NSTVMX: intercept. */
1243 /** @todo NSTVMX: VMCS shadowing intercept (VMREAD bitmap). */
1244 }
1245
1246 /* CPL. */
1247 if (pVCpu->iem.s.uCpl > 0)
1248 {
1249 Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1250 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_Cpl;
1251 return iemRaiseGeneralProtectionFault0(pVCpu);
1252 }
1253
1254 /* VMCS pointer in root mode. */
1255 if ( IEM_IS_VMX_ROOT_MODE(pVCpu)
1256 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1257 {
1258 Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
1259 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrInvalid;
1260 iemVmxVmFailInvalid(pVCpu);
1261 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1262 return VINF_SUCCESS;
1263 }
1264
1265 /* VMCS-link pointer in non-root mode. */
1266 if ( IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
1267 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
1268 {
1269 Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
1270 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_LinkPtrInvalid;
1271 iemVmxVmFailInvalid(pVCpu);
1272 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1273 return VINF_SUCCESS;
1274 }
1275
1276 /* Supported VMCS field. */
1277 if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
1278 {
1279 Log(("vmread: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
1280 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_FieldInvalid;
1281 iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
1282 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1283 return VINF_SUCCESS;
1284 }
1285
1286 /*
1287 * Setup reading from the current or shadow VMCS.
1288 */
1289 uint8_t *pbVmcs;
1290 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1291 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
1292 else
1293 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1294 Assert(pbVmcs);
1295
1296 VMXVMCSFIELDENC FieldEnc;
1297 FieldEnc.u = RT_LO_U32(u64FieldEnc);
1298 uint8_t const uWidth = FieldEnc.n.u2Width;
1299 uint8_t const uType = FieldEnc.n.u2Type;
1300 uint8_t const uWidthType = (uWidth << 2) | uType;
1301 uint8_t const uIndex = FieldEnc.n.u8Index;
1302 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
1303 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
1304
1305 /*
1306 * Read the VMCS component based on the field's effective width.
1307 *
1308 * The effective width is the field width, with 64-bit fields adjusted to 32 bits
1309 * when the access type indicates the high part of the field (little endian).
1310 *
1311 * Note! The caller is responsible for trimming the result and updating registers
1312 * or memory locations as required. Here we just zero-extend to the largest
1313 * type (i.e. 64 bits).
1314 */
1315 uint8_t *pbField = pbVmcs + offField;
1316 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
1317 switch (uEffWidth)
1318 {
1319 case VMX_VMCS_ENC_WIDTH_64BIT:
1320 case VMX_VMCS_ENC_WIDTH_NATURAL: *pu64Dst = *(uint64_t *)pbField; break;
1321 case VMX_VMCS_ENC_WIDTH_32BIT: *pu64Dst = *(uint32_t *)pbField; break;
1322 case VMX_VMCS_ENC_WIDTH_16BIT: *pu64Dst = *(uint16_t *)pbField; break;
1323 }
1324 return VINF_SUCCESS;
1325}
1326
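/*
 * A minimal sketch of the common worker's zero-extension behaviour when reading
 * a 16-bit field; trimming to the field width is the caller's job (error
 * handling elided, local names illustrative):
 *
 * @code
 *   uint64_t u64Val = 0;
 *   VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Val, VMX_VMCS16_GUEST_CS_SEL,
 *                                              NULL); // No VM-exit info in this sketch.
 *   uint16_t const uCsSel = (uint16_t)u64Val;         // Upper 48 bits are zero.
 * @endcode
 */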
1327
1328/**
1329 * VMREAD (64-bit register) instruction execution worker.
1330 *
1331 * @param pVCpu The cross context virtual CPU structure.
1332 * @param cbInstr The instruction length.
1333 * @param pu64Dst Where to store the VMCS field's value.
1334 * @param u64FieldEnc The VMCS field encoding.
1335 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1336 * be NULL.
1337 */
1338IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg64(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
1339 PCVMXVEXITINFO pExitInfo)
1340{
1341 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
1342 if (rcStrict == VINF_SUCCESS)
1343 {
1344 iemVmxVmreadSuccess(pVCpu, cbInstr);
1345 return VINF_SUCCESS;
1346 }
1347
1348 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1349 return rcStrict;
1350}
1351
1352
1353/**
1354 * VMREAD (32-bit register) instruction execution worker.
1355 *
1356 * @param pVCpu The cross context virtual CPU structure.
1357 * @param cbInstr The instruction length.
1358 * @param pu32Dst Where to store the VMCS field's value.
1359 * @param u32FieldEnc The VMCS field encoding.
1360 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1361 * be NULL.
1362 */
1363IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg32(PVMCPU pVCpu, uint8_t cbInstr, uint32_t *pu32Dst, uint64_t u32FieldEnc,
1364 PCVMXVEXITINFO pExitInfo)
1365{
1366 uint64_t u64Dst;
1367 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u32FieldEnc, pExitInfo);
1368 if (rcStrict == VINF_SUCCESS)
1369 {
1370 *pu32Dst = u64Dst;
1371 iemVmxVmreadSuccess(pVCpu, cbInstr);
1372 return VINF_SUCCESS;
1373 }
1374
1375 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1376 return rcStrict;
1377}
1378
1379
1380/**
1381 * VMREAD (memory) instruction execution worker.
1382 *
1383 * @param pVCpu The cross context virtual CPU structure.
1384 * @param cbInstr The instruction length.
1385 * @param iEffSeg The effective segment register to use with @a GCPtrDst.
1386 * Pass UINT8_MAX if it is a register access.
1387 * @param enmEffAddrMode The effective addressing mode (only used with memory
1388 * operand).
1389 * @param GCPtrDst The guest linear address to store the VMCS field's
1390 * value.
1391 * @param u64FieldEnc The VMCS field encoding.
1392 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1393 * be NULL.
1394 */
1395IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode,
1396 RTGCPTR GCPtrDst, uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
1397{
1398 uint64_t u64Dst;
1399 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u64FieldEnc, pExitInfo);
1400 if (rcStrict == VINF_SUCCESS)
1401 {
1402 /*
1403 * Write the VMCS field's value to the location specified in guest-memory.
1404 *
1405 * The pointer size depends on the address size (address-size prefix allowed).
1406 * The operand size depends on IA-32e mode (operand-size prefix not allowed).
1407 */
1408 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
1409 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
1410 GCPtrDst &= s_auAddrSizeMasks[enmEffAddrMode];
1411
1412 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1413 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
1414 else
1415 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
1416 if (rcStrict == VINF_SUCCESS)
1417 {
1418 iemVmxVmreadSuccess(pVCpu, cbInstr);
1419 return VINF_SUCCESS;
1420 }
1421
1422 Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
1423 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrMap;
1424 return rcStrict;
1425 }
1426
1427 Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1428 return rcStrict;
1429}
1430
1431
1432/**
1433 * VMWRITE instruction execution worker.
1434 *
1435 * @param pVCpu The cross context virtual CPU structure.
1436 * @param cbInstr The instruction length.
1437 * @param iEffSeg The effective segment register to use with @a u64Val.
1438 * Pass UINT8_MAX if it is a register access.
1439 * @param enmEffAddrMode The effective addressing mode (only used with memory
1440 * operand).
1441 * @param u64Val The value to write (or the guest linear address of the
1442 * value); @a iEffSeg indicates whether it is a memory
1443 * operand.
1444 * @param u64FieldEnc The VMCS field encoding.
1445 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1446 * be NULL.
1447 */
1448IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode, uint64_t u64Val,
1449 uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
1450{
1451 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1452 {
1453 RT_NOREF(pExitInfo);
1454 /** @todo NSTVMX: intercept. */
1455 /** @todo NSTVMX: VMCS shadowing intercept (VMWRITE bitmap). */
1456 }
1457
1458 /* CPL. */
1459 if (pVCpu->iem.s.uCpl > 0)
1460 {
1461 Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1462 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_Cpl;
1463 return iemRaiseGeneralProtectionFault0(pVCpu);
1464 }
1465
1466 /* VMCS pointer in root mode. */
1467 if ( IEM_IS_VMX_ROOT_MODE(pVCpu)
1468 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1469 {
1470 Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
1471 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrInvalid;
1472 iemVmxVmFailInvalid(pVCpu);
1473 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1474 return VINF_SUCCESS;
1475 }
1476
1477 /* VMCS-link pointer in non-root mode. */
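/* In VMX non-root operation a non-intercepted VMWRITE operates on the shadow VMCS referenced by
   the current VMCS' VMCS-link pointer, hence a valid link pointer is required here. */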
1478 if ( IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
1479 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
1480 {
1481 Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
1482 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_LinkPtrInvalid;
1483 iemVmxVmFailInvalid(pVCpu);
1484 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1485 return VINF_SUCCESS;
1486 }
1487
1488 /* If the VMWRITE instruction references memory, access the specified memory operand. */
1489 bool const fIsRegOperand = iEffSeg == UINT8_MAX;
1490 if (!fIsRegOperand)
1491 {
1492 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
1493 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
1494 RTGCPTR const GCPtrVal = u64Val & s_auAddrSizeMasks[enmEffAddrMode];
1495
1496 /* Read the value from the specified guest memory location. */
1497 VBOXSTRICTRC rcStrict;
1498 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1499 rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
1500 else
1501 {
1502 uint32_t u32Val;
1503 rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iEffSeg, GCPtrVal);
1504 u64Val = u32Val;
1505 }
1506 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1507 {
1508 Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
1509 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrMap;
1510 return rcStrict;
1511 }
1512 }
1513 else
1514 Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
1515
1516 /* Supported VMCS field. */
1517 if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
1518 {
1519 Log(("vmwrite: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
1520 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldInvalid;
1521 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
1522 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1523 return VINF_SUCCESS;
1524 }
1525
1526 /* Read-only VMCS field. */
1527 bool const fReadOnlyField = HMVmxIsVmcsFieldReadOnly(u64FieldEnc);
1528 if ( fReadOnlyField
1529 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
1530 {
1531 Log(("vmwrite: Write to read-only VMCS component %#RX64 -> VMFail\n", u64FieldEnc));
1532 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldRo;
1533 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
1534 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1535 return VINF_SUCCESS;
1536 }
1537
1538 /*
1539 * Setup writing to the current or shadow VMCS.
1540 */
1541 uint8_t *pbVmcs;
1542 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1543 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
1544 else
1545 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1546 Assert(pbVmcs);
1547
1548 VMXVMCSFIELDENC FieldEnc;
1549 FieldEnc.u = RT_LO_U32(u64FieldEnc);
1550 uint8_t const uWidth = FieldEnc.n.u2Width;
1551 uint8_t const uType = FieldEnc.n.u2Type;
1552 uint8_t const uWidthType = (uWidth << 2) | uType;
1553 uint8_t const uIndex = FieldEnc.n.u8Index;
1554 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
1555 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
1556
1557 /*
1558 * Write the VMCS component based on the field's effective width.
1559 *
1560 * The effective width of a 64-bit field is adjusted to 32 bits when the access
1561 * type indicates the high part of the field (little endian layout).
1562 */
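/* Note: the 32-bit and 16-bit cases below intentionally truncate u64Val to the field width. */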
1563 uint8_t *pbField = pbVmcs + offField;
1564 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
1565 switch (uEffWidth)
1566 {
1567 case VMX_VMCS_ENC_WIDTH_64BIT:
1568 case VMX_VMCS_ENC_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
1569 case VMX_VMCS_ENC_WIDTH_32BIT: *(uint32_t *)pbField = u64Val; break;
1570 case VMX_VMCS_ENC_WIDTH_16BIT: *(uint16_t *)pbField = u64Val; break;
1571 }
1572
1573 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_Success;
1574 iemVmxVmSucceed(pVCpu);
1575 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1576 return VINF_SUCCESS;
1577}
1578
1579
1580/**
1581 * VMCLEAR instruction execution worker.
1582 *
1583 * @param pVCpu The cross context virtual CPU structure.
1584 * @param cbInstr The instruction length.
1585 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
1586 * @param GCPtrVmcs The linear address of the VMCS pointer.
1587 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1588 * be NULL.
1589 *
1590 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
1591 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1592 */
1593IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
1594 PCVMXVEXITINFO pExitInfo)
1595{
1596 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1597 {
1598 RT_NOREF(pExitInfo);
1599 /** @todo NSTVMX: intercept. */
1600 }
1601 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1602
1603 /* CPL. */
1604 if (pVCpu->iem.s.uCpl > 0)
1605 {
1606 Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1607 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_Cpl;
1608 return iemRaiseGeneralProtectionFault0(pVCpu);
1609 }
1610
1611 /* Get the VMCS pointer from the location specified by the source memory operand. */
1612 RTGCPHYS GCPhysVmcs;
1613 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
1614 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1615 {
1616 Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
1617 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrMap;
1618 return rcStrict;
1619 }
1620
1621 /* VMCS pointer alignment. */
1622 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
1623 {
1624 Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
1625 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAlign;
1626 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1627 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1628 return VINF_SUCCESS;
1629 }
1630
1631 /* VMCS physical-address width limits. */
1632 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
1633 {
1634 Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
1635 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrWidth;
1636 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1637 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1638 return VINF_SUCCESS;
1639 }
1640
1641 /* VMCS is not the VMXON region. */
1642 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
1643 {
1644 Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
1645 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrVmxon;
1646 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
1647 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1648 return VINF_SUCCESS;
1649 }
1650
1651 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
1652 restriction imposed by our implementation. */
1653 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
1654 {
1655 Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
1656 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAbnormal;
1657 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1658 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1659 return VINF_SUCCESS;
1660 }
1661
1662 /*
1663 * VMCLEAR allows committing and clearing any valid VMCS pointer.
1664 *
1665 * If the current VMCS is the one being cleared, set its state to 'clear' and commit
1666 * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
1667 * to 'clear'.
1668 */
1669 uint8_t const fVmcsStateClear = VMX_V_VMCS_STATE_CLEAR;
1670 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
1671 {
1672 Assert(GCPhysVmcs != NIL_RTGCPHYS); /* Paranoia. */
1673 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
1674 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsStateClear;
1675 iemVmxCommitCurrentVmcsToMemory(pVCpu);
1676 Assert(!IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1677 }
1678 else
1679 {
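/* The referenced VMCS is not the current VMCS; only its launch-state byte in guest memory
   needs to be marked 'clear'. */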
1680 rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_OFFSETOF(VMXVVMCS, fVmcsState),
1681 (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
1682 }
1683
1684 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_Success;
1685 iemVmxVmSucceed(pVCpu);
1686 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1687 return rcStrict;
1688}
1689
1690
1691/**
1692 * VMPTRST instruction execution worker.
1693 *
1694 * @param pVCpu The cross context virtual CPU structure.
1695 * @param cbInstr The instruction length.
1696 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
1697 * @param GCPtrVmcs The linear address of where to store the current VMCS
1698 * pointer.
1699 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1700 * be NULL.
1701 *
1702 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
1703 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1704 */
1705IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
1706 PCVMXVEXITINFO pExitInfo)
1707{
1708 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1709 {
1710 RT_NOREF(pExitInfo);
1711 /** @todo NSTVMX: intercept. */
1712 }
1713 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1714
1715 /* CPL. */
1716 if (pVCpu->iem.s.uCpl > 0)
1717 {
1718 Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1719 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_Cpl;
1720 return iemRaiseGeneralProtectionFault0(pVCpu);
1721 }
1722
1723 /* Set the VMCS pointer to the location specified by the destination memory operand. */
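/* The AssertCompile below documents that when no VMCS is current (NIL_RTGCPHYS), the stored
   value is all ones, matching the architectural 'no current VMCS' pointer (FFFFFFFF_FFFFFFFFh). */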
1724 AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
1725 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
1726 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1727 {
1728 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_Success;
1729 iemVmxVmSucceed(pVCpu);
1730 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1731 return rcStrict;
1732 }
1733
1734 Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand, rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1735 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_PtrMap;
1736 return rcStrict;
1737}
1738
1739
1740/**
1741 * VMPTRLD instruction execution worker.
1742 *
1743 * @param pVCpu The cross context virtual CPU structure.
1744 * @param cbInstr The instruction length.
 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
1745 * @param GCPtrVmcs The linear address of the current VMCS pointer.
1746 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1747 * be NULL.
1748 *
1749 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
1750 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1751 */
1752IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
1753 PCVMXVEXITINFO pExitInfo)
1754{
1755 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1756 {
1757 RT_NOREF(pExitInfo);
1758 /** @todo NSTVMX: intercept. */
1759 }
1760 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1761
1762 /* CPL. */
1763 if (pVCpu->iem.s.uCpl > 0)
1764 {
1765 Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1766 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_Cpl;
1767 return iemRaiseGeneralProtectionFault0(pVCpu);
1768 }
1769
1770 /* Get the VMCS pointer from the location specified by the source memory operand. */
1771 RTGCPHYS GCPhysVmcs;
1772 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
1773 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1774 {
1775 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
1776 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrMap;
1777 return rcStrict;
1778 }
1779
1780 /* VMCS pointer alignment. */
1781 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
1782 {
1783 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
1784 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAlign;
1785 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1786 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1787 return VINF_SUCCESS;
1788 }
1789
1790 /* VMCS physical-address width limits. */
1791 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
1792 {
1793 Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
1794 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrWidth;
1795 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1796 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1797 return VINF_SUCCESS;
1798 }
1799
1800 /* VMCS is not the VMXON region. */
1801 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
1802 {
1803 Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
1804 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrVmxon;
1805 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
1806 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1807 return VINF_SUCCESS;
1808 }
1809
1810 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
1811 restriction imposed by our implementation. */
1812 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
1813 {
1814 Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
1815 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAbnormal;
1816 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1817 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1818 return VINF_SUCCESS;
1819 }
1820
1821 /* Read the VMCS revision ID from the VMCS. */
1822 VMXVMCSREVID VmcsRevId;
1823 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
1824 if (RT_FAILURE(rc))
1825 {
1826 Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
1827 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrReadPhys;
1828 return rc;
1829 }
1830
1831 /* Verify that the VMCS revision specified by the guest matches what we reported to the
1832 guest and that a shadow VMCS is only used when VMCS shadowing is exposed. */
1833 if ( VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
1834 || ( VmcsRevId.n.fIsShadowVmcs
1835 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
1836 {
1837 if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
1838 {
1839 Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFail()\n", VMX_V_VMCS_REVISION_ID,
1840 VmcsRevId.n.u31RevisionId));
1841 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_VmcsRevId;
1842 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
1843 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1844 return VINF_SUCCESS;
1845 }
1846
1847 Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
1848 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_ShadowVmcs;
1849 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
1850 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1851 return VINF_SUCCESS;
1852 }
1853
1854 /*
1855 * We only maintain the current VMCS in our virtual CPU context (CPUMCTX). Therefore,
1856 * VMPTRLD shall always flush any existing current VMCS back to guest memory before loading
1857 * a new VMCS as current.
1858 */
1859 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
1860 {
1861 iemVmxCommitCurrentVmcsToMemory(pVCpu);
1862 IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
1863 }
1864 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_Success;
1865 iemVmxVmSucceed(pVCpu);
1866 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1867 return VINF_SUCCESS;
1868}
1869
1870
1871/**
1872 * VMXON instruction execution worker.
1873 *
1874 * @param pVCpu The cross context virtual CPU structure.
1875 * @param cbInstr The instruction length.
1876 * @param iEffSeg The effective segment register to use with @a
1877 * GCPtrVmxon.
1878 * @param GCPtrVmxon The linear address of the VMXON pointer.
1879 * @param pExitInfo Pointer to the VM-exit instruction information struct.
1880 * Optional, can be NULL.
1881 *
1882 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
1883 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1884 */
1885IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmxon,
1886 PCVMXVEXITINFO pExitInfo)
1887{
1888#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1889 RT_NOREF5(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
1890 return VINF_EM_RAW_EMULATE_INSTR;
1891#else
1892 if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
1893 {
1894 /* CPL. */
1895 if (pVCpu->iem.s.uCpl > 0)
1896 {
1897 Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1898 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cpl;
1899 return iemRaiseGeneralProtectionFault0(pVCpu);
1900 }
1901
1902 /* A20M (A20 Masked) mode. */
1903 if (!PGMPhysIsA20Enabled(pVCpu))
1904 {
1905 Log(("vmxon: A20M mode -> #GP(0)\n"));
1906 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_A20M;
1907 return iemRaiseGeneralProtectionFault0(pVCpu);
1908 }
1909
1910 /* CR0 fixed bits. */
1911 bool const fUnrestrictedGuest = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxUnrestrictedGuest;
1912 uint64_t const uCr0Fixed0 = fUnrestrictedGuest ? VMX_V_CR0_FIXED0_UX : VMX_V_CR0_FIXED0;
1913 if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
1914 {
1915 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
1916 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed0;
1917 return iemRaiseGeneralProtectionFault0(pVCpu);
1918 }
1919
1920 /* CR4 fixed bits. */
1921 if ((pVCpu->cpum.GstCtx.cr4 & VMX_V_CR4_FIXED0) != VMX_V_CR4_FIXED0)
1922 {
1923 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
1924 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed0;
1925 return iemRaiseGeneralProtectionFault0(pVCpu);
1926 }
1927
1928 /* Feature control MSR's LOCK and VMXON bits. */
1929 uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
1930 if (!(uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON)))
1931 {
1932 Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
1933 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_MsrFeatCtl;
1934 return iemRaiseGeneralProtectionFault0(pVCpu);
1935 }
1936
1937 /* Get the VMXON pointer from the location specified by the source memory operand. */
1938 RTGCPHYS GCPhysVmxon;
1939 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
1940 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1941 {
1942 Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
1943 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrMap;
1944 return rcStrict;
1945 }
1946
1947 /* VMXON region pointer alignment. */
1948 if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
1949 {
1950 Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
1951 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAlign;
1952 iemVmxVmFailInvalid(pVCpu);
1953 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1954 return VINF_SUCCESS;
1955 }
1956
1957 /* VMXON physical-address width limits. */
1958 if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
1959 {
1960 Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
1961 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrWidth;
1962 iemVmxVmFailInvalid(pVCpu);
1963 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1964 return VINF_SUCCESS;
1965 }
1966
1967 /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
1968 restriction imposed by our implementation. */
1969 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
1970 {
1971 Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
1972 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAbnormal;
1973 iemVmxVmFailInvalid(pVCpu);
1974 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1975 return VINF_SUCCESS;
1976 }
1977
1978 /* Read the VMCS revision ID from the VMXON region. */
1979 VMXVMCSREVID VmcsRevId;
1980 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
1981 if (RT_FAILURE(rc))
1982 {
1983 Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
1984 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrReadPhys;
1985 return rc;
1986 }
1987
1988 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
1989 if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
1990 {
1991 /* Revision ID mismatch. */
1992 if (!VmcsRevId.n.fIsShadowVmcs)
1993 {
1994 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
1995 VmcsRevId.n.u31RevisionId));
1996 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmcsRevId;
1997 iemVmxVmFailInvalid(pVCpu);
1998 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1999 return VINF_SUCCESS;
2000 }
2001
2002 /* Shadow VMCS disallowed. */
2003 Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
2004 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_ShadowVmcs;
2005 iemVmxVmFailInvalid(pVCpu);
2006 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2007 return VINF_SUCCESS;
2008 }
2009
2010 /*
2011 * Record that we're in VMX operation, block INIT, block and disable A20M.
2012 */
2013 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
2014 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
2015 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
2016 /** @todo NSTVMX: clear address-range monitoring. */
2017 /** @todo NSTVMX: Intel PT. */
2018 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Success;
2019 iemVmxVmSucceed(pVCpu);
2020 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2021# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
2022 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
2023# else
2024 return VINF_SUCCESS;
2025# endif
2026 }
2027 else if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
2028 {
2029 RT_NOREF(pExitInfo);
2030 /** @todo NSTVMX: intercept. */
2031 }
2032
2033 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
2034
2035 /* CPL. */
2036 if (pVCpu->iem.s.uCpl > 0)
2037 {
2038 Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
2039 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxRootCpl;
2040 return iemRaiseGeneralProtectionFault0(pVCpu);
2041 }
2042
2043 /* VMXON when already in VMX root mode. */
2044 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
2045 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxAlreadyRoot;
2046 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2047 return VINF_SUCCESS;
2048#endif
2049}
2050
2051
2052/**
2053 * Gets the instruction diagnostic for segment base checks during VM-entry of a
2054 * nested-guest.
2055 *
2056 * @param iSegReg The segment index (X86_SREG_XXX).
2057 */
2058IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBase(unsigned iSegReg)
2059{
2060 switch (iSegReg)
2061 {
2062 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseCs;
2063 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseDs;
2064 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseEs;
2065 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseFs;
2066 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseGs;
2067 default: return kVmxVDiag_Vmentry_GuestSegBaseSs;
2068 }
2069}
2070
2071
2072/**
2073 * Gets the instruction diagnostic for segment base checks during VM-entry of a
2074 * nested-guest that is in Virtual-8086 mode.
2075 *
2076 * @param iSegReg The segment index (X86_SREG_XXX).
2077 */
2078IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBaseV86(unsigned iSegReg)
2079{
2080 switch (iSegReg)
2081 {
2082 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseV86Cs;
2083 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ds;
2084 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseV86Es;
2085 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseV86Fs;
2086 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseV86Gs;
2087 default:
2088 Assert(iSegReg == X86_SREG_SS);
2089 return kVmxVDiag_Vmentry_GuestSegBaseV86Ss;
2090 }
2091}
2092
2093
2094/**
2095 * Gets the instruction diagnostic for segment limit checks during VM-entry of a
2096 * nested-guest that is in Virtual-8086 mode.
2097 *
2098 * @param iSegReg The segment index (X86_SREG_XXX).
2099 */
2100IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegLimitV86(unsigned iSegReg)
2101{
2102 switch (iSegReg)
2103 {
2104 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegLimitV86Cs;
2105 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ds;
2106 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegLimitV86Es;
2107 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegLimitV86Fs;
2108 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegLimitV86Gs;
2109 default:
2110 Assert(iSegReg == X86_SREG_SS);
2111 return kVmxVDiag_Vmentry_GuestSegLimitV86Ss;
2112 }
2113}
2114
2115
2116/**
2117 * Gets the instruction diagnostic for segment attribute checks during VM-entry of a
2118 * nested-guest that is in Virtual-8086 mode.
2119 *
2120 * @param iSegReg The segment index (X86_SREG_XXX).
2121 */
2122IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrV86(unsigned iSegReg)
2123{
2124 switch (iSegReg)
2125 {
2126 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrV86Cs;
2127 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ds;
2128 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrV86Es;
2129 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrV86Fs;
2130 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrV86Gs;
2131 default:
2132 Assert(iSegReg == X86_SREG_SS);
2133 return kVmxVDiag_Vmentry_GuestSegAttrV86Ss;
2134 }
2135}
2136
2137
2138/**
2139 * Gets the instruction diagnostic for segment attributes reserved bits failure
2140 * during VM-entry of a nested-guest.
2141 *
2142 * @param iSegReg The segment index (X86_SREG_XXX).
2143 */
2144IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrRsvd(unsigned iSegReg)
2145{
2146 switch (iSegReg)
2147 {
2148 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdCs;
2149 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdDs;
2150 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrRsvdEs;
2151 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdFs;
2152 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdGs;
2153 default:
2154 Assert(iSegReg == X86_SREG_SS);
2155 return kVmxVDiag_Vmentry_GuestSegAttrRsvdSs;
2156 }
2157}
2158
2159
2160/**
2161 * Gets the instruction diagnostic for segment attributes descriptor-type
2162 * (code/segment or system) failure during VM-entry of a nested-guest.
2163 *
2164 * @param iSegReg The segment index (X86_SREG_XXX).
2165 */
2166IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDescType(unsigned iSegReg)
2167{
2168 switch (iSegReg)
2169 {
2170 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeCs;
2171 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeDs;
2172 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeEs;
2173 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeFs;
2174 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeGs;
2175 default:
2176 Assert(iSegReg == X86_SREG_SS);
2177 return kVmxVDiag_Vmentry_GuestSegAttrDescTypeSs;
2178 }
2179}
2180
2181
2182/**
2183 * Gets the instruction diagnostic for the segment attribute present-bit failure
2184 * during VM-entry of a nested-guest.
2185 *
2186 * @param iSegReg The segment index (X86_SREG_XXX).
2187 */
2188IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrPresent(unsigned iSegReg)
2189{
2190 switch (iSegReg)
2191 {
2192 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrPresentCs;
2193 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrPresentDs;
2194 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrPresentEs;
2195 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrPresentFs;
2196 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrPresentGs;
2197 default:
2198 Assert(iSegReg == X86_SREG_SS);
2199 return kVmxVDiag_Vmentry_GuestSegAttrPresentSs;
2200 }
2201}
2202
2203
2204/**
2205 * Gets the instruction diagnostic for segment attribute granularity failure during
2206 * VM-entry of a nested-guest.
2207 *
2208 * @param iSegReg The segment index (X86_SREG_XXX).
2209 */
2210IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrGran(unsigned iSegReg)
2211{
2212 switch (iSegReg)
2213 {
2214 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrGranCs;
2215 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrGranDs;
2216 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrGranEs;
2217 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrGranFs;
2218 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrGranGs;
2219 default:
2220 Assert(iSegReg == X86_SREG_SS);
2221 return kVmxVDiag_Vmentry_GuestSegAttrGranSs;
2222 }
2223}
2224
2225/**
2226 * Gets the instruction diagnostic for segment attribute DPL/RPL failure during
2227 * VM-entry of a nested-guest.
2228 *
2229 * @param iSegReg The segment index (X86_SREG_XXX).
2230 */
2231IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDplRpl(unsigned iSegReg)
2232{
2233 switch (iSegReg)
2234 {
2235 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplCs;
2236 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplDs;
2237 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDplRplEs;
2238 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplFs;
2239 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplGs;
2240 default:
2241 Assert(iSegReg == X86_SREG_SS);
2242 return kVmxVDiag_Vmentry_GuestSegAttrDplRplSs;
2243 }
2244}
2245
2246
2247/**
2248 * Gets the instruction diagnostic for segment attribute type accessed failure
2249 * during VM-entry of a nested-guest.
2250 *
2251 * @param iSegReg The segment index (X86_SREG_XXX).
2252 */
2253IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrTypeAcc(unsigned iSegReg)
2254{
2255 switch (iSegReg)
2256 {
2257 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccCs;
2258 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccDs;
2259 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccEs;
2260 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccFs;
2261 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccGs;
2262 default:
2263 Assert(iSegReg == X86_SREG_SS);
2264 return kVmxVDiag_Vmentry_GuestSegAttrTypeAccSs;
2265 }
2266}
2267
2268
2269/**
2270 * Checks guest control registers, debug registers and MSRs as part of VM-entry.
2271 *
2272 * @param pVCpu The cross context virtual CPU structure.
2273 * @param pszInstr The VMX instruction name (for logging purposes).
2274 */
2275IEM_STATIC int iemVmxVmentryCheckGuestControlRegsMsrs(PVMCPU pVCpu, const char *pszInstr)
2276{
2277 /*
2278 * Guest Control Registers, Debug Registers, and MSRs.
2279 * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs".
2280 */
2281 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2282 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
2283 const char *const pszFailure = "VM-exit";
2284
2285 /* CR0 reserved bits. */
2286 {
2287 /* CR0 MB1 bits. */
2288 uint64_t u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
2289 Assert(u64Cr0Fixed0 & (X86_CR0_NW | X86_CR0_CD));
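/* With unrestricted guest, CR0.PE and CR0.PG are allowed to be 0 in VMX non-root operation
   (Intel spec. 26.3.1.1 and Appendix A.7). */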
2290 if (fUnrestrictedGuest)
2291 u64Cr0Fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
2292 if (~pVmcs->u64GuestCr0.u & u64Cr0Fixed0)
2293 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed0);
2294
2295 /* CR0 MBZ bits. */
2296 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
2297 if (pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1)
2298 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed1);
2299
2300 /* Without unrestricted-guest support, VT-x does not support unpaged protected mode. */
2301 if ( !fUnrestrictedGuest
2302 && (pVmcs->u64GuestCr0.u & X86_CR0_PG)
2303 && !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
2304 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0PgPe);
2305 }
2306
2307 /* CR4 reserved bits. */
2308 {
2309 /* CR4 MB1 bits. */
2310 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
2311 if (~pVmcs->u64GuestCr4.u & u64Cr4Fixed0)
2312 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed0);
2313
2314 /* CR4 MBZ bits. */
2315 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
2316 if (pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1)
2317 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed1);
2318 }
2319
2320 /* DEBUGCTL MSR. */
2321 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
2322 && (pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
2323 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDebugCtl);
2324
2325 /* 64-bit CPU checks. */
2326 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
2327 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2328 {
2329 if (fGstInLongMode)
2330 {
2331 /* PAE must be set. */
2332 if ( (pVmcs->u64GuestCr0.u & X86_CR0_PG)
2333 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE))
2334 { /* likely */ }
2335 else
2336 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPae);
2337 }
2338 else
2339 {
2340 /* PCIDE should not be set. */
2341 if (!(pVmcs->u64GuestCr4.u & X86_CR4_PCIDE))
2342 { /* likely */ }
2343 else
2344 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPcide);
2345 }
2346
2347 /* CR3. */
2348 if (!(pVmcs->u64GuestCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
2349 { /* likely */ }
2350 else
2351 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr3);
2352
2353 /* DR7. */
2354 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
2355 && (pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
2356 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDr7);
2357
2358 /* SYSENTER ESP and SYSENTER EIP. */
2359 if ( X86_IS_CANONICAL(pVmcs->u64GuestSysenterEsp.u)
2360 && X86_IS_CANONICAL(pVmcs->u64GuestSysenterEip.u))
2361 { /* likely */ }
2362 else
2363 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSysenterEspEip);
2364 }
2365
2366 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR)); /* We don't support loading IA32_PERF_GLOBAL_CTRL MSR yet. */
2367
2368 /* PAT MSR. */
2369 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
2370 && !CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
2371 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPatMsr);
2372
2373 /* EFER MSR. */
2374 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
2375 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
2376 && (pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
2377 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsrRsvd);
2378
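/* EFER.LMA must be identical to the "IA-32e mode guest" VM-entry control and, when CR0.PG is 1,
   EFER.LMA must also equal EFER.LME (Intel spec. 26.3.1.1). */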
2379 bool const fGstLma = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LMA);
2380 bool const fGstLme = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LME);
2381 if ( fGstInLongMode == fGstLma
2382 && ( !(pVmcs->u64GuestCr0.u & X86_CR0_PG)
2383 || fGstLma == fGstLme))
2384 { /* likely */ }
2385 else
2386 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsr);
2387
2388 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR)); /* We don't support loading IA32_BNDCFGS MSR yet. */
2389
2390 NOREF(pszInstr);
2391 NOREF(pszFailure);
2392 return VINF_SUCCESS;
2393}
2394
2395
2396/**
2397 * Checks guest segment registers, LDTR and TR as part of VM-entry.
2398 *
2399 * @param pVCpu The cross context virtual CPU structure.
2400 * @param pszInstr The VMX instruction name (for logging purposes).
2401 */
2402IEM_STATIC int iemVmxVmentryCheckGuestSegRegs(PVMCPU pVCpu, const char *pszInstr)
2403{
2404 /*
2405 * Segment registers.
2406 * See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2407 */
2408 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2409 bool const fGstInV86Mode = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);
2410 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
2411 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
2412 const char *const pszFailure = "VM-exit";
2413
2414 /* Selectors. */
2415 if ( !fGstInV86Mode
2416 && !fUnrestrictedGuest
2417 && (pVmcs->GuestSs & X86_SEL_RPL) != (pVmcs->GuestCs & X86_SEL_RPL))
2418 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelCsSsRpl);
2419
2420 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
2421 {
2422 CPUMSELREG SelReg;
2423 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &SelReg);
2424 if (RT_LIKELY(rc == VINF_SUCCESS))
2425 { /* likely */ }
2426 else
2427 return rc;
2428
2429 /*
2430 * Virtual-8086 mode checks.
2431 */
2432 if (fGstInV86Mode)
2433 {
2434 /* Base address. */
2435 if (SelReg.u64Base == (uint64_t)SelReg.Sel << 4)
2436 { /* likely */ }
2437 else
2438 {
2439 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBaseV86(iSegReg);
2440 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2441 }
2442
2443 /* Limit. */
2444 if (SelReg.u32Limit == 0xffff)
2445 { /* likely */ }
2446 else
2447 {
2448 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegLimitV86(iSegReg);
2449 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2450 }
2451
2452 /* Attribute. */
2453 if (SelReg.Attr.u == 0xf3)
2454 { /* likely */ }
2455 else
2456 {
2457 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrV86(iSegReg);
2458 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2459 }
2460
2461 /* We're done; move to checking the next segment. */
2462 continue;
2463 }
2464
2465 /* Checks done by 64-bit CPUs. */
2466 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2467 {
2468 /* Base address. */
2469 if ( iSegReg == X86_SREG_FS
2470 || iSegReg == X86_SREG_GS)
2471 {
2472 if (X86_IS_CANONICAL(SelReg.u64Base))
2473 { /* likely */ }
2474 else
2475 {
2476 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
2477 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2478 }
2479 }
2480 else if (iSegReg == X86_SREG_CS)
2481 {
2482 if (!RT_HI_U32(SelReg.u64Base))
2483 { /* likely */ }
2484 else
2485 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseCs);
2486 }
2487 else
2488 {
2489 if ( SelReg.Attr.n.u1Unusable
2490 || !RT_HI_U32(SelReg.u64Base))
2491 { /* likely */ }
2492 else
2493 {
2494 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
2495 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2496 }
2497 }
2498 }
2499
2500 /*
2501 * Checks outside Virtual-8086 mode.
2502 */
2503 uint8_t const uSegType = SelReg.Attr.n.u4Type;
2504 uint8_t const fCodeDataSeg = SelReg.Attr.n.u1DescType;
2505 uint8_t const fUsable = !SelReg.Attr.n.u1Unusable;
2506 uint8_t const uDpl = SelReg.Attr.n.u2Dpl;
2507 uint8_t const fPresent = SelReg.Attr.n.u1Present;
2508 uint8_t const uGranularity = SelReg.Attr.n.u1Granularity;
2509 uint8_t const uDefBig = SelReg.Attr.n.u1DefBig;
2510 uint8_t const fSegLong = SelReg.Attr.n.u1Long;
2511
2512 /* Code or usable segment. */
2513 if ( iSegReg == X86_SREG_CS
2514 || fUsable)
2515 {
2516 /* Reserved bits (bits 31:17 and bits 11:8). */
2517 if (!(SelReg.Attr.u & 0xfffe0f00))
2518 { /* likely */ }
2519 else
2520 {
2521 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrRsvd(iSegReg);
2522 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2523 }
2524
2525 /* Descriptor type. */
2526 if (fCodeDataSeg)
2527 { /* likely */ }
2528 else
2529 {
2530 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDescType(iSegReg);
2531 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2532 }
2533
2534 /* Present. */
2535 if (fPresent)
2536 { /* likely */ }
2537 else
2538 {
2539 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrPresent(iSegReg);
2540 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2541 }
2542
2543 /* Granularity. */
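/* If any bit in limit[11:0] is 0 then G must be 0, and if any bit in limit[31:20] is 1 then
   G must be 1 (Intel spec. 26.3.1.2). */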
2544 if ( ((SelReg.u32Limit & 0x00000fff) == 0x00000fff || !uGranularity)
2545 && ((SelReg.u32Limit & 0xfff00000) == 0x00000000 || uGranularity))
2546 { /* likely */ }
2547 else
2548 {
2549 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrGran(iSegReg);
2550 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2551 }
2552 }
2553
2554 if (iSegReg == X86_SREG_CS)
2555 {
2556 /* Segment Type and DPL. */
2557 if ( uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
2558 && fUnrestrictedGuest)
2559 {
2560 if (uDpl == 0)
2561 { /* likely */ }
2562 else
2563 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplZero);
2564 }
2565 else if ( uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_ACCESSED)
2566 || uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
2567 {
2568 X86DESCATTR SsAttr; SsAttr.u = pVmcs->u32GuestSsAttr;
2569 if (uDpl == SsAttr.n.u2Dpl)
2570 { /* likely */ }
2571 else
2572 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplEqSs);
2573 }
2574 else if ((uSegType & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
2575 == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
2576 {
2577 X86DESCATTR SsAttr; SsAttr.u = pVmcs->u32GuestSsAttr;
2578 if (uDpl <= SsAttr.n.u2Dpl)
2579 { /* likely */ }
2580 else
2581 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplLtSs);
2582 }
2583 else
2584 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsType);
2585
2586 /* Def/Big. */
2587 if ( fGstInLongMode
2588 && fSegLong)
2589 {
2590 if (uDefBig == 0)
2591 { /* likely */ }
2592 else
2593 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDefBig);
2594 }
2595 }
2596 else if (iSegReg == X86_SREG_SS)
2597 {
2598 /* Segment Type. */
2599 if ( !fUsable
2600 || uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
2601 || uSegType == (X86_SEL_TYPE_DOWN | X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED))
2602 { /* likely */ }
2603 else
2604 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsType);
2605
2606 /* DPL. */
2607 if (!fUnrestrictedGuest)
2608 {
2609 if (uDpl == (SelReg.Sel & X86_SEL_RPL))
2610 { /* likely */ }
2611 else
2612 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplEqRpl);
2613 }
2614 X86DESCATTR CsAttr; CsAttr.u = pVmcs->u32GuestCsAttr;
2615 if ( CsAttr.n.u4Type == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
2616 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
2617 {
2618 if (uDpl == 0)
2619 { /* likely */ }
2620 else
2621 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplZero);
2622 }
2623 }
2624 else
2625 {
2626 /* DS, ES, FS, GS. */
2627 if (fUsable)
2628 {
2629 /* Segment type. */
2630 if (uSegType & X86_SEL_TYPE_ACCESSED)
2631 { /* likely */ }
2632 else
2633 {
2634 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrTypeAcc(iSegReg);
2635 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2636 }
2637
2638 if ( !(uSegType & X86_SEL_TYPE_CODE)
2639 || (uSegType & X86_SEL_TYPE_READ))
2640 { /* likely */ }
2641 else
2642 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsTypeRead);
2643
2644 /* DPL. */
2645 if ( !fUnrestrictedGuest
2646 && uSegType <= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
2647 {
2648 if (uDpl >= (SelReg.Sel & X86_SEL_RPL))
2649 { /* likely */ }
2650 else
2651 {
2652 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDplRpl(iSegReg);
2653 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2654 }
2655 }
2656 }
2657 }
2658 }
2659
2660 /*
2661 * LDTR.
2662 */
2663 {
2664 CPUMSELREG Ldtr;
2665 Ldtr.Sel = pVmcs->GuestLdtr;
2666 Ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
2667 Ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
2668 Ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
2669
2670 if (!Ldtr.Attr.n.u1Unusable)
2671 {
2672 /* Selector. */
2673 if (!(Ldtr.Sel & X86_SEL_LDT))
2674 { /* likely */ }
2675 else
2676 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelLdtr);
2677
2678 /* Base. */
2679 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2680 {
2681 if (X86_IS_CANONICAL(Ldtr.u64Base))
2682 { /* likely */ }
2683 else
2684 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseLdtr);
2685 }
2686
2687 /* Attributes. */
2688 /* Reserved bits (bits 31:17 and bits 11:8). */
2689 if (!(Ldtr.Attr.u & 0xfffe0f00))
2690 { /* likely */ }
2691 else
2692 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrRsvd);
2693
2694 if (Ldtr.Attr.n.u4Type == X86_SEL_TYPE_SYS_LDT)
2695 { /* likely */ }
2696 else
2697 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrType);
2698
2699 if (!Ldtr.Attr.n.u1DescType)
2700 { /* likely */ }
2701 else
2702 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrDescType);
2703
2704 if (Ldtr.Attr.n.u1Present)
2705 { /* likely */ }
2706 else
2707 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrPresent);
2708
2709 if ( ((Ldtr.u32Limit & 0x00000fff) == 0x00000fff || !Ldtr.Attr.n.u1Granularity)
2710 && ((Ldtr.u32Limit & 0xfff00000) == 0x00000000 || Ldtr.Attr.n.u1Granularity))
2711 { /* likely */ }
2712 else
2713 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrGran);
2714 }
2715 }
2716
2717 /*
2718 * TR.
2719 */
2720 {
2721 CPUMSELREG Tr;
2722 Tr.Sel = pVmcs->GuestTr;
2723 Tr.u32Limit = pVmcs->u32GuestTrLimit;
2724 Tr.u64Base = pVmcs->u64GuestTrBase.u;
2725 Tr.Attr.u = pVmcs->u32GuestTrAttr;
2726
2727 /* Selector. */
2728 if (!(Tr.Sel & X86_SEL_LDT))
2729 { /* likely */ }
2730 else
2731 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelTr);
2732
2733 /* Base. */
2734 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2735 {
2736 if (X86_IS_CANONICAL(Tr.u64Base))
2737 { /* likely */ }
2738 else
2739 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseTr);
2740 }
2741
2742 /* Attributes. */
2743 /* Reserved bits (bits 31:17 and bits 11:8). */
2744 if (!(Tr.Attr.u & 0xfffe0f00))
2745 { /* likely */ }
2746 else
2747 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrRsvd);
2748
2749 if (!Tr.Attr.n.u1Unusable)
2750 { /* likely */ }
2751 else
2752 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrUnusable);
2753
2754 if ( Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY
2755 || ( !fGstInLongMode
2756 && Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY))
2757 { /* likely */ }
2758 else
2759 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrType);
2760
2761 if (!Tr.Attr.n.u1DescType)
2762 { /* likely */ }
2763 else
2764 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrDescType);
2765
2766 if (Tr.Attr.n.u1Present)
2767 { /* likely */ }
2768 else
2769 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrPresent);
2770
2771 if ( ((Tr.u32Limit & 0x00000fff) == 0x00000fff || !Tr.Attr.n.u1Granularity)
2772 && ((Tr.u32Limit & 0xfff00000) == 0x00000000 || Tr.Attr.n.u1Granularity))
2773 { /* likely */ }
2774 else
2775 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrGran);
2776 }
2777
2778 NOREF(pszInstr);
2779 NOREF(pszFailure);
2780 return VINF_SUCCESS;
2781}
2782
2783
2784/**
2785 * Checks guest GDTR and IDTR as part of VM-entry.
2786 *
2787 * @param pVCpu The cross context virtual CPU structure.
2788 * @param pszInstr The VMX instruction name (for logging purposes).
2789 */
2790IEM_STATIC int iemVmxVmentryCheckGuestGdtrIdtr(PVMCPU pVCpu, const char *pszInstr)
2791{
2792 /*
2793 * GDTR and IDTR.
2794 * See Intel spec. 26.3.1.3 "Checks on Guest Descriptor-Table Registers".
2795 */
2796 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2797 const char *const pszFailure = "VM-exit";
2798 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2799 {
2800 /* Base. */
2801 if (X86_IS_CANONICAL(pVmcs->u64GuestGdtrBase.u))
2802 { /* likely */ }
2803 else
2804 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrBase);
2805
2806 if (X86_IS_CANONICAL(pVmcs->u64GuestIdtrBase.u))
2807 { /* likely */ }
2808 else
2809 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrBase);
2810 }
2811
2812 /* Limit. */
2813 if (!RT_HI_U16(pVmcs->u32GuestGdtrLimit))
2814 { /* likely */ }
2815 else
2816 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrLimit);
2817
2818 if (!RT_HI_U16(pVmcs->u32GuestIdtrLimit))
2819 { /* likely */ }
2820 else
2821 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrLimit);
2822
2823 NOREF(pszInstr);
2824 NOREF(pszFailure);
2825 return VINF_SUCCESS;
2826}
2827
2828
2829/**
2830 * Checks guest RIP and RFLAGS as part of VM-entry.
2831 *
2832 * @param pVCpu The cross context virtual CPU structure.
2833 * @param pszInstr The VMX instruction name (for logging purposes).
2834 */
2835IEM_STATIC int iemVmxVmentryCheckGuestRipRFlags(PVMCPU pVCpu, const char *pszInstr)
2836{
2837 /*
2838 * RIP and RFLAGS.
2839 * See Intel spec. 26.3.1.4 "Checks on Guest RIP and RFLAGS".
2840 */
2841 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2842 const char *const pszFailure = "VM-exit";
2843 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
2844
2845 /* RIP. */
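/* Bits 63:32 of RIP must be 0 unless the guest will be in 64-bit code (IA-32e mode guest with
   CS.L set); in 64-bit code RIP must instead be canonical (Intel spec. 26.3.1.4). */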
2846 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2847 {
2848 X86DESCATTR CsAttr; CsAttr.u = pVmcs->u32GuestCsAttr;
2849 if ( !fGstInLongMode
2850 || !CsAttr.n.u1Long)
2851 {
2852 if (!RT_HI_U32(pVmcs->u64GuestRip.u))
2853 { /* likely */ }
2854 else
2855 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRipRsvd);
2856 }
2857
2858 if ( fGstInLongMode
2859 && CsAttr.n.u1Long)
2860 {
2861 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth == 48); /* Canonical. */
2862 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth < 64
2863 && X86_IS_CANONICAL(pVmcs->u64GuestRip.u))
2864 { /* likely */ }
2865 else
2866 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRip);
2867 }
2868 }
2869
2870 /* RFLAGS (bits 63:22 (or 31:22), bits 15, 5, 3 are reserved, bit 1 MB1). */
2871 uint64_t const uGuestRFlags = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode ? pVmcs->u64GuestRFlags.u
2872 : pVmcs->u64GuestRFlags.s.Lo;
2873 if ( !(uGuestRFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK))
2874 && (uGuestRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK)
2875 { /* likely */ }
2876 else
2877 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsRsvd);
2878
2879 if ( fGstInLongMode
2880 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
2881 {
2882 if (!(uGuestRFlags & X86_EFL_VM))
2883 { /* likely */ }
2884 else
2885 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsVm);
2886 }
2887
2888 if ( VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo)
2889 && VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo) == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
2890 {
2891 if (uGuestRFlags & X86_EFL_IF)
2892 { /* likely */ }
2893 else
2894 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsIf);
2895 }
2896
2897 NOREF(pszInstr);
2898 NOREF(pszFailure);
2899 return VINF_SUCCESS;
2900}
2901
2902
2903/**
2904 * Checks guest non-register state as part of VM-entry.
2905 *
2906 * @param pVCpu The cross context virtual CPU structure.
2907 * @param pszInstr The VMX instruction name (for logging purposes).
2908 */
2909IEM_STATIC int iemVmxVmentryCheckGuestNonRegState(PVMCPU pVCpu, const char *pszInstr)
2910{
2911 /*
2912 * Guest non-register state.
2913 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
2914 */
2915 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2916 const char *const pszFailure = "VM-exit";
2917
2918 /*
2919 * Activity state.
2920 */
2921 uint64_t const u64GuestVmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
2922 uint32_t const fActivityStateMask = RT_BF_GET(u64GuestVmxMiscMsr, VMX_BF_MISC_ACTIVITY_STATES);
2923 if (!(pVmcs->u32GuestActivityState & fActivityStateMask))
2924 { /* likely */ }
2925 else
2926 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateRsvd);
2927
2928 X86DESCATTR SsAttr; SsAttr.u = pVmcs->u32GuestSsAttr;
2929
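/* The HLT activity state is only allowed when SS.DPL is 0 (Intel spec. 26.3.1.5). */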
2930 if ( !SsAttr.n.u2Dpl
2931 || pVmcs->u32GuestActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT)
2932 { /* likely */ }
2933 else
2934 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateSsDpl);
2935
2936 if ( pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
2937 || pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
2938 {
2939 if (pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE)
2940 { /* likely */ }
2941 else
2942 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateStiMovSs);
2943 }
2944
2945 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
2946 {
2947 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
2948 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(pVmcs->u32EntryIntInfo);
2949 AssertCompile(VMX_V_GUEST_ACTIVITY_STATE_MASK == (VMX_VMCS_GUEST_ACTIVITY_HLT | VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN));
2950 switch (pVmcs->u32GuestActivityState)
2951 {
2952 case VMX_VMCS_GUEST_ACTIVITY_HLT:
2953 {
2954 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT
2955 || uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
2956 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
2957 && ( uVector == X86_XCPT_DB
2958 || uVector == X86_XCPT_MC))
2959 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
2960 && uVector == 0))
2961 { /* likely */ }
2962 else
2963 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateHlt);
2964 break;
2965 }
2966
2967 case VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN:
2968 {
2969 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
2970 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
2971 && uVector == X86_XCPT_MC))
2972 { /* likely */ }
2973 else
2974 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateShutdown);
2975 break;
2976 }
2977
2978 case VMX_VMCS_GUEST_ACTIVITY_ACTIVE:
2979 default:
2980 break;
2981 }
2982 }
2983
2984 /*
2985 * Interruptibility state.
2986 */
2987 if (!(pVmcs->u32GuestIntrState & ~VMX_VMCS_GUEST_INT_STATE_MASK))
2988 { /* likely */ }
2989 else
2990 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRsvd);
2991
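/* Blocking by STI and blocking by MOV SS must not both be indicated, and STI blocking requires RFLAGS.IF to be 1. */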
2992 if ((pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
2993 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
2994 { /* likely */ }
2995 else
2996 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateStiMovSs);
2997
2998 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_IF)
2999 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3000 { /* likely */ }
3001 else
3002 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRFlagsSti);
3003
3004 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
3005 {
3006 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
3007 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3008 {
3009 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
3010 { /* likely */ }
3011 else
3012 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateExtInt);
3013 }
3014 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
3015 {
3016 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
3017 { /* likely */ }
3018 else
3019 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateNmi);
3020
3021 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
3022 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI))
3023 { /* likely */ }
3024 else
3025 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateVirtNmi);
3026 }
3027 }
3028
3029 /* We don't support SMM yet. So blocking-by-SMIs must not be set. */
3030 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI))
3031 { /* likely */ }
3032 else
3033 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateSmi);
3034
3035 /* We don't support SGX yet. So enclave-interruption must not be set. */
3036 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_ENCLAVE))
3037 { /* likely */ }
3038 else
3039 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateEnclave);
3040
3041 /*
3042 * Pending debug exceptions.
3043 */
3044 uint64_t const uPendingDbgXcpt = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode
3045 ? pVmcs->u64GuestPendingDbgXcpt.u
3046 : pVmcs->u64GuestPendingDbgXcpt.s.Lo;
3047 if (!(uPendingDbgXcpt & ~VMX_VMCS_GUEST_PENDING_DEBUG_VALID_MASK))
3048 { /* likely */ }
3049 else
3050 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRsvd);
3051
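/* The BS (single-step) pending debug exception bit must be consistent with RFLAGS.TF and IA32_DEBUGCTL.BTF when blocking by STI/MOV SS or halted. */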
3052 if ( (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3053 || pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
3054 {
3055 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_TF)
3056 && !(pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF)
3057 && !(uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
3058 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsTf);
3059
3060 if ( ( !(pVmcs->u64GuestRFlags.u & X86_EFL_TF)
3061 || (pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF))
3062 && (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
3063 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsNoTf);
3064 }
3065
3066 /* We don't support RTM (Real-time Transactional Memory) yet. */
3067 if (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_RTM)
3068 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRtm);
3069
3070 /*
3071 * VMCS link pointer.
3072 */
3073 if (pVmcs->u64VmcsLinkPtr.u != UINT64_C(0xffffffffffffffff))
3074 {
3075 /* We don't support SMM yet (so VMCS link pointer cannot be the current VMCS). */
3076 if (pVmcs->u64VmcsLinkPtr.u != IEM_VMX_GET_CURRENT_VMCS(pVCpu))
3077 { /* likely */ }
3078 else
3079 {
3080 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
3081 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrCurVmcs);
3082 }
3083
3084 /* Validate the address. */
3085 if ( (pVmcs->u64VmcsLinkPtr.u & X86_PAGE_4K_OFFSET_MASK)
3086 || (pVmcs->u64VmcsLinkPtr.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3087 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64VmcsLinkPtr.u))
3088 {
3089 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
3090 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmcsLinkPtr);
3091 }
3092
3093 /* Read the VMCS-link pointer from guest memory. */
3094 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs));
3095 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs),
3096 pVmcs->u64VmcsLinkPtr.u, VMX_V_VMCS_SIZE);
3097 if (RT_FAILURE(rc))
3098 {
3099 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
3100 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys);
3101 }
3102
3103 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
3104 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID)
3105 { /* likely */ }
3106 else
3107 {
3108 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
3109 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrRevId);
3110 }
3111
3112 /* Verify the shadow bit is set if VMCS shadowing is enabled. */
3113 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3114 || pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.fIsShadowVmcs)
3115 { /* likely */ }
3116 else
3117 {
3118 pVmcs->u64ExitQual.u = VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR;
3119 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrShadow);
3120 }
3121 }
3122
3123 NOREF(pszInstr);
3124 NOREF(pszFailure);
3125 return VINF_SUCCESS;
3126}
3127
3128
3129/**
3130 * Checks guest-state as part of VM-entry.
3131 *
3132 * @returns VBox status code.
3133 * @param pVCpu The cross context virtual CPU structure.
3134 * @param pszInstr The VMX instruction name (for logging purposes).
3135 */
3136IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPU pVCpu, const char *pszInstr)
3137{
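/* The checks below follow the ordering of Intel spec. 26.3.1 "Checks on the Guest State Area". */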
3138 int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
3139 if (rc == VINF_SUCCESS)
3140 { /* likely */ }
3141 else
3142 return rc;
3143
3144 rc = iemVmxVmentryCheckGuestSegRegs(pVCpu, pszInstr);
3145 if (rc == VINF_SUCCESS)
3146 { /* likely */ }
3147 else
3148 return rc;
3149
3150 rc = iemVmxVmentryCheckGuestGdtrIdtr(pVCpu, pszInstr);
3151 if (rc == VINF_SUCCESS)
3152 { /* likely */ }
3153 else
3154 return rc;
3155
3156 rc = iemVmxVmentryCheckGuestRipRFlags(pVCpu, pszInstr);
3157 if (rc == VINF_SUCCESS)
3158 { /* likely */ }
3159 else
3160 return rc;
3161
3162 rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
3163 if (rc == VINF_SUCCESS)
3164 { /* likely */ }
3165 else
3166 return rc;
3167
3168 return VINF_SUCCESS;
3169}
3170
3171
3172/**
3173 * Checks host-state as part of VM-entry.
3174 *
3175 * @returns VBox status code.
3176 * @param pVCpu The cross context virtual CPU structure.
3177 * @param pszInstr The VMX instruction name (for logging purposes).
3178 */
3179IEM_STATIC int iemVmxVmentryCheckHostState(PVMCPU pVCpu, const char *pszInstr)
3180{
3181 /*
3182 * Host Control Registers and MSRs.
3183 * See Intel spec. 26.2.2 "Checks on Host Control Registers and MSRs".
3184 */
3185 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3186 const char * const pszFailure = "VMFail";
3187
3188 /* CR0 reserved bits. */
3189 {
3190 /* CR0 MB1 bits. */
3191 uint64_t const u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
3192 if (~pVmcs->u64HostCr0.u & u64Cr0Fixed0)
3193 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed0);
3194
3195 /* CR0 MBZ bits. */
3196 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
3197 if (pVmcs->u64HostCr0.u & ~u64Cr0Fixed1)
3198 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed1);
3199 }
3200
3201 /* CR4 reserved bits. */
3202 {
3203 /* CR4 MB1 bits. */
3204 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
3205 if (~pVmcs->u64HostCr4.u & u64Cr4Fixed0)
3206 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed0);
3207
3208 /* CR4 MBZ bits. */
3209 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
3210 if (pVmcs->u64HostCr4.u & ~u64Cr4Fixed1)
3211 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed1);
3212 }
3213
3214 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3215 {
3216 /* CR3 reserved bits. */
3217 if (!(pVmcs->u64HostCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
3218 { /* likely */ }
3219 else
3220 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr3);
3221
3222 /* SYSENTER ESP and SYSENTER EIP. */
3223 if ( X86_IS_CANONICAL(pVmcs->u64HostSysenterEsp.u)
3224 && X86_IS_CANONICAL(pVmcs->u64HostSysenterEip.u))
3225 { /* likely */ }
3226 else
3227 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSysenterEspEip);
3228 }
3229
3230 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PERF_MSR)); /* We don't support loading IA32_PERF_GLOBAL_CTRL MSR yet. */
3231
3232 /* PAT MSR. */
3233 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
3234 || CPUMIsPatMsrValid(pVmcs->u64HostPatMsr.u))
3235 { /* likely */ }
3236 else
3237 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostPatMsr);
3238
3239 /* EFER MSR. */
3240 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
3241 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
3242 || !(pVmcs->u64HostEferMsr.u & ~uValidEferMask))
3243 { /* likely */ }
3244 else
3245 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsrRsvd);
3246
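/* Host EFER.LMA and EFER.LME must each match the "host address-space size" VM-exit control. */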
3247 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
3248 bool const fHostLma = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LMA);
3249 bool const fHostLme = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LME);
3250 if ( fHostInLongMode == fHostLma
3251 && fHostInLongMode == fHostLme)
3252 { /* likely */ }
3253 else
3254 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsr);
3255
3256 /*
3257 * Host Segment and Descriptor-Table Registers.
3258 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
3259 */
3260 /* Selector RPL and TI. */
3261 if ( !(pVmcs->HostCs & (X86_SEL_RPL | X86_SEL_LDT))
3262 && !(pVmcs->HostSs & (X86_SEL_RPL | X86_SEL_LDT))
3263 && !(pVmcs->HostDs & (X86_SEL_RPL | X86_SEL_LDT))
3264 && !(pVmcs->HostEs & (X86_SEL_RPL | X86_SEL_LDT))
3265 && !(pVmcs->HostFs & (X86_SEL_RPL | X86_SEL_LDT))
3266 && !(pVmcs->HostGs & (X86_SEL_RPL | X86_SEL_LDT))
3267 && !(pVmcs->HostTr & (X86_SEL_RPL | X86_SEL_LDT)))
3268 { /* likely */ }
3269 else
3270 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSel);
3271
3272 /* CS and TR selectors cannot be 0. */
3273 if ( pVmcs->HostCs
3274 && pVmcs->HostTr)
3275 { /* likely */ }
3276 else
3277 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCsTr);
3278
3279 /* SS cannot be 0 if 32-bit host. */
3280 if ( fHostInLongMode
3281 || pVmcs->HostSs)
3282 { /* likely */ }
3283 else
3284 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSs);
3285
3286 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3287 {
3288 /* FS, GS, GDTR, IDTR, TR base address. */
3289 if ( X86_IS_CANONICAL(pVmcs->u64HostFsBase.u)
3290 && X86_IS_CANONICAL(pVmcs->u64HostGsBase.u)
3291 && X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u)
3292 && X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u)
3293 && X86_IS_CANONICAL(pVmcs->u64HostTrBase.u))
3294 { /* likely */ }
3295 else
3296 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSegBase);
3297 }
3298
3299 /*
3300 * Host address-space size for 64-bit CPUs.
3301 * See Intel spec. 26.2.4 "Checks Related to Address-Space Size".
3302 */
3303 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
3304 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3305 {
3306 bool const fCpuInLongMode = CPUMIsGuestInLongMode(pVCpu);
3307
3308 /* Logical processor in IA-32e mode. */
3309 if (fCpuInLongMode)
3310 {
3311 if (fHostInLongMode)
3312 {
3313 /* PAE must be set. */
3314 if (pVmcs->u64HostCr4.u & X86_CR4_PAE)
3315 { /* likely */ }
3316 else
3317 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pae);
3318
3319 /* RIP must be canonical. */
3320 if (X86_IS_CANONICAL(pVmcs->u64HostRip.u))
3321 { /* likely */ }
3322 else
3323 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRip);
3324 }
3325 else
3326 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostLongMode);
3327 }
3328 else
3329 {
3330 /* Logical processor is outside IA-32e mode. */
3331 if ( !fGstInLongMode
3332 && !fHostInLongMode)
3333 {
3334 /* PCIDE should not be set. */
3335 if (!(pVmcs->u64HostCr4.u & X86_CR4_PCIDE))
3336 { /* likely */ }
3337 else
3338 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pcide);
3339
3340 /* The high 32-bits of RIP MBZ. */
3341 if (!pVmcs->u64HostRip.s.Hi)
3342 { /* likely */ }
3343 else
3344 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRipRsvd);
3345 }
3346 else
3347 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongMode);
3348 }
3349 }
3350 else
3351 {
3352 /* Host address-space size for 32-bit CPUs. */
3353 if ( !fGstInLongMode
3354 && !fHostInLongMode)
3355 { /* likely */ }
3356 else
3357 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongModeNoCpu);
3358 }
3359
3360 NOREF(pszInstr);
3361 NOREF(pszFailure);
3362 return VINF_SUCCESS;
3363}
3364
3365
3366/**
3367 * Checks VM-entry controls fields as part of VM-entry.
3368 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
3369 *
3370 * @returns VBox status code.
3371 * @param pVCpu The cross context virtual CPU structure.
3372 * @param pszInstr The VMX instruction name (for logging purposes).
3373 */
3374IEM_STATIC int iemVmxVmentryCheckEntryCtls(PVMCPU pVCpu, const char *pszInstr)
3375{
3376 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3377 const char * const pszFailure = "VMFail";
3378
3379 /* VM-entry controls. */
3380 VMXCTLSMSR EntryCtls;
3381 EntryCtls.u = CPUMGetGuestIa32VmxEntryCtls(pVCpu);
3382 if (~pVmcs->u32EntryCtls & EntryCtls.n.disallowed0)
3383 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);
3384
3385 if (pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1)
3386 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsAllowed1);
3387
3388 /* Event injection. */
3389 uint32_t const uIntInfo = pVmcs->u32EntryIntInfo;
3390 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VALID))
3391 {
3392 /* Type and vector. */
3393 uint8_t const uType = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_TYPE);
3394 uint8_t const uVector = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VECTOR);
3395 uint8_t const uRsvd = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_RSVD_12_30);
3396 if ( uRsvd == 0
3397 && HMVmxIsEntryIntInfoTypeValid(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxMonitorTrapFlag, uType)
3398 && HMVmxIsEntryIntInfoVectorValid(uVector, uType))
3399 { /* likely */ }
3400 else
3401 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoTypeVecRsvd);
3402
3403 /* Exception error code. */
3404 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID))
3405 {
3406 /* With unrestricted guest, error-code delivery requires CR0.PE to be set. */
3407 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
3408 || (pVmcs->u64GuestCr0.s.Lo & X86_CR0_PE))
3409 { /* likely */ }
3410 else
3411 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodePe);
3412
3413 /* Exceptions that provide an error code. */
3414 if ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
3415 && ( uVector == X86_XCPT_DF
3416 || uVector == X86_XCPT_TS
3417 || uVector == X86_XCPT_NP
3418 || uVector == X86_XCPT_SS
3419 || uVector == X86_XCPT_GP
3420 || uVector == X86_XCPT_PF
3421 || uVector == X86_XCPT_AC))
3422 { /* likely */ }
3423 else
3424 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodeVec);
3425
3426 /* Exception error-code reserved bits. */
3427 if (!(pVmcs->u32EntryXcptErrCode & ~VMX_ENTRY_INT_XCPT_ERR_CODE_VALID_MASK))
3428 { /* likely */ }
3429 else
3430 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryXcptErrCodeRsvd);
3431
3432 /* Injecting a software interrupt, software exception or privileged software exception. */
3433 if ( uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
3434 || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
3435 || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
3436 {
3437 /* Instruction length must be in the range 0-15. */
3438 if (pVmcs->u32EntryInstrLen <= VMX_ENTRY_INSTR_LEN_MAX)
3439 { /* likely */ }
3440 else
3441 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLen);
3442
3443 /* An instruction length of 0 is allowed only when the corresponding CPU feature is present. */
3444 if ( pVmcs->u32EntryInstrLen == 0
3445 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEntryInjectSoftInt)
3446 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLenZero);
3447 }
3448 }
3449 }
3450
3451 /* VM-entry MSR-load count and VM-entry MSR-load area address. */
3452 if (pVmcs->u32EntryMsrLoadCount)
3453 {
3454 if ( (pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
3455 || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3456 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
3457 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrEntryMsrLoad);
3458 }
3459
3460 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)); /* We don't support SMM yet. */
3461 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON)); /* We don't support dual-monitor treatment yet. */
3462
3463 NOREF(pszInstr);
3464 NOREF(pszFailure);
3465 return VINF_SUCCESS;
3466}
3467
3468
3469/**
3470 * Checks VM-exit controls fields as part of VM-entry.
3471 * See Intel spec. 26.2.1.2 "VM-Exit Control Fields".
3472 *
3473 * @returns VBox status code.
3474 * @param pVCpu The cross context virtual CPU structure.
3475 * @param pszInstr The VMX instruction name (for logging purposes).
3476 */
3477IEM_STATIC int iemVmxVmentryCheckExitCtls(PVMCPU pVCpu, const char *pszInstr)
3478{
3479 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3480 const char * const pszFailure = "VMFail";
3481
3482 /* VM-exit controls. */
3483 VMXCTLSMSR ExitCtls;
3484 ExitCtls.u = CPUMGetGuestIa32VmxExitCtls(pVCpu);
3485 if (~pVmcs->u32ExitCtls & ExitCtls.n.disallowed0)
3486 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);
3487
3488 if (pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1)
3489 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsAllowed1);
3490
3491 /* Save preemption timer without activating it. */
3492 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
3493 && (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
3494 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_SavePreemptTimer);
3495
3496 /* VM-exit MSR-store count and VM-exit MSR-store area address. */
3497 if (pVmcs->u32ExitMsrStoreCount)
3498 {
3499 if ( (pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
3500 || (pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3501 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
3502 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrStore);
3503 }
3504
3505 /* VM-exit MSR-load count and VM-exit MSR-load area address. */
3506 if (pVmcs->u32ExitMsrLoadCount)
3507 {
3508 if ( (pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
3509 || (pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3510 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
3511 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrLoad);
3512 }
3513
3514 NOREF(pszInstr);
3515 NOREF(pszFailure);
3516 return VINF_SUCCESS;
3517}
3518
3519
3520/**
3521 * Checks VM-execution controls fields as part of VM-entry.
3522 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
3523 *
3524 * @returns VBox status code.
3525 * @param pVCpu The cross context virtual CPU structure.
3526 * @param pszInstr The VMX instruction name (for logging purposes).
3527 *
3528 * @remarks This may update secondary-processor based VM-execution control fields
3529 * in the current VMCS if necessary.
3530 */
3531IEM_STATIC int iemVmxVmentryCheckExecCtls(PVMCPU pVCpu, const char *pszInstr)
3532{
3533 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3534 const char * const pszFailure = "VMFail";
3535
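/* Each control field is validated against its capability MSR: bits the MSR requires to be 1 must be set, and bits it does not allow to be 1 must be clear. */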
3536 /* Pin-based VM-execution controls. */
3537 {
3538 VMXCTLSMSR PinCtls;
3539 PinCtls.u = CPUMGetGuestIa32VmxPinbasedCtls(pVCpu);
3540 if (~pVmcs->u32PinCtls & PinCtls.n.disallowed0)
3541 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);
3542
3543 if (pVmcs->u32PinCtls & ~PinCtls.n.allowed1)
3544 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsAllowed1);
3545 }
3546
3547 /* Processor-based VM-execution controls. */
3548 {
3549 VMXCTLSMSR ProcCtls;
3550 ProcCtls.u = CPUMGetGuestIa32VmxProcbasedCtls(pVCpu);
3551 if (~pVmcs->u32ProcCtls & ProcCtls.n.disallowed0)
3552 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);
3553
3554 if (pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1)
3555 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsAllowed1);
3556 }
3557
3558 /* Secondary processor-based VM-execution controls. */
3559 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
3560 {
3561 VMXCTLSMSR ProcCtls2;
3562 ProcCtls2.u = CPUMGetGuestIa32VmxProcbasedCtls2(pVCpu);
3563 if (~pVmcs->u32ProcCtls2 & ProcCtls2.n.disallowed0)
3564 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);
3565
3566 if (pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1)
3567 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Allowed1);
3568 }
3569 else
3570 Assert(!pVmcs->u32ProcCtls2);
3571
3572 /* CR3-target count. */
3573 if (pVmcs->u32Cr3TargetCount <= VMX_V_CR3_TARGET_COUNT)
3574 { /* likely */ }
3575 else
3576 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Cr3TargetCount);
3577
3578 /* IO bitmaps physical addresses. */
3579 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
3580 {
3581 if ( (pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
3582 || (pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3583 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
3584 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapA);
3585
3586 if ( (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
3587 || (pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3588 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
3589 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
3590 }
3591
3592 /* MSR bitmap physical address. */
3593 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
3594 {
3595 if ( (pVmcs->u64AddrMsrBitmap.u & X86_PAGE_4K_OFFSET_MASK)
3596 || (pVmcs->u64AddrMsrBitmap.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3597 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrMsrBitmap.u))
3598 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);
3599 }
3600
3601 /* TPR shadow related controls. */
3602 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
3603 {
3604 /* Virtual-APIC page physical address. */
3605 RTGCPHYS GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
3606 if ( (GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
3607 || (GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3608 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
3609 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
3610
3611 /* Read the Virtual-APIC page. */
3612 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
3613 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage),
3614 GCPhysVirtApic, VMX_V_VIRT_APIC_PAGES);
3615 if (RT_FAILURE(rc))
3616 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
3617
3618 /* TPR threshold without virtual-interrupt delivery. */
3619 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
3620 && (pVmcs->u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK))
3621 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdRsvd);
3622
3623 /* TPR threshold and VTPR: the threshold must not exceed VTPR's priority class (bits 7:4) when virtual-APIC accesses and virtual-interrupt delivery are disabled. */
3624 uint8_t const *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
3625 uint8_t const u8VTpr = *(pbVirtApic + XAPIC_OFF_TPR);
3626 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
3627 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
3628 && RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) > ((u8VTpr >> 4) & UINT32_C(0xf)) /* Bits 4:7 */)
3629 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
3630 }
3631 else
3632 {
3633 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
3634 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
3635 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
3636 { /* likely */ }
3637 else
3638 {
3639 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
3640 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicTprShadow);
3641 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
3642 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ApicRegVirt);
3643 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
3644 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtIntDelivery);
3645 }
3646 }
3647
3648 /* NMI exiting and virtual-NMIs. */
3649 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
3650 && (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
3651 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtNmi);
3652
3653 /* Virtual-NMIs and NMI-window exiting. */
3654 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
3655 && (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
3656 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_NmiWindowExit);
3657
3658 /* Virtualize APIC accesses. */
3659 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
3660 {
3661 /* APIC-access physical address. */
3662 RTGCPHYS GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
3663 if ( (GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
3664 || (GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3665 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
3666 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);
3667 }
3668
3669 /* Virtualize-x2APIC mode is mutually exclusive with virtualize-APIC accesses. */
3670 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
3671 && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
3672 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
3673
3674 /* Virtual-interrupt delivery requires external interrupt exiting. */
3675 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
3676 && !(pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
3677 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
3678
3679 /* VPID must be non-zero when the "enable VPID" control is set. */
3680 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VPID)
3681 || pVmcs->u16Vpid != 0)
3682 { /* likely */ }
3683 else
3684 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Vpid);
3685
3686 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_POSTED_INT)); /* We don't support posted interrupts yet. */
3687 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)); /* We don't support EPT yet. */
3688 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PML)); /* We don't support PML yet. */
3689 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)); /* We don't support Unrestricted-guests yet. */
3690 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMFUNC)); /* We don't support VM functions yet. */
3691 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT_VE)); /* We don't support EPT-violation #VE yet. */
3692
3693 /* VMCS shadowing. */
3694 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3695 {
3696 /* VMREAD-bitmap physical address. */
3697 RTGCPHYS GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
3698 if ( ( GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
3699 || ( GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3700 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
3701 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmreadBitmap);
3702
3703 /* VMWRITE-bitmap physical address. */
3704 RTGCPHYS GCPhysVmwriteBitmap = pVmcs->u64AddrVmwriteBitmap.u;
3705 if ( ( GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
3706 || ( GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3707 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
3708 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);
3709
3710 /* Read the VMREAD-bitmap. */
3711 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
3712 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap),
3713 GCPhysVmreadBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
3714 if (RT_FAILURE(rc))
3715 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys);
3716
3717 /* Read the VMWRITE-bitmap. */
3718 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap));
3719 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap),
3720 GCPhysVmwriteBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
3721 if (RT_FAILURE(rc))
3722 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys);
3723 }
3724
3725 NOREF(pszInstr);
3726 NOREF(pszFailure);
3727 return VINF_SUCCESS;
3728}
3729
3730
3731/**
3732 * VMLAUNCH/VMRESUME instruction execution worker.
3733 *
3734 * @param pVCpu The cross context virtual CPU structure.
3735 * @param cbInstr The instruction length.
3736 * @param uInstrId The instruction identity (either VMXINSTRID_VMLAUNCH or
3737 * VMXINSTRID_VMRESUME).
3738 * @param pExitInfo Pointer to the VM-exit instruction information struct.
3739 * Optional, can be NULL.
3740 *
3741 * @remarks Common VMX instruction checks are already expected to have been done by
3742 * the caller, i.e. the CR4.VMXE, real/V86-mode and EFER/CS.L checks.
3743 */
3744IEM_STATIC VBOXSTRICTRC iemVmxVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId, PCVMXVEXITINFO pExitInfo)
3745{
3746 Assert( uInstrId == VMXINSTRID_VMLAUNCH
3747 || uInstrId == VMXINSTRID_VMRESUME);
3748
3749 const char *pszInstr = uInstrId == VMXINSTRID_VMLAUNCH ? "vmlaunch" : "vmresume";
3750 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
3751 {
3752 RT_NOREF(pExitInfo);
3753 /** @todo NSTVMX: intercept. */
3754 }
3755 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
3756
3757 /* CPL. */
3758 if (pVCpu->iem.s.uCpl > 0)
3759 {
3760 Log(("%s: CPL %u -> #GP(0)\n", pszInstr, pVCpu->iem.s.uCpl));
3761 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_Cpl;
3762 return iemRaiseGeneralProtectionFault0(pVCpu);
3763 }
3764
3765 /* Current VMCS valid. */
3766 if (!IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
3767 {
3768 Log(("%s: VMCS pointer %#RGp invalid -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
3769 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrInvalid;
3770 iemVmxVmFailInvalid(pVCpu);
3771 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3772 return VINF_SUCCESS;
3773 }
3774
3775 /** @todo Distinguish block-by-MOV-SS from block-by-STI. Currently we
3776 * use block-by-STI here which is not quite correct. */
3777 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
3778 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
3779 {
3780 Log(("%s: VM entry with events blocked by MOV SS -> VMFail\n", pszInstr));
3781 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_BlocKMovSS;
3782 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_BLOCK_MOVSS);
3783 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3784 return VINF_SUCCESS;
3785 }
3786
3787 if (uInstrId == VMXINSTRID_VMLAUNCH)
3788 {
3789 /* VMLAUNCH with non-clear VMCS. */
3790 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState != VMX_V_VMCS_STATE_CLEAR)
3791 {
3792 Log(("vmlaunch: VMLAUNCH with non-clear VMCS -> VMFail\n"));
3793 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsClear;
3794 iemVmxVmFail(pVCpu, VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS);
3795 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3796 return VINF_SUCCESS;
3797 }
3798 }
3799 else
3800 {
3801 /* VMRESUME with non-launched VMCS. */
3802 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState != VMX_V_VMCS_STATE_LAUNCHED)
3803 {
3804 Log(("vmresume: VMRESUME with non-launched VMCS -> VMFail\n"));
3805 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsLaunch;
3806 iemVmxVmFail(pVCpu, VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS);
3807 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3808 return VINF_SUCCESS;
3809 }
3810 }
3811
3812 /*
3813 * Load the current VMCS.
3814 */
3815 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
3816 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs),
3817 IEM_VMX_GET_CURRENT_VMCS(pVCpu), VMX_V_VMCS_SIZE);
3818 if (RT_FAILURE(rc))
3819 {
3820 Log(("%s: Failed to read VMCS at %#RGp, rc=%Rrc\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu), rc));
3821 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrReadPhys;
3822 return rc;
3823 }
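/* The VM-entry checks below follow the order of Intel spec. 26.2 (VMX controls and host state) and 26.3 (guest state). */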
3824
3825 /*
3826 * Check VM-execution control fields.
3827 */
3828 rc = iemVmxVmentryCheckExecCtls(pVCpu, pszInstr);
3829 if (rc == VINF_SUCCESS)
3830 { /* likely */ }
3831 else
3832 {
3833 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
3834 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3835 return VINF_SUCCESS;
3836 }
3837
3838 /*
3839 * Check VM-exit control fields.
3840 */
3841 rc = iemVmxVmentryCheckExitCtls(pVCpu, pszInstr);
3842 if (rc == VINF_SUCCESS)
3843 { /* likely */ }
3844 else
3845 {
3846 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
3847 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3848 return VINF_SUCCESS;
3849 }
3850
3851 /*
3852 * Check VM-entry control fields.
3853 */
3854 rc = iemVmxVmentryCheckEntryCtls(pVCpu, pszInstr);
3855 if (rc == VINF_SUCCESS)
3856 { /* likely */ }
3857 else
3858 {
3859 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
3860 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3861 return VINF_SUCCESS;
3862 }
3863
3864 /*
3865 * Check host-state fields.
3866 */
3867 rc = iemVmxVmentryCheckHostState(pVCpu, pszInstr);
3868 if (rc == VINF_SUCCESS)
3869 { /* likely */ }
3870 else
3871 {
3872 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_HOST_STATE);
3873 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3874 return VINF_SUCCESS;
3875 }
3876
3877 /*
3878 * Check guest-state fields.
3879 */
3880 rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
3881 if (rc == VINF_SUCCESS)
3882 { /* likely */ }
3883 else
3884 {
3885 /** @todo NSTVMX: VM-exit due to failed guest-state checks is not implemented yet. */
3886 return VINF_SUCCESS;
3887 }
3888
3889 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_Success;
3890 iemVmxVmSucceed(pVCpu);
3891 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
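/* Note: loading guest state, MSR handling and event injection for VM-entry are not implemented at this point, hence the internal error status below. */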
3892 return VERR_IEM_IPE_2;
3893}
3894
3895
3896/**
3897 * Implements 'VMXON'.
3898 */
3899IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
3900{
3901 return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
3902}
3903
3904
3905/**
3906 * Implements 'VMXOFF'.
3907 *
3908 * @remarks Common VMX instruction checks are already expected to have been done by
3909 * the caller, i.e. the CR4.VMXE, real/V86-mode and EFER/CS.L checks.
3910 */
3911IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
3912{
3913# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
3914 RT_NOREF2(pVCpu, cbInstr);
3915 return VINF_EM_RAW_EMULATE_INSTR;
3916# else
3917 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
3918 {
3919 /** @todo NSTVMX: intercept. */
3920 }
3921
3922 /* CPL. */
3923 if (pVCpu->iem.s.uCpl > 0)
3924 {
3925 Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
3926 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxoff_Cpl;
3927 return iemRaiseGeneralProtectionFault0(pVCpu);
3928 }
3929
3930 /* Dual monitor treatment of SMIs and SMM. */
3931 uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
3932 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
3933 {
3934 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
3935 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3936 return VINF_SUCCESS;
3937 }
3938
3939 /*
3940 * Record that we're no longer in VMX root operation, block INIT, block and disable A20M.
3941 */
3942 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
3943 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
3944
3945 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
3946 { /** @todo NSTVMX: Unblock SMI. */ }
3947 /** @todo NSTVMX: Unblock and enable A20M. */
3948 /** @todo NSTVMX: Clear address-range monitoring. */
3949
3950 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxoff_Success;
3951 iemVmxVmSucceed(pVCpu);
3952 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
3953# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
3954 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
3955# else
3956 return VINF_SUCCESS;
3957# endif
3958# endif
3959}
3960
3961
3962/**
3963 * Implements 'VMLAUNCH'.
3964 */
3965IEM_CIMPL_DEF_0(iemCImpl_vmlaunch)
3966{
3967 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMLAUNCH, NULL /* pExitInfo */);
3968}
3969
3970
3971/**
3972 * Implements 'VMRESUME'.
3973 */
3974IEM_CIMPL_DEF_0(iemCImpl_vmresume)
3975{
3976 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMRESUME, NULL /* pExitInfo */);
3977}
3978
3979
3980/**
3981 * Implements 'VMPTRLD'.
3982 */
3983IEM_CIMPL_DEF_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
3984{
3985 return iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
3986}
3987
3988
3989/**
3990 * Implements 'VMPTRST'.
3991 */
3992IEM_CIMPL_DEF_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
3993{
3994 return iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
3995}
3996
3997
3998/**
3999 * Implements 'VMCLEAR'.
4000 */
4001IEM_CIMPL_DEF_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
4002{
4003 return iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
4004}
4005
4006
4007/**
4008 * Implements 'VMWRITE' register.
4009 */
4010IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64FieldEnc)
4011{
4012 return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, IEMMODE_64BIT /* N/A */, u64Val, u64FieldEnc,
4013 NULL /* pExitInfo */);
4014}
4015
4016
4017/**
4018 * Implements 'VMWRITE' memory.
4019 */
4020IEM_CIMPL_DEF_4(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrVal, uint32_t, u64FieldEnc)
4021{
4022 return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrVal, u64FieldEnc, NULL /* pExitInfo */);
4023}
4024
4025
4026/**
4027 * Implements 'VMREAD' 64-bit register.
4028 */
4029IEM_CIMPL_DEF_2(iemCImpl_vmread64_reg, uint64_t *, pu64Dst, uint64_t, u64FieldEnc)
4030{
4031 return iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, NULL /* pExitInfo */);
4032}
4033
4034
4035/**
4036 * Implements 'VMREAD' 32-bit register.
4037 */
4038IEM_CIMPL_DEF_2(iemCImpl_vmread32_reg, uint32_t *, pu32Dst, uint32_t, u32FieldEnc)
4039{
4040 return iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u32FieldEnc, NULL /* pExitInfo */);
4041}
4042
4043
4044/**
4045 * Implements 'VMREAD' memory.
4046 */
4047IEM_CIMPL_DEF_4(iemCImpl_vmread_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrDst, uint32_t, u64FieldEnc)
4048{
4049 return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, u64FieldEnc, NULL /* pExitInfo */);
4050}
4051
4052#endif
4053