VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h@73969

Last change on this file since 73969 was 73969, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 Fix typo in VMWRITE impl.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 75.5 KB
1/* $Id: IEMAllCImplVmxInstr.cpp.h 73969 2018-08-30 03:41:13Z vboxsync $ */
2/** @file
3 * IEM - VT-x instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/**
20 * Implements 'VMCALL'.
21 */
22IEM_CIMPL_DEF_0(iemCImpl_vmcall)
23{
24 /** @todo NSTVMX: intercept. */
25
26 /* Join forces with vmmcall. */
27 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
28}
29
30#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
31/**
32 * Map of VMCS field encodings to their virtual-VMCS structure offsets.
33 *
34 * The first array dimension is the VMCS field encoding's Width OR'ed with its Type,
35 * and the second dimension is the Index (see VMXVMCSFIELDENC); a worked example follows the table.
36 */
37uint16_t const g_aoffVmcsMap[16][VMX_V_VMCS_MAX_INDEX + 1] =
38{
39 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
40 {
41 /* 0 */ RT_OFFSETOF(VMXVVMCS, u16Vpid),
42 /* 1 */ RT_OFFSETOF(VMXVVMCS, u16PostIntNotifyVector),
43 /* 2 */ RT_OFFSETOF(VMXVVMCS, u16EptpIndex),
44 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
45 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
46 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
47 },
48 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
49 {
50 /* 0-7 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
51 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
52 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
53 /* 24-25 */ UINT16_MAX, UINT16_MAX
54 },
55 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
56 {
57 /* 0 */ RT_OFFSETOF(VMXVVMCS, GuestEs),
58 /* 1 */ RT_OFFSETOF(VMXVVMCS, GuestCs),
59 /* 2 */ RT_OFFSETOF(VMXVVMCS, GuestSs),
60 /* 3 */ RT_OFFSETOF(VMXVVMCS, GuestDs),
61 /* 4 */ RT_OFFSETOF(VMXVVMCS, GuestFs),
62 /* 5 */ RT_OFFSETOF(VMXVVMCS, GuestGs),
63 /* 6 */ RT_OFFSETOF(VMXVVMCS, GuestLdtr),
64 /* 7 */ RT_OFFSETOF(VMXVVMCS, GuestTr),
65 /* 8 */ RT_OFFSETOF(VMXVVMCS, u16GuestIntStatus),
66 /* 9 */ RT_OFFSETOF(VMXVVMCS, u16PmlIndex),
67 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
68 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
69 },
70 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
71 {
72 /* 0 */ RT_OFFSETOF(VMXVVMCS, HostEs),
73 /* 1 */ RT_OFFSETOF(VMXVVMCS, HostCs),
74 /* 2 */ RT_OFFSETOF(VMXVVMCS, HostSs),
75 /* 3 */ RT_OFFSETOF(VMXVVMCS, HostDs),
76 /* 4 */ RT_OFFSETOF(VMXVVMCS, HostFs),
77 /* 5 */ RT_OFFSETOF(VMXVVMCS, HostGs),
78 /* 6 */ RT_OFFSETOF(VMXVVMCS, HostTr),
79 /* 7-14 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
80 /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
81 /* 23-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
82 },
83 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
84 {
85 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapA),
86 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapB),
87 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64AddrMsrBitmap),
88 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmExitMsrStore),
89 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmExitMsrLoad),
90 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmEntryMsrLoad),
91 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64ExecVmcsPtr),
92 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64AddrPml),
93 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64TscOffset),
94 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64AddrVirtApic),
95 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64AddrApicAccess),
96 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64AddrPostedIntDesc),
97 /* 12 */ RT_OFFSETOF(VMXVVMCS, u64VmFuncCtls),
98 /* 13 */ RT_OFFSETOF(VMXVVMCS, u64EptpPtr),
99 /* 14 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap0),
100 /* 15 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap1),
101 /* 16 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap2),
102 /* 17 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap3),
103 /* 18 */ RT_OFFSETOF(VMXVVMCS, u64AddrEptpList),
104 /* 19 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmreadBitmap),
105 /* 20 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmwriteBitmap),
106 /* 21 */ RT_OFFSETOF(VMXVVMCS, u64AddrXcptVeInfo),
107 /* 22 */ RT_OFFSETOF(VMXVVMCS, u64AddrXssBitmap),
108 /* 23 */ RT_OFFSETOF(VMXVVMCS, u64AddrEnclsBitmap),
109 /* 24 */ UINT16_MAX,
110 /* 25 */ RT_OFFSETOF(VMXVVMCS, u64TscMultiplier)
111 },
112 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
113 {
114 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64GuestPhysAddr),
115 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
116 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
117 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
118 /* 25 */ UINT16_MAX
119 },
120 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
121 {
122 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64VmcsLinkPtr),
123 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64GuestDebugCtlMsr),
124 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64GuestPatMsr),
125 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEferMsr),
126 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
127 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte0),
128 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte1),
129 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte2),
130 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte3),
131 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64GuestBndcfgsMsr),
132 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
133 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
134 },
135 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
136 {
137 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64HostPatMsr),
138 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64HostEferMsr),
139 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64HostPerfGlobalCtlMsr),
140 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
141 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
142 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
143 },
144 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
145 {
146 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32PinCtls),
147 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls),
148 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32XcptBitmap),
149 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMask),
150 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMatch),
151 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32Cr3TargetCount),
152 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32ExitCtls),
153 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrStoreCount),
154 /* 8 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrLoadCount),
155 /* 9 */ RT_OFFSETOF(VMXVVMCS, u32EntryCtls),
156 /* 10 */ RT_OFFSETOF(VMXVVMCS, u32EntryMsrLoadCount),
157 /* 11 */ RT_OFFSETOF(VMXVVMCS, u32EntryIntInfo),
158 /* 12 */ RT_OFFSETOF(VMXVVMCS, u32EntryXcptErrCode),
159 /* 13 */ RT_OFFSETOF(VMXVVMCS, u32EntryInstrLen),
160 /* 14 */ RT_OFFSETOF(VMXVVMCS, u32TprTreshold),
161 /* 15 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls2),
162 /* 16 */ RT_OFFSETOF(VMXVVMCS, u32PleGap),
163 /* 17 */ RT_OFFSETOF(VMXVVMCS, u32PleWindow),
164 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
165 },
166 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
167 {
168 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32RoVmInstrError),
169 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitReason),
170 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitIntInfo),
171 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitErrCode),
172 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
173 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
174 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitInstrLen),
175 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitInstrInfo),
176 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
177 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
178 /* 24-25 */ UINT16_MAX, UINT16_MAX
179 },
180 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
181 {
182 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsLimit),
183 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsLimit),
184 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsLimit),
185 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsLimit),
186 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsLimit),
187 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsLimit),
188 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrLimit),
189 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrLimit),
190 /* 8 */ RT_OFFSETOF(VMXVVMCS, u32GuestGdtrLimit),
191 /* 9 */ RT_OFFSETOF(VMXVVMCS, u32GuestIdtrLimit),
192 /* 10 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsAttr),
193 /* 11 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsAttr),
194 /* 12 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsAttr),
195 /* 13 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsAttr),
196 /* 14 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsAttr),
197 /* 15 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsAttr),
198 /* 16 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrAttr),
199 /* 17 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrAttr),
200 /* 18 */ RT_OFFSETOF(VMXVVMCS, u32GuestIntrState),
201 /* 19 */ RT_OFFSETOF(VMXVVMCS, u32GuestActivityState),
202 /* 20 */ RT_OFFSETOF(VMXVVMCS, u32GuestSmBase),
203 /* 21 */ RT_OFFSETOF(VMXVVMCS, u32GuestSysenterCS),
204 /* 22 */ UINT16_MAX,
205 /* 23 */ RT_OFFSETOF(VMXVVMCS, u32PreemptTimer),
206 /* 24-25 */ UINT16_MAX, UINT16_MAX
207 },
208 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
209 {
210 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32HostSysenterCs),
211 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
212 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
213 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
214 /* 25 */ UINT16_MAX
215 },
216 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_CONTROL: */
217 {
218 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64Cr0Mask),
219 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64Cr4Mask),
220 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64Cr0ReadShadow),
221 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64Cr4ReadShadow),
222 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target0),
223 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target1),
224 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target2),
225 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target3),
226 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
227 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
228 /* 24-25 */ UINT16_MAX, UINT16_MAX
229 },
230 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
231 {
232 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64ExitQual),
233 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64IoRcx),
234 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64IoRsi),
235 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64IoRdi),
236 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64IoRip),
237 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestLinearAddr),
238 /* 6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
239 /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
240 /* 22-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
241 },
242 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
243 {
244 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr0),
245 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr3),
246 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr4),
247 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEsBase),
248 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64GuestCsBase),
249 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestSsBase),
250 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64GuestDsBase),
251 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64GuestFsBase),
252 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64GuestGsBase),
253 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64GuestLdtrBase),
254 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64GuestTrBase),
255 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64GuestGdtrBase),
256 /* 12 */ RT_OFFSETOF(VMXVVMCS, u64GuestIdtrBase),
257 /* 13 */ RT_OFFSETOF(VMXVVMCS, u64GuestDr7),
258 /* 14 */ RT_OFFSETOF(VMXVVMCS, u64GuestRsp),
259 /* 15 */ RT_OFFSETOF(VMXVVMCS, u64GuestRip),
260 /* 16 */ RT_OFFSETOF(VMXVVMCS, u64GuestRFlags),
261 /* 17 */ RT_OFFSETOF(VMXVVMCS, u64GuestPendingDbgXcpt),
262 /* 18 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEsp),
263 /* 19 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEip),
264 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
265 },
266 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_HOST_STATE: */
267 {
268 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64HostCr0),
269 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64HostCr3),
270 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64HostCr4),
271 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64HostFsBase),
272 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64HostGsBase),
273 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64HostTrBase),
274 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64HostGdtrBase),
275 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64HostIdtrBase),
276 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEsp),
277 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEip),
278 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64HostRsp),
279 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64HostRip),
280 /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
281 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
282 }
283};
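/*
 * Worked lookup example (editorial addition, not part of the original source;
 * it assumes the usual Intel encoding layout with the width in bits 14:13, the
 * type in bits 11:10 and the index in bits 9:1):
 *
 *   VMX_VMCS64_CTRL_TSC_OFFSET_FULL = 0x2010
 *       Width = (0x2010 >> 13) & 3     = 1 (64-bit)
 *       Type  = (0x2010 >> 10) & 3     = 0 (control)
 *       Index = (0x2010 >>  1) & 0x1ff = 8
 *   => g_aoffVmcsMap[(1 << 2) | 0][8]  = RT_OFFSETOF(VMXVVMCS, u64TscOffset)
 *
 * This is exactly the (uWidth << 2) | uType / u8Index decomposition used by
 * iemVmxVmwrite() further below.
 */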
284
285
286/**
287 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
288 * relative offsets.
289 */
290# ifdef IEM_WITH_CODE_TLB
291# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) do { } while (0)
292# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) do { } while (0)
293# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
294# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
295# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
296# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
297# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
298# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
299# error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
300# else /* !IEM_WITH_CODE_TLB */
301# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
302 do \
303 { \
304 Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
305 (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
306 } while (0)
307
308# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
309
310# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
311 do \
312 { \
313 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
314 uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
315 uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
316 (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
317 } while (0)
318
319# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
320 do \
321 { \
322 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
323 (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
324 } while (0)
325
326# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
327 do \
328 { \
329 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
330 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
331 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
332 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
333 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
334 (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
335 } while (0)
336
337# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
338 do \
339 { \
340 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
341 (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
342 } while (0)
343
344# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
345 do \
346 { \
347 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
348 (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
349 } while (0)
350
351# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
352 do \
353 { \
354 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
355 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
356 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
357 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
358 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
359 (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
360 } while (0)
361# endif /* !IEM_WITH_CODE_TLB */
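/*
 * Editorial note (not part of the original source): the S8/S32 "SX" variants
 * sign-extend the fetched displacement into the wider destination. For
 * example, with the opcode bytes f0 ff ff ff at a_offDisp,
 * IEM_DISP_GET_S32_SX_U64() evaluates as:
 *
 *     RT_MAKE_U32_FROM_U8(0xf0, 0xff, 0xff, 0xff) = 0xfffffff0 (-16 as int32_t)
 *     (a_u64Disp) = (int32_t)0xfffffff0           = 0xfffffffffffffff0
 *
 * i.e. a displacement of -16 widened to 64 bits.
 */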
362
363/** Whether a shadow VMCS is present for the given VCPU. */
364#define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
365
366/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
367#define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->u64VmcsLinkPtr.u)
368
369/** Whether a current VMCS is present for the given VCPU. */
370#define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
371
372/** Gets the guest-physical address of the current VMCS for the given VCPU. */
373#define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
374
375/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
376#define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
377 do \
378 { \
379 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
380 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
381 } while (0)
382
383/** Clears any current VMCS for the given VCPU. */
384#define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
385 do \
386 { \
387 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
388 } while (0)
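/*
 * Editorial usage sketch (not part of the original source): these helpers are
 * meant to be paired, testing for presence before dereferencing, as done in
 * iemVmxCommitCurrentVmcsToMemory() further below:
 *
 *     Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
 *     RTGCPHYS const GCPhysVmcs = IEM_VMX_GET_CURRENT_VMCS(pVCpu);
 *     ... write the virtual VMCS back to GCPhysVmcs ...
 *     IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
 */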
389
390
391/**
392 * Returns whether the given VMCS field is valid and supported by our emulation.
393 *
394 * @param pVCpu The cross context virtual CPU structure.
395 * @param uFieldEnc The VMCS field encoding.
396 *
397 * @remarks This takes into account the CPU features exposed to the guest.
398 */
399IEM_STATIC bool iemVmxIsVmcsFieldValid(PVMCPU pVCpu, uint32_t uFieldEnc)
400{
401 PCCPUMFEATURES pFeat = IEM_GET_GUEST_CPU_FEATURES(pVCpu);
402 switch (uFieldEnc)
403 {
404 /*
405 * 16-bit fields.
406 */
407 /* Control fields. */
408 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
409 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
410 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
411
412 /* Guest-state fields. */
413 case VMX_VMCS16_GUEST_ES_SEL:
414 case VMX_VMCS16_GUEST_CS_SEL:
415 case VMX_VMCS16_GUEST_SS_SEL:
416 case VMX_VMCS16_GUEST_DS_SEL:
417 case VMX_VMCS16_GUEST_FS_SEL:
418 case VMX_VMCS16_GUEST_GS_SEL:
419 case VMX_VMCS16_GUEST_LDTR_SEL:
420 case VMX_VMCS16_GUEST_TR_SEL:
421 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
422 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
423
424 /* Host-state fields. */
425 case VMX_VMCS16_HOST_ES_SEL:
426 case VMX_VMCS16_HOST_CS_SEL:
427 case VMX_VMCS16_HOST_SS_SEL:
428 case VMX_VMCS16_HOST_DS_SEL:
429 case VMX_VMCS16_HOST_FS_SEL:
430 case VMX_VMCS16_HOST_GS_SEL:
431 case VMX_VMCS16_HOST_TR_SEL: return true;
432
433 /*
434 * 64-bit fields.
435 */
436 /* Control fields. */
437 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
438 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
439 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
440 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
441 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
442 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
443 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
444 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
445 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
446 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
447 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
448 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
449 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
450 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
451 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
452 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
453 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
454 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
455 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
456 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
457 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
458 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
459 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
460 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
461 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
462 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
463 case VMX_VMCS64_CTRL_EPTP_FULL:
464 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
465 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
466 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
467 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
468 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
469 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
470 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
471 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
472 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
473 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
474 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
475 {
476 uint64_t const uVmFuncMsr = CPUMGetGuestIa32VmxVmFunc(pVCpu);
477 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
478 }
479 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
480 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
481 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
482 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
483 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
484 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
485 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
486 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
487 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
488 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
489 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
490 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
491
492 /* Read-only data fields. */
493 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
494 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
495
496 /* Guest-state fields. */
497 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
498 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
499 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
500 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
501 case VMX_VMCS64_GUEST_PAT_FULL:
502 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
503 case VMX_VMCS64_GUEST_EFER_FULL:
504 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
505 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
506 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
507 case VMX_VMCS64_GUEST_PDPTE0_FULL:
508 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
509 case VMX_VMCS64_GUEST_PDPTE1_FULL:
510 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
511 case VMX_VMCS64_GUEST_PDPTE2_FULL:
512 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
513 case VMX_VMCS64_GUEST_PDPTE3_FULL:
514 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
515 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
516 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
517
518 /* Host-state fields. */
519 case VMX_VMCS64_HOST_PAT_FULL:
520 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
521 case VMX_VMCS64_HOST_EFER_FULL:
522 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
523 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
524 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
525
526 /*
527 * 32-bit fields.
528 */
529 /* Control fields. */
530 case VMX_VMCS32_CTRL_PIN_EXEC:
531 case VMX_VMCS32_CTRL_PROC_EXEC:
532 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
533 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
534 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
535 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
536 case VMX_VMCS32_CTRL_EXIT:
537 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
538 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
539 case VMX_VMCS32_CTRL_ENTRY:
540 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
541 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
542 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
543 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
544 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
545 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
546 case VMX_VMCS32_CTRL_PLE_GAP:
547 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
548
549 /* Read-only data fields. */
550 case VMX_VMCS32_RO_VM_INSTR_ERROR:
551 case VMX_VMCS32_RO_EXIT_REASON:
552 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
553 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
554 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
555 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
556 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
557 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
558
559 /* Guest-state fields. */
560 case VMX_VMCS32_GUEST_ES_LIMIT:
561 case VMX_VMCS32_GUEST_CS_LIMIT:
562 case VMX_VMCS32_GUEST_SS_LIMIT:
563 case VMX_VMCS32_GUEST_DS_LIMIT:
564 case VMX_VMCS32_GUEST_FS_LIMIT:
565 case VMX_VMCS32_GUEST_GS_LIMIT:
566 case VMX_VMCS32_GUEST_LDTR_LIMIT:
567 case VMX_VMCS32_GUEST_TR_LIMIT:
568 case VMX_VMCS32_GUEST_GDTR_LIMIT:
569 case VMX_VMCS32_GUEST_IDTR_LIMIT:
570 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
571 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
572 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
573 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
574 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
575 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
576 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
577 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
578 case VMX_VMCS32_GUEST_INT_STATE:
579 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
580 case VMX_VMCS32_GUEST_SMBASE:
581 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
582 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
583
584 /* Host-state fields. */
585 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
586
587 /*
588 * Natural-width fields.
589 */
590 /* Control fields. */
591 case VMX_VMCS_CTRL_CR0_MASK:
592 case VMX_VMCS_CTRL_CR4_MASK:
593 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
594 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
595 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
596 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
597 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
598 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
599
600 /* Read-only data fields. */
601 case VMX_VMCS_RO_EXIT_QUALIFICATION:
602 case VMX_VMCS_RO_IO_RCX:
603 case VMX_VMCS_RO_IO_RSX:
604 case VMX_VMCS_RO_IO_RDI:
605 case VMX_VMCS_RO_IO_RIP:
606 case VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR: return true;
607
608 /* Guest-state fields. */
609 case VMX_VMCS_GUEST_CR0:
610 case VMX_VMCS_GUEST_CR3:
611 case VMX_VMCS_GUEST_CR4:
612 case VMX_VMCS_GUEST_ES_BASE:
613 case VMX_VMCS_GUEST_CS_BASE:
614 case VMX_VMCS_GUEST_SS_BASE:
615 case VMX_VMCS_GUEST_DS_BASE:
616 case VMX_VMCS_GUEST_FS_BASE:
617 case VMX_VMCS_GUEST_GS_BASE:
618 case VMX_VMCS_GUEST_LDTR_BASE:
619 case VMX_VMCS_GUEST_TR_BASE:
620 case VMX_VMCS_GUEST_GDTR_BASE:
621 case VMX_VMCS_GUEST_IDTR_BASE:
622 case VMX_VMCS_GUEST_DR7:
623 case VMX_VMCS_GUEST_RSP:
624 case VMX_VMCS_GUEST_RIP:
625 case VMX_VMCS_GUEST_RFLAGS:
626 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
627 case VMX_VMCS_GUEST_SYSENTER_ESP:
628 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
629
630 /* Host-state fields. */
631 case VMX_VMCS_HOST_CR0:
632 case VMX_VMCS_HOST_CR3:
633 case VMX_VMCS_HOST_CR4:
634 case VMX_VMCS_HOST_FS_BASE:
635 case VMX_VMCS_HOST_GS_BASE:
636 case VMX_VMCS_HOST_TR_BASE:
637 case VMX_VMCS_HOST_GDTR_BASE:
638 case VMX_VMCS_HOST_IDTR_BASE:
639 case VMX_VMCS_HOST_SYSENTER_ESP:
640 case VMX_VMCS_HOST_SYSENTER_EIP:
641 case VMX_VMCS_HOST_RSP:
642 case VMX_VMCS_HOST_RIP: return true;
643 }
644
645 return false;
646}
647
648
649/**
650 * Gets VM-exit instruction information along with any displacement for an
651 * instruction VM-exit.
652 *
653 * @returns The VM-exit instruction information.
654 * @param pVCpu The cross context virtual CPU structure.
655 * @param uExitReason The VM-exit reason.
656 * @param InstrId The VM-exit instruction identity (VMX_INSTR_ID_XXX) if
657 * any. Pass VMX_INSTR_ID_NONE otherwise.
658 * @param pGCPtrDisp Where to store the displacement field. Optional, can be
659 * NULL.
660 */
661IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID InstrId, PRTGCPTR pGCPtrDisp)
662{
663 RTGCPTR GCPtrDisp;
664 VMXEXITINSTRINFO ExitInstrInfo;
665 ExitInstrInfo.u = 0;
666
667 /*
668 * Get and parse the ModR/M byte from our decoded opcodes.
669 */
670 uint8_t bRm;
671 uint8_t const offModRm = pVCpu->iem.s.offModRm;
672 IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
673 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
674 {
675 /*
676 * ModR/M indicates register addressing.
677 */
678 ExitInstrInfo.All.u2Scaling = 0;
679 ExitInstrInfo.All.iReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
680 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
681 ExitInstrInfo.All.fIsRegOperand = 1;
682 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
683 ExitInstrInfo.All.iSegReg = 0;
684 ExitInstrInfo.All.iIdxReg = 0;
685 ExitInstrInfo.All.fIdxRegInvalid = 1;
686 ExitInstrInfo.All.iBaseReg = 0;
687 ExitInstrInfo.All.fBaseRegInvalid = 1;
688 ExitInstrInfo.All.iReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
689
690 /* Displacement not applicable for register addressing. */
691 GCPtrDisp = 0;
692 }
693 else
694 {
695 /*
696 * ModR/M indicates memory addressing.
697 */
698 uint8_t uScale = 0;
699 bool fBaseRegValid = false;
700 bool fIdxRegValid = false;
701 uint8_t iBaseReg = 0;
702 uint8_t iIdxReg = 0;
703 uint8_t iReg2 = 0;
704 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
705 {
706 /*
707 * Parse the ModR/M, displacement for 16-bit addressing mode.
708 * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
709 */
710 uint16_t u16Disp = 0;
711 uint8_t const offDisp = offModRm + sizeof(bRm);
712 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
713 {
714 /* Displacement without any registers. */
715 IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
716 }
717 else
718 {
719 /* Register (index and base). */
720 switch (bRm & X86_MODRM_RM_MASK)
721 {
722 case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
723 case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
724 case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
725 case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
726 case 4: fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
727 case 5: fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
728 case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
729 case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
730 }
731
732 /* Register + displacement. */
733 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
734 {
735 case 0: break;
736 case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
737 case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp); break;
738 default:
739 {
740 /* Register addressing, handled at the beginning. */
741 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
742 break;
743 }
744 }
745 }
746
747 Assert(!uScale); /* There's no scaling/SIB byte for 16-bit addressing. */
748 GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
749 iReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
750 }
751 else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
752 {
753 /*
754 * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
755 * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
756 */
757 uint32_t u32Disp = 0;
758 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
759 {
760 /* Displacement without any registers. */
761 uint8_t const offDisp = offModRm + sizeof(bRm);
762 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
763 }
764 else
765 {
766 /* Register (and perhaps scale, index and base). */
767 uint8_t offDisp = offModRm + sizeof(bRm);
768 iBaseReg = (bRm & X86_MODRM_RM_MASK);
769 if (iBaseReg == 4)
770 {
771 /* An SIB byte follows the ModR/M byte, parse it. */
772 uint8_t bSib;
773 uint8_t const offSib = offModRm + sizeof(bRm);
774 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
775
776 /* A displacement may follow SIB, update its offset. */
777 offDisp += sizeof(bSib);
778
779 /* Get the scale. */
780 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
781
782 /* Get the index register. */
783 iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
784 fIdxRegValid = RT_BOOL(iIdxReg != 4);
785
786 /* Get the base register. */
787 iBaseReg = bSib & X86_SIB_BASE_MASK;
788 fBaseRegValid = true;
789 if (iBaseReg == 5)
790 {
791 if ((bRm & X86_MODRM_MOD_MASK) == 0)
792 {
793 /* Mod is 0 implies a 32-bit displacement with no base. */
794 fBaseRegValid = false;
795 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
796 }
797 else
798 {
799 /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
800 iBaseReg = X86_GREG_xBP;
801 }
802 }
803 }
804
805 /* Register + displacement. */
806 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
807 {
808 case 0: /* Handled above */ break;
809 case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
810 case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp); break;
811 default:
812 {
813 /* Register addressing, handled at the beginning. */
814 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
815 break;
816 }
817 }
818 }
819
820 GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
821 iReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
822 }
823 else
824 {
825 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
826
827 /*
828 * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
829 * See Intel instruction spec. 2.2 "IA-32e Mode".
830 */
831 uint64_t u64Disp = 0;
832 bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
833 if (fRipRelativeAddr)
834 {
835 /*
836 * RIP-relative addressing mode.
837 *
838 * The displacement is 32-bit signed, implying an offset range of +/-2G.
839 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
840 */
841 uint8_t const offDisp = offModRm + sizeof(bRm);
842 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
843 }
844 else
845 {
846 uint8_t offDisp = offModRm + sizeof(bRm);
847
848 /*
849 * Register (and perhaps scale, index and base).
850 *
851 * REX.B extends the most-significant bit of the base register. However, REX.B
852 * is ignored while determining whether an SIB follows the opcode. Hence, we
853 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
854 *
855 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
856 */
857 iBaseReg = (bRm & X86_MODRM_RM_MASK);
858 if (iBaseReg == 4)
859 {
860 /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
861 uint8_t bSib;
862 uint8_t const offSib = offModRm + sizeof(bRm);
863 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
864
865 /* Displacement may follow SIB, update its offset. */
866 offDisp += sizeof(bSib);
867
868 /* Get the scale. */
869 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
870
871 /* Get the index. */
872 iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
873 fIdxRegValid = RT_BOOL(iIdxReg != 4); /* R12 -can- be used as an index register. */
874
875 /* Get the base. */
876 iBaseReg = (bSib & X86_SIB_BASE_MASK);
877 fBaseRegValid = true;
878 if (iBaseReg == 5)
879 {
880 if ((bRm & X86_MODRM_MOD_MASK) == 0)
881 {
882 /* Mod is 0 implies a signed 32-bit displacement with no base. */
883 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
884 }
885 else
886 {
887 /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
888 iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
889 }
890 }
891 }
892 iBaseReg |= pVCpu->iem.s.uRexB;
893
894 /* Register + displacement. */
895 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
896 {
897 case 0: /* Handled above */ break;
898 case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp); break;
899 case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
900 default:
901 {
902 /* Register addressing, handled at the beginning. */
903 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
904 break;
905 }
906 }
907 }
908
909 GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
910 iReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
911 }
912
913 ExitInstrInfo.All.u2Scaling = uScale;
914 ExitInstrInfo.All.iReg1 = 0; /* Not applicable for memory instructions. */
915 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
916 ExitInstrInfo.All.fIsRegOperand = 0;
917 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
918 ExitInstrInfo.All.iSegReg = pVCpu->iem.s.iEffSeg;
919 ExitInstrInfo.All.iIdxReg = iIdxReg;
920 ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
921 ExitInstrInfo.All.iBaseReg = iBaseReg;
922 ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
923 ExitInstrInfo.All.iReg2 = iReg2;
924 }
925
926 /*
927 * Handle exceptions for certain instructions.
928 * (e.g. some instructions convey an instruction identity).
929 */
930 switch (uExitReason)
931 {
932 case VMX_EXIT_XDTR_ACCESS:
933 {
934 Assert(VMX_INSTR_ID_IS_VALID(InstrId));
935 ExitInstrInfo.GdtIdt.u2InstrId = VMX_INSTR_ID_GET_ID(InstrId);
936 ExitInstrInfo.GdtIdt.u2Undef0 = 0;
937 break;
938 }
939
940 case VMX_EXIT_TR_ACCESS:
941 {
942 Assert(VMX_INSTR_ID_IS_VALID(InstrId));
943 ExitInstrInfo.LdtTr.u2InstrId = VMX_INSTR_ID_GET_ID(InstrId);
944 ExitInstrInfo.LdtTr.u2Undef0 = 0;
945 break;
946 }
947
948 case VMX_EXIT_RDRAND:
949 case VMX_EXIT_RDSEED:
950 {
951 Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
952 break;
953 }
954 }
955
956 /* Update displacement and return the constructed VM-exit instruction information field. */
957 if (pGCPtrDisp)
958 *pGCPtrDisp = GCPtrDisp;
959 return ExitInstrInfo.u;
960}
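/*
 * Worked decode example (editorial addition, not part of the original source).
 * For a VMX instruction with the 64-bit memory operand [rbx + rcx*8 + 0x10]
 * and no segment override (ModR/M mod=01 rm=100, SIB=0xcb, disp8=0x10),
 * iemVmxGetExitInstrInfo() produces roughly:
 *
 *     ExitInstrInfo.All.u2Scaling     = 3        (scale factor 8)
 *     ExitInstrInfo.All.fIsRegOperand = 0        (memory operand)
 *     ExitInstrInfo.All.iIdxReg       = 1 (RCX), fIdxRegInvalid  = 0
 *     ExitInstrInfo.All.iBaseReg      = 3 (RBX), fBaseRegInvalid = 0
 *     *pGCPtrDisp                     = 0x10     (sign-extended disp8)
 *
 * The address-size, operand-size and segment fields are copied straight from
 * the IEM decoder state (enmEffAddrMode, enmEffOpSize, iEffSeg).
 */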
961
962
963/**
964 * Implements VMSucceed for VMX instruction success.
965 *
966 * @param pVCpu The cross context virtual CPU structure.
967 */
968DECLINLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
969{
970 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
971}
972
973
974/**
975 * Implements VMFailInvalid for VMX instruction failure.
976 *
977 * @param pVCpu The cross context virtual CPU structure.
978 */
979DECLINLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
980{
981 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
982 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_CF;
983}
984
985
986/**
987 * Implements VMFailValid for VMX instruction failure.
988 *
989 * @param pVCpu The cross context virtual CPU structure.
990 * @param enmInsErr The VM instruction error.
991 */
992DECLINLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
993{
994 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
995 {
996 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
997 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_ZF;
998 /** @todo NSTVMX: VMWrite enmInsErr to VM-instruction error field. */
999 RT_NOREF(enmInsErr);
1000 }
1001}
1002
1003
1004/**
1005 * Implements VMFail for VMX instruction failure.
1006 *
1007 * @param pVCpu The cross context virtual CPU structure.
1008 * @param enmInsErr The VM instruction error.
1009 */
1010DECLINLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1011{
1012 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1013 {
1014 iemVmxVmFailValid(pVCpu, enmInsErr);
1015 /** @todo Set VM-instruction error field in the current virtual-VMCS. */
1016 }
1017 else
1018 iemVmxVmFailInvalid(pVCpu);
1019}
1020
1021
1022/**
1023 * Flushes the current VMCS contents back to guest memory.
1024 *
1025 * @returns VBox status code.
1026 * @param pVCpu The cross context virtual CPU structure.
1027 */
1028DECLINLINE(int) iemVmxCommitCurrentVmcsToMemory(PVMCPU pVCpu)
1029{
1030 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1031 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
1032 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), sizeof(VMXVVMCS));
1033 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
1034 return rc;
1035}
1036
1037
1038/**
1039 * VMWRITE instruction execution worker.
1040 *
1041 * @param pVCpu The cross context virtual CPU structure.
1042 * @param cbInstr The instruction length.
1043 * @param iEffSeg The effective segment register to use with @a u64Val.
1044 * Pass UINT8_MAX if it is a register access.
1045 * @param enmEffAddrMode The effective addressing mode.
1046 * @param u64Val The value to write (or the guest linear address of the
1047 * value); @a iEffSeg indicates whether it is a memory
1048 * operand.
1049 * @param uFieldEnc The VMCS field encoding.
1050 * @param pExitInfo Pointer to the VM-exit information struct.
1051 */
1052IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode,
1053 uint64_t u64Val, uint32_t uFieldEnc, PCVMXVEXITINFO pExitInfo)
1054{
1055 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1056 {
1057 RT_NOREF(pExitInfo);
1058 /** @todo NSTVMX: intercept. */
1059 /** @todo NSTVMX: VMCS shadowing intercept (VMREAD/VMWRITE bitmap). */
1060 }
1061
1062 /* CPL. */
1063 if (CPUMGetGuestCPL(pVCpu) > 0)
1064 {
1065 Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1066 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_Cpl;
1067 return iemRaiseGeneralProtectionFault0(pVCpu);
1068 }
1069
1070 /* VMCS pointer in root mode. */
1071 if ( IEM_IS_VMX_ROOT_MODE(pVCpu)
1072 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1073 {
1074 Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
1075 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_PtrInvalid;
1076 iemVmxVmFailInvalid(pVCpu);
1077 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1078 return VINF_SUCCESS;
1079 }
1080
1081 /* VMCS-link pointer in non-root mode. */
1082 if ( IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
1083 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
1084 {
1085 Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
1086 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_PtrInvalid;
1087 iemVmxVmFailInvalid(pVCpu);
1088 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1089 return VINF_SUCCESS;
1090 }
1091
1092 /* If the VMWRITE instruction references memory, fetch the value from the specified memory operand. */
1093 bool const fIsRegOperand = iEffSeg == UINT8_MAX;
1094 if (!fIsRegOperand)
1095 {
1096 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
1097 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
1098 RTGCPTR const GCPtrVal = u64Val & s_auAddrSizeMasks[enmEffAddrMode];
1099
1100 /* Read the value from the specified guest memory location. */
1101 VBOXSTRICTRC rcStrict;
1102 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1103 rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
1104 else
1105 {
1106 uint32_t u32Val;
1107 rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iEffSeg, GCPtrVal);
1108 u64Val = u32Val;
1109 }
1110 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1111 {
1112 Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
1113 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_PtrMap;
1114 return rcStrict;
1115 }
1116 }
1117
1118 /* Supported VMCS field. */
1119 if (!iemVmxIsVmcsFieldValid(pVCpu, uFieldEnc))
1120 {
1121 Log(("vmwrite: VMCS field %#x invalid -> VMFail\n", uFieldEnc));
1122 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_FieldInvalid;
1123 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
1124 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1125 return VINF_SUCCESS;
1126 }
1127
1128 /* Read-only VMCS field. */
1129 bool const fReadOnlyField = HMVmxIsVmcsFieldReadOnly(uFieldEnc);
1130 if ( fReadOnlyField
1131 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
1132 {
1133 Log(("vmwrite: Write to read-only VMCS component %#x -> VMFail\n", uFieldEnc));
1134 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_FieldRo;
1135 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
1136 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1137 return VINF_SUCCESS;
1138 }
1139
1140 /*
1141 * Setup writing to the current or shadow VMCS.
1142 */
1143 uint8_t *pbVmcs;
1144 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1145 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
1146 else
1147 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1148 Assert(pbVmcs);
1149
1150 PCVMXVMCSFIELDENC pFieldEnc = (PCVMXVMCSFIELDENC)&uFieldEnc;
1151 uint8_t const uWidth = pFieldEnc->n.u2Width;
1152 uint8_t const uType = pFieldEnc->n.u2Type;
1153 uint8_t const uWidthType = (uWidth << 2) | uType;
1154 uint8_t const uIndex = pFieldEnc->n.u8Index;
1155 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
1156 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
1157
1158 /*
1159 * Write the VMCS component based on the field's effective width.
1160 *
1161 * The effective width is 64-bit fields adjusted to 32-bits if the access-type
1162 * indicates high bits (little endian).
1163 */
1164 uint8_t *pbField = pbVmcs + offField;
1165 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(uFieldEnc);
1166 switch (uEffWidth)
1167 {
1168 case VMX_VMCS_ENC_WIDTH_64BIT:
1169 case VMX_VMCS_ENC_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
1170 case VMX_VMCS_ENC_WIDTH_32BIT: *(uint32_t *)pbField = u64Val; break;
1171 case VMX_VMCS_ENC_WIDTH_16BIT: *(uint16_t *)pbField = u64Val; break;
1172 }
1173
1174 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_Success;
1175 iemVmxVmSucceed(pVCpu);
1176 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1177 return VINF_SUCCESS;
1178}
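/*
 * Editorial usage sketch (not part of the original source; the caller-side
 * variable names shown are hypothetical). For a register operand the decoder
 * passes the GPR value directly and UINT8_MAX as the segment; for a memory
 * operand it passes the guest linear address in u64Val together with the
 * effective segment and address mode, and the worker fetches the value:
 *
 *     iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX, IEMMODE_64BIT,
 *                   uValueFromGpr, uFieldEnc, NULL);                  (register form)
 *
 *     iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, pVCpu->iem.s.enmEffAddrMode,
 *                   GCPtrValue, uFieldEnc, NULL);                     (memory form)
 */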
1179
1180
1181/**
1182 * VMCLEAR instruction execution worker.
1183 *
1184 * @param pVCpu The cross context virtual CPU structure.
1185 * @param cbInstr The instruction length.
1186 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
1187 * @param GCPtrVmcs The linear address of the VMCS pointer.
1188 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1189 * be NULL.
1190 *
1191 * @remarks Common VMX instruction checks are expected to have already been done by the caller,
1192 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1193 */
1194IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmcs,
1195 PCVMXVEXITINFO pExitInfo)
1196{
1197 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1198 {
1199 RT_NOREF(pExitInfo);
1200 /** @todo NSTVMX: intercept. */
1201 }
1202 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1203
1204 /* CPL. */
1205 if (CPUMGetGuestCPL(pVCpu) > 0)
1206 {
1207 Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1208 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_Cpl;
1209 return iemRaiseGeneralProtectionFault0(pVCpu);
1210 }
1211
1212 /* Get the VMCS pointer from the location specified by the source memory operand. */
1213 RTGCPHYS GCPhysVmcs;
1214 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
1215 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1216 {
1217 Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
1218 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrMap;
1219 return rcStrict;
1220 }
1221
1222 /* VMCS pointer alignment. */
1223 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
1224 {
1225 Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
1226 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrAlign;
1227 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1228 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1229 return VINF_SUCCESS;
1230 }
1231
1232 /* VMCS physical-address width limits. */
1233 Assert(!VMX_V_VMCS_PHYSADDR_4G_LIMIT);
1234 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
1235 {
1236 Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
1237 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrWidth;
1238 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1239 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1240 return VINF_SUCCESS;
1241 }
1242
1243 /* VMCS is not the VMXON region. */
1244 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
1245 {
1246 Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
1247 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrVmxon;
1248 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
1249 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1250 return VINF_SUCCESS;
1251 }
1252
1253 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
1254 restriction imposed by our implementation. */
1255 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
1256 {
1257 Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
1258 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrAbnormal;
1259 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1260 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1261 return VINF_SUCCESS;
1262 }
1263
1264 /*
1265 * VMCLEAR allows committing and clearing any valid VMCS pointer.
1266 *
1267 * If the current VMCS is the one being cleared, set its state to 'clear' and commit
1268 * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
1269 * to 'clear'.
1270 */
1271 uint8_t const fVmcsStateClear = VMX_V_VMCS_STATE_CLEAR;
1272 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
1273 {
1274 Assert(GCPhysVmcs != NIL_RTGCPHYS); /* Paranoia. */
1275 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsStateClear;
1276 iemVmxCommitCurrentVmcsToMemory(pVCpu);
1277 Assert(!IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1278 }
1279 else
1280 {
1281 rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_OFFSETOF(VMXVVMCS, fVmcsState),
1282 (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
1283 }
1284
1285 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_Success;
1286 iemVmxVmSucceed(pVCpu);
1287 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1288 return rcStrict;
1289}
1290
1291
1292/**
1293 * VMPTRST instruction execution worker.
1294 *
1295 * @param pVCpu The cross context virtual CPU structure.
1296 * @param cbInstr The instruction length.
1297 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
1298 * @param GCPtrVmcs The linear address of where to store the current VMCS
1299 * pointer.
1300 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1301 * be NULL.
1302 *
1303 * @remarks Common VMX instruction checks are expected to have already been done by the caller,
1304 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1305 */
1306IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmcs,
1307 PCVMXVEXITINFO pExitInfo)
1308{
1309 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1310 {
1311 RT_NOREF(pExitInfo);
1312 /** @todo NSTVMX: intercept. */
1313 }
1314 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1315
1316 /* CPL. */
1317 if (CPUMGetGuestCPL(pVCpu) > 0)
1318 {
1319 Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1320 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrst_Cpl;
1321 return iemRaiseGeneralProtectionFault0(pVCpu);
1322 }
1323
1324 /* Set the VMCS pointer to the location specified by the destination memory operand. */
1325 AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
1326 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
1327 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1328 {
1329 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrst_Success;
1330 iemVmxVmSucceed(pVCpu);
1331 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1332 return rcStrict;
1333 }
1334
1335 Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand, rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1336 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrst_PtrMap;
1337 return rcStrict;
1338}
1339
1340
1341/**
1342 * VMPTRLD instruction execution worker.
1343 *
1344 * @param pVCpu The cross context virtual CPU structure.
1345 * @param cbInstr The instruction length.
1346 * @param GCPtrVmcs The linear address of the current VMCS pointer.
1347 * @param pExitInfo Pointer to the virtual VM-exit information struct.
1348 * Optional, can be NULL.
1349 *
1350 * @remarks Common VMX instruction checks are expected to have already been done by the caller,
1351 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1352 */
1353IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmcs,
1354 PCVMXVEXITINFO pExitInfo)
1355{
1356 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1357 {
1358 RT_NOREF(pExitInfo);
1359 /** @todo NSTVMX: intercept. */
1360 }
1361 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1362
1363 /* CPL. */
1364 if (CPUMGetGuestCPL(pVCpu) > 0)
1365 {
1366 Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1367 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_Cpl;
1368 return iemRaiseGeneralProtectionFault0(pVCpu);
1369 }
1370
1371 /* Get the VMCS pointer from the location specified by the source memory operand. */
1372 RTGCPHYS GCPhysVmcs;
1373 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
1374 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1375 {
1376 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
1377 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrMap;
1378 return rcStrict;
1379 }
1380
1381 /* VMCS pointer alignment. */
1382 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
1383 {
1384 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
1385 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrAlign;
1386 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1387 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1388 return VINF_SUCCESS;
1389 }
1390
1391 /* VMCS physical-address width limits. */
1392 Assert(!VMX_V_VMCS_PHYSADDR_4G_LIMIT);
1393 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
1394 {
1395 Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
1396 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrWidth;
1397 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1398 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1399 return VINF_SUCCESS;
1400 }
1401
1402 /* VMCS is not the VMXON region. */
1403 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
1404 {
1405 Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
1406 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrVmxon;
1407 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
1408 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1409 return VINF_SUCCESS;
1410 }
1411
1412 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
1413 restriction imposed by our implementation. */
1414 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
1415 {
1416 Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
1417 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrAbnormal;
1418 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1419 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1420 return VINF_SUCCESS;
1421 }
1422
1423 /* Read the VMCS revision ID from the VMCS. */
1424 VMXVMCSREVID VmcsRevId;
1425 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
1426 if (RT_FAILURE(rc))
1427 {
1428 Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
1429 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrReadPhys;
1430 return rc;
1431 }
1432
1433 /* Verify that the VMCS revision specified by the guest matches what we reported to the guest,
1434 and that the shadow-VMCS indicator is only set when we expose VMCS shadowing. */
1435 if ( VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
1436 || ( VmcsRevId.n.fIsShadowVmcs
1437 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
1438 {
1439 if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
1440 {
1441 Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFail()\n", VMX_V_VMCS_REVISION_ID,
1442 VmcsRevId.n.u31RevisionId));
1443 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_VmcsRevId;
1444 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
1445 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1446 return VINF_SUCCESS;
1447 }
1448
1449 Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
1450 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_ShadowVmcs;
1451 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
1452 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1453 return VINF_SUCCESS;
1454 }
1455
1456 /*
1457 * We only maintain the current VMCS in our virtual CPU context (CPUMCTX). Therefore,
1458 * VMPTRLD shall always flush any existing current VMCS back to guest memory before loading
1459 * a new VMCS as current.
1460 */
1461 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
1462 {
1463 iemVmxCommitCurrentVmcsToMemory(pVCpu);
1464 IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
1465 }
1466 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_Success;
1467 iemVmxVmSucceed(pVCpu);
1468 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1469 return VINF_SUCCESS;
1470}
1471
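/*
 * Informational sketch (not part of the original sources): the physical-address checks
 * performed by VMPTRLD above (and by VMXON below) reduce to the predicate sketched here.
 * The helper name and its standalone form are assumptions for illustration only.
 */
#if 0 /* illustrative example, not compiled */
DECLINLINE(bool) iemVmxIsVmcsPtrUsable(RTGCPHYS GCPhysPtr, uint8_t cMaxPhysAddrWidth)
{
    /* Must be 4K page-aligned... */
    if (GCPhysPtr & X86_PAGE_4K_OFFSET_MASK)
        return false;
    /* ...and must not exceed the guest's physical-address width. */
    if (GCPhysPtr >> cMaxPhysAddrWidth)
        return false;
    return true;
}
#endif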
1472
1473/**
1474 * VMXON instruction execution worker.
1475 *
1476 * @param pVCpu The cross context virtual CPU structure.
1477 * @param cbInstr The instruction length.
1478 * @param iEffSeg The effective segment register to use with @a
1479 * GCPtrVmxon.
1480 * @param GCPtrVmxon The linear address of the VMXON pointer.
1481 * @param pExitInfo Pointer to the VM-exit instruction information struct.
1482 * Optional, can be NULL.
1483 *
1484 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
1485 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1486 */
1487IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmxon,
1488 PCVMXVEXITINFO pExitInfo)
1489{
1490#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1491 RT_NOREF5(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
1492 return VINF_EM_RAW_EMULATE_INSTR;
1493#else
1494 if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
1495 {
1496 /* CPL. */
1497 if (pVCpu->iem.s.uCpl > 0)
1498 {
1499 Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1500 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cpl;
1501 return iemRaiseGeneralProtectionFault0(pVCpu);
1502 }
1503
1504 /* A20M (A20 Masked) mode. */
1505 if (!PGMPhysIsA20Enabled(pVCpu))
1506 {
1507 Log(("vmxon: A20M mode -> #GP(0)\n"));
1508 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_A20M;
1509 return iemRaiseGeneralProtectionFault0(pVCpu);
1510 }
1511
1512 /* CR0 fixed bits. */
1513 bool const fUnrestrictedGuest = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxUnrestrictedGuest;
1514 uint64_t const uCr0Fixed0 = fUnrestrictedGuest ? VMX_V_CR0_FIXED0_UX : VMX_V_CR0_FIXED0;
1515 if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
1516 {
1517 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
1518 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cr0Fixed0;
1519 return iemRaiseGeneralProtectionFault0(pVCpu);
1520 }
1521
1522 /* CR4 fixed bits. */
1523 if ((pVCpu->cpum.GstCtx.cr4 & VMX_V_CR4_FIXED0) != VMX_V_CR4_FIXED0)
1524 {
1525 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
1526 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cr4Fixed0;
1527 return iemRaiseGeneralProtectionFault0(pVCpu);
1528 }
1529
1530 /* Feature control MSR's LOCK and VMXON bits. */
1531 uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
1532 if ((uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON)) != (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
1533 {
1534 Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
1535 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_MsrFeatCtl;
1536 return iemRaiseGeneralProtectionFault0(pVCpu);
1537 }
1538
1539 /* Get the VMXON pointer from the location specified by the source memory operand. */
1540 RTGCPHYS GCPhysVmxon;
1541 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
1542 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1543 {
1544 Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
1545 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrMap;
1546 return rcStrict;
1547 }
1548
1549 /* VMXON region pointer alignment. */
1550 if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
1551 {
1552 Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
1553 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrAlign;
1554 iemVmxVmFailInvalid(pVCpu);
1555 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1556 return VINF_SUCCESS;
1557 }
1558
1559 /* VMXON physical-address width limits. */
1560 Assert(!VMX_V_VMCS_PHYSADDR_4G_LIMIT);
1561 if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
1562 {
1563 Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
1564 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrWidth;
1565 iemVmxVmFailInvalid(pVCpu);
1566 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1567 return VINF_SUCCESS;
1568 }
1569
1570 /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
1571 restriction imposed by our implementation. */
1572 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
1573 {
1574 Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
1575 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrAbnormal;
1576 iemVmxVmFailInvalid(pVCpu);
1577 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1578 return VINF_SUCCESS;
1579 }
1580
1581 /* Read the VMCS revision ID from the VMXON region. */
1582 VMXVMCSREVID VmcsRevId;
1583 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
1584 if (RT_FAILURE(rc))
1585 {
1586 Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
1587 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrReadPhys;
1588 return rc;
1589 }
1590
1591 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
1592 if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
1593 {
1594 /* Revision ID mismatch. */
1595 if (!VmcsRevId.n.fIsShadowVmcs)
1596 {
1597 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
1598 VmcsRevId.n.u31RevisionId));
1599 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmcsRevId;
1600 iemVmxVmFailInvalid(pVCpu);
1601 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1602 return VINF_SUCCESS;
1603 }
1604
1605 /* Shadow VMCS disallowed. */
1606 Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
1607 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_ShadowVmcs;
1608 iemVmxVmFailInvalid(pVCpu);
1609 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1610 return VINF_SUCCESS;
1611 }
1612
1613 /*
1614 * Record that we're in VMX operation, block INIT, block and disable A20M.
1615 */
1616 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
1617 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
1618 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
1619 /** @todo NSTVMX: clear address-range monitoring. */
1620 /** @todo NSTVMX: Intel PT. */
1621 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Success;
1622 iemVmxVmSucceed(pVCpu);
1623 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1624# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
1625 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
1626# else
1627 return VINF_SUCCESS;
1628# endif
1629 }
1630 else if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1631 {
1632 RT_NOREF(pExitInfo);
1633 /** @todo NSTVMX: intercept. */
1634 }
1635
1636 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1637
1638 /* CPL. */
1639 if (pVCpu->iem.s.uCpl > 0)
1640 {
1641 Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1642 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmxRootCpl;
1643 return iemRaiseGeneralProtectionFault0(pVCpu);
1644 }
1645
1646 /* VMXON when already in VMX root mode. */
1647 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
1648 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmxRoot;
1649 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1650 return VINF_SUCCESS;
1651#endif
1652}
1653
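/*
 * Informational sketch (not part of the original sources): a hypothetical guest-side
 * sequence for entering VMX operation, matching the checks performed by the VMXON worker
 * above: the VMXON region must be 4K-aligned and its first 31 bits must hold the VMCS
 * revision ID reported by the IA32_VMX_BASIC MSR (0x480). The function names, the
 * GCC-style inline assembly and the caller-provided region are assumptions for
 * illustration only.
 */
#if 0 /* illustrative example, not compiled */
#include <stdint.h>

static inline uint64_t myGuestRdmsr(uint32_t idMsr)
{
    uint32_t uLo, uHi;
    __asm__ __volatile__("rdmsr" : "=a" (uLo), "=d" (uHi) : "c" (idMsr));
    return ((uint64_t)uHi << 32) | uLo;
}

/* pvVmxonRegion: zeroed, 4K-aligned virtual mapping; HCPhysVmxonRegion: its physical address. */
static int myGuestVmxon(void *pvVmxonRegion, uint64_t HCPhysVmxonRegion)
{
    /* Write the VMCS revision ID (IA32_VMX_BASIC[30:0]) into the start of the region. */
    *(uint32_t *)pvVmxonRegion = (uint32_t)(myGuestRdmsr(0x480) & UINT32_C(0x7fffffff));

    /* VMXON takes an m64 operand holding the physical address; SETNA catches CF=1 (VMfailInvalid) or ZF=1. */
    uint8_t fFailed;
    __asm__ __volatile__("vmxon %1; setna %0"
                         : "=q" (fFailed)
                         : "m" (HCPhysVmxonRegion)
                         : "cc", "memory");
    return fFailed ? -1 : 0;
}
#endif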
1654
1655/**
1656 * Implements 'VMXON'.
1657 */
1658IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
1659{
1660 return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
1661}
1662
1663
1664/**
1665 * Implements 'VMXOFF'.
1666 */
1667IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
1668{
1669# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1670 RT_NOREF2(pVCpu, cbInstr);
1671 return VINF_EM_RAW_EMULATE_INSTR;
1672# else
1673 IEM_VMX_INSTR_COMMON_CHECKS(pVCpu, "vmxoff", kVmxVInstrDiag_Vmxoff);
1674 if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
1675 {
1676 Log(("vmxoff: Not in VMX root mode -> #GP(0)\n"));
1677 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_VmxRoot;
1678 return iemRaiseUndefinedOpcode(pVCpu);
1679 }
1680
1681 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1682 {
1683 /** @todo NSTVMX: intercept. */
1684 }
1685
1686 /* CPL. */
1687 if (pVCpu->iem.s.uCpl > 0)
1688 {
1689 Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1690 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_Cpl;
1691 return iemRaiseGeneralProtectionFault0(pVCpu);
1692 }
1693
1694 /* Dual monitor treatment of SMIs and SMM. */
1695 uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
1696 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
1697 {
1698 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
1699 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1700 return VINF_SUCCESS;
1701 }
1702
1703 /*
1704 * Record that we're no longer in VMX root operation, unblock INIT, unblock and enable A20M.
1705 */
1706 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
1707 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
1708
1709 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
1710 { /** @todo NSTVMX: Unblock SMI. */ }
1711 /** @todo NSTVMX: Unblock and enable A20M. */
1712 /** @todo NSTVMX: Clear address-range monitoring. */
1713
1714 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_Success;
1715 iemVmxVmSucceed(pVCpu);
1716 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1717# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
1718 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
1719# else
1720 return VINF_SUCCESS;
1721# endif
1722# endif
1723}
1724
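/*
 * Informational sketch (not part of the original sources): a hypothetical guest-side
 * teardown order complementing the VMXOFF emulation above: VMCLEAR the current VMCS so
 * its state is flushed to memory, then leave VMX operation. Names and GCC-style inline
 * assembly are assumptions for illustration only.
 */
#if 0 /* illustrative example, not compiled */
static inline void myGuestVmxTeardown(uint64_t HCPhysCurVmcs)
{
    /* VMCLEAR takes the physical address of the VMCS as an m64 operand. */
    __asm__ __volatile__("vmclear %0" : : "m" (HCPhysCurVmcs) : "cc", "memory");
    /* VMXOFF has no operands; it VMfails if the dual-monitor treatment is active. */
    __asm__ __volatile__("vmxoff" : : : "cc");
}
#endif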
1725
1726/**
1727 * Implements 'VMPTRLD'.
1728 */
1729IEM_CIMPL_DEF_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
1730{
1731 return iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
1732}
1733
1734
1735/**
1736 * Implements 'VMPTRST'.
1737 */
1738IEM_CIMPL_DEF_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
1739{
1740 return iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
1741}
1742
1743
1744/**
1745 * Implements 'VMCLEAR'.
1746 */
1747IEM_CIMPL_DEF_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
1748{
1749 return iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
1750}
1751
1752
1753/**
1754 * Implements 'VMWRITE' register.
1755 */
1756IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint32_t, uFieldEnc)
1757{
1758 return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /*iEffSeg*/, IEMMODE_64BIT /* N/A */, u64Val, uFieldEnc, NULL /* pExitInfo */);
1759}
1760
1761
1762/**
1763 * Implements 'VMWRITE' memory.
1764 */
1765IEM_CIMPL_DEF_4(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrVal, uint32_t, uFieldEnc)
1766{
1767 return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrVal, uFieldEnc, NULL /* pExitInfo */);
1768}
1769
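/*
 * Informational sketch (not part of the original sources): a hypothetical guest-side
 * VMWRITE wrapper corresponding to the register/memory thunks above. With GCC/AT&T
 * syntax the value is passed first and the field encoding second, and SETNA captures
 * both VMfailInvalid (CF=1) and VMfailValid (ZF=1). The function name and inline
 * assembly style are assumptions for illustration only.
 */
#if 0 /* illustrative example, not compiled */
static inline int myGuestVmwrite(uint64_t uFieldEnc, uint64_t u64Val)
{
    uint8_t fFailed;
    __asm__ __volatile__("vmwrite %2, %1; setna %0"
                         : "=q" (fFailed)
                         : "r" (uFieldEnc), "rm" (u64Val)
                         : "cc");
    return fFailed ? -1 : 0;
}
#endif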
1770#endif
1771