VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h@ 73992

Last change on this file since 73992 was 73984, checked in by vboxsync, 6 years ago

VMM/HM, IEM: Renamed VMX_EXIT_XDTR_ACCESS and VMX_EXIT_TR_ACCESS.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 86.0 KB
1/* $Id: IEMAllCImplVmxInstr.cpp.h 73984 2018-08-31 08:30:50Z vboxsync $ */
2/** @file
3 * IEM - VT-x instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/**
20 * Implements 'VMCALL'.
21 */
22IEM_CIMPL_DEF_0(iemCImpl_vmcall)
23{
24 /** @todo NSTVMX: intercept. */
25
26 /* Join forces with vmmcall. */
27 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
28}
29
30#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
31/**
32 * Map of VMCS field encodings to their virtual-VMCS structure offsets.
33 *
34 * The first array dimension is the VMCS field encoding's Width OR'ed with its Type,
35 * and the second dimension is the Index; see VMXVMCSFIELDENC.
36 */
37uint16_t const g_aoffVmcsMap[16][VMX_V_VMCS_MAX_INDEX + 1] =
38{
39 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
40 {
41 /* 0 */ RT_OFFSETOF(VMXVVMCS, u16Vpid),
42 /* 1 */ RT_OFFSETOF(VMXVVMCS, u16PostIntNotifyVector),
43 /* 2 */ RT_OFFSETOF(VMXVVMCS, u16EptpIndex),
44 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
45 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
46 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
47 },
48 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
49 {
50 /* 0-7 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
51 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
52 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
53 /* 24-25 */ UINT16_MAX, UINT16_MAX
54 },
55 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
56 {
57 /* 0 */ RT_OFFSETOF(VMXVVMCS, GuestEs),
58 /* 1 */ RT_OFFSETOF(VMXVVMCS, GuestCs),
59 /* 2 */ RT_OFFSETOF(VMXVVMCS, GuestSs),
60 /* 3 */ RT_OFFSETOF(VMXVVMCS, GuestDs),
61 /* 4 */ RT_OFFSETOF(VMXVVMCS, GuestFs),
62 /* 5 */ RT_OFFSETOF(VMXVVMCS, GuestGs),
63 /* 6 */ RT_OFFSETOF(VMXVVMCS, GuestLdtr),
64 /* 7 */ RT_OFFSETOF(VMXVVMCS, GuestTr),
65 /* 8 */ RT_OFFSETOF(VMXVVMCS, u16GuestIntStatus),
66 /* 9 */ RT_OFFSETOF(VMXVVMCS, u16PmlIndex),
67 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
68 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
69 },
70 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
71 {
72 /* 0 */ RT_OFFSETOF(VMXVVMCS, HostEs),
73 /* 1 */ RT_OFFSETOF(VMXVVMCS, HostCs),
74 /* 2 */ RT_OFFSETOF(VMXVVMCS, HostSs),
75 /* 3 */ RT_OFFSETOF(VMXVVMCS, HostDs),
76 /* 4 */ RT_OFFSETOF(VMXVVMCS, HostFs),
77 /* 5 */ RT_OFFSETOF(VMXVVMCS, HostGs),
78 /* 6 */ RT_OFFSETOF(VMXVVMCS, HostTr),
79 /* 7-14 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
80 /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
81 /* 23-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
82 },
83 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
84 {
85 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapA),
86 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapB),
87 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64AddrMsrBitmap),
88 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmExitMsrStore),
89 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmExitMsrLoad),
90 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmEntryMsrLoad),
91 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64ExecVmcsPtr),
92 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64AddrPml),
93 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64TscOffset),
94 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64AddrVirtApic),
95 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64AddrApicAccess),
96 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64AddrPostedIntDesc),
97 /* 12 */ RT_OFFSETOF(VMXVVMCS, u64VmFuncCtls),
98 /* 13 */ RT_OFFSETOF(VMXVVMCS, u64EptpPtr),
99 /* 14 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap0),
100 /* 15 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap1),
101 /* 16 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap2),
102 /* 17 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap3),
103 /* 18 */ RT_OFFSETOF(VMXVVMCS, u64AddrEptpList),
104 /* 19 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmreadBitmap),
105 /* 20 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmwriteBitmap),
106 /* 21 */ RT_OFFSETOF(VMXVVMCS, u64AddrXcptVeInfo),
107 /* 22 */ RT_OFFSETOF(VMXVVMCS, u64AddrXssBitmap),
108 /* 23 */ RT_OFFSETOF(VMXVVMCS, u64AddrEnclsBitmap),
109 /* 24 */ UINT16_MAX,
110 /* 25 */ RT_OFFSETOF(VMXVVMCS, u64TscMultiplier)
111 },
112 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
113 {
114 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64GuestPhysAddr),
115 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
116 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
117 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
118 /* 25 */ UINT16_MAX
119 },
120 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
121 {
122 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64VmcsLinkPtr),
123 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64GuestDebugCtlMsr),
124 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64GuestPatMsr),
125 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEferMsr),
126 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
127 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte0),
128 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte1),
129 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte2),
130 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte3),
131 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64GuestBndcfgsMsr),
132 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
133 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
134 },
135 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
136 {
137 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64HostPatMsr),
138 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64HostEferMsr),
139 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64HostPerfGlobalCtlMsr),
140 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
141 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
142 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
143 },
144 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
145 {
146 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32PinCtls),
147 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls),
148 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32XcptBitmap),
149 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMask),
150 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMatch),
151 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32Cr3TargetCount),
152 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32ExitCtls),
153 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrStoreCount),
154 /* 8 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrLoadCount),
155 /* 9 */ RT_OFFSETOF(VMXVVMCS, u32EntryCtls),
156 /* 10 */ RT_OFFSETOF(VMXVVMCS, u32EntryMsrLoadCount),
157 /* 11 */ RT_OFFSETOF(VMXVVMCS, u32EntryIntInfo),
158 /* 12 */ RT_OFFSETOF(VMXVVMCS, u32EntryXcptErrCode),
159 /* 13 */ RT_OFFSETOF(VMXVVMCS, u32EntryInstrLen),
160 /* 14 */ RT_OFFSETOF(VMXVVMCS, u32TprTreshold),
161 /* 15 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls2),
162 /* 16 */ RT_OFFSETOF(VMXVVMCS, u32PleGap),
163 /* 17 */ RT_OFFSETOF(VMXVVMCS, u32PleWindow),
164 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
165 },
166 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
167 {
168 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32RoVmInstrError),
169 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitReason),
170 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitIntInfo),
171 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitErrCode),
172 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
173 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
174 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitInstrLen),
175 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitInstrInfo),
176 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
177 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
178 /* 24-25 */ UINT16_MAX, UINT16_MAX
179 },
180 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
181 {
182 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsLimit),
183 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsLimit),
184 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsLimit),
185 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsLimit),
186 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsLimit),
187 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsLimit),
188 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrLimit),
189 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrLimit),
190 /* 8 */ RT_OFFSETOF(VMXVVMCS, u32GuestGdtrLimit),
191 /* 9 */ RT_OFFSETOF(VMXVVMCS, u32GuestIdtrLimit),
192 /* 10 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsAttr),
193 /* 11 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsAttr),
194 /* 12 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsAttr),
195 /* 13 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsAttr),
196 /* 14 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsAttr),
197 /* 15 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsAttr),
198 /* 16 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrAttr),
199 /* 17 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrAttr),
200 /* 18 */ RT_OFFSETOF(VMXVVMCS, u32GuestIntrState),
201 /* 19 */ RT_OFFSETOF(VMXVVMCS, u32GuestActivityState),
202 /* 20 */ RT_OFFSETOF(VMXVVMCS, u32GuestSmBase),
203 /* 21 */ RT_OFFSETOF(VMXVVMCS, u32GuestSysenterCS),
204 /* 22 */ UINT16_MAX,
205 /* 23 */ RT_OFFSETOF(VMXVVMCS, u32PreemptTimer),
206 /* 24-25 */ UINT16_MAX, UINT16_MAX
207 },
208 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
209 {
210 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32HostSysenterCs),
211 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
212 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
213 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
214 /* 25 */ UINT16_MAX
215 },
216 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_CONTROL: */
217 {
218 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64Cr0Mask),
219 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64Cr4Mask),
220 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64Cr0ReadShadow),
221 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64Cr4ReadShadow),
222 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target0),
223 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target1),
224 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target2),
225 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target3),
226 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
227 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
228 /* 24-25 */ UINT16_MAX, UINT16_MAX
229 },
230 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
231 {
232 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64ExitQual),
233 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64IoRcx),
234 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64IoRsi),
235 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64IoRdi),
236 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64IoRip),
237 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestLinearAddr),
238 /* 6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
239 /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
240 /* 22-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
241 },
242 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
243 {
244 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr0),
245 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr3),
246 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr4),
247 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEsBase),
248 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64GuestCsBase),
249 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestSsBase),
250 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64GuestDsBase),
251 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64GuestFsBase),
252 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64GuestGsBase),
253 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64GuestLdtrBase),
254 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64GuestTrBase),
255 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64GuestGdtrBase),
256 /* 12 */ RT_OFFSETOF(VMXVVMCS, u64GuestIdtrBase),
257 /* 13 */ RT_OFFSETOF(VMXVVMCS, u64GuestDr7),
258 /* 14 */ RT_OFFSETOF(VMXVVMCS, u64GuestRsp),
259 /* 15 */ RT_OFFSETOF(VMXVVMCS, u64GuestRip),
260 /* 16 */ RT_OFFSETOF(VMXVVMCS, u64GuestRFlags),
261 /* 17 */ RT_OFFSETOF(VMXVVMCS, u64GuestPendingDbgXcpt),
262 /* 18 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEsp),
263 /* 19 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEip),
264 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
265 },
266 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_HOST_STATE: */
267 {
268 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64HostCr0),
269 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64HostCr3),
270 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64HostCr4),
271 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64HostFsBase),
272 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64HostGsBase),
273 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64HostTrBase),
274 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64HostGdtrBase),
275 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64HostIdtrBase),
276 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEsp),
277 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEip),
278 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64HostRsp),
279 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64HostRip),
280 /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
281 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
282 }
283};
284
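/*
 * For illustration: a VMCS field encoding splits into an access type (bit 0), an
 * index (bits 9:1), a type (bits 11:10) and a width (bits 14:13).  E.g. the 32-bit
 * TPR-threshold control (encoding 0x401c) has width=2 (32-bit), type=0 (control)
 * and index=14; the row selector computed by the VMREAD/VMWRITE workers below is
 * (width << 2) | type = 8, and column 14 of that row is u32TprTreshold above.
 */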
285
286/**
287 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
288 * relative offsets.
289 */
290# ifdef IEM_WITH_CODE_TLB
291# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) do { } while (0)
292# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) do { } while (0)
293# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
294# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
295# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
296# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
297# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
298# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
299# error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
300# else /* !IEM_WITH_CODE_TLB */
301# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
302 do \
303 { \
304 Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
305 (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
306 } while (0)
307
308# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
309
310# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
311 do \
312 { \
313 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
314 uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
315 uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
316 (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
317 } while (0)
318
319# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
320 do \
321 { \
322 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
323 (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
324 } while (0)
325
326# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
327 do \
328 { \
329 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
330 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
331 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
332 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
333 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
334 (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
335 } while (0)
336
337# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
338 do \
339 { \
340 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
341 (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
342 } while (0)
343
344# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
345 do \
346 { \
347 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
348 (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
349 } while (0)
350
351# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
352 do \
353 { \
354 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
355 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
356 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
357 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
358 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
359 (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
360 } while (0)
361# endif /* !IEM_WITH_CODE_TLB */
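/*
 * Note: these getters only re-read bytes the decoder has already fetched into
 * abOpcode[].  The 8-bit variants sign-extend, e.g. a displacement byte of 0xf0
 * yields 0xfffffff0 from IEM_DISP_GET_S8_SX_U32 and 0xfffffffffffffff0 from
 * IEM_DISP_GET_S8_SX_U64.
 */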
362
363/** Whether a shadow VMCS is present for the given VCPU. */
364#define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
365
366/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
367#define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->u64VmcsLinkPtr.u)
368
369/** Whether a current VMCS is present for the given VCPU. */
370#define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
371
372/** Gets the guest-physical address of the current VMCS for the given VCPU. */
373#define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
374
375/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
376#define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
377 do \
378 { \
379 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
380 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
381 } while (0)
382
383/** Clears any current VMCS for the given VCPU. */
384#define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
385 do \
386 { \
387 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
388 } while (0)
389
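/*
 * Note: the "current" VMCS is the one established by VMPTRLD and tracked in
 * GCPhysVmcs, while the "shadow" VMCS is located through the VMCS-link pointer
 * field of the current VMCS; the VMREAD/VMWRITE workers below use the shadow
 * VMCS when executed from VMX non-root mode.
 */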
390
391/**
392 * Returns whether the given VMCS field is valid and supported by our emulation.
393 *
394 * @param pVCpu The cross context virtual CPU structure.
395 * @param uFieldEnc The VMCS field encoding.
396 *
397 * @remarks This takes into account the CPU features exposed to the guest.
398 */
399IEM_STATIC bool iemVmxIsVmcsFieldValid(PVMCPU pVCpu, uint32_t uFieldEnc)
400{
401 PCCPUMFEATURES pFeat = IEM_GET_GUEST_CPU_FEATURES(pVCpu);
402 switch (uFieldEnc)
403 {
404 /*
405 * 16-bit fields.
406 */
407 /* Control fields. */
408 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
409 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
410 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
411
412 /* Guest-state fields. */
413 case VMX_VMCS16_GUEST_ES_SEL:
414 case VMX_VMCS16_GUEST_CS_SEL:
415 case VMX_VMCS16_GUEST_SS_SEL:
416 case VMX_VMCS16_GUEST_DS_SEL:
417 case VMX_VMCS16_GUEST_FS_SEL:
418 case VMX_VMCS16_GUEST_GS_SEL:
419 case VMX_VMCS16_GUEST_LDTR_SEL:
420 case VMX_VMCS16_GUEST_TR_SEL:
421 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
422 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
423
424 /* Host-state fields. */
425 case VMX_VMCS16_HOST_ES_SEL:
426 case VMX_VMCS16_HOST_CS_SEL:
427 case VMX_VMCS16_HOST_SS_SEL:
428 case VMX_VMCS16_HOST_DS_SEL:
429 case VMX_VMCS16_HOST_FS_SEL:
430 case VMX_VMCS16_HOST_GS_SEL:
431 case VMX_VMCS16_HOST_TR_SEL: return true;
432
433 /*
434 * 64-bit fields.
435 */
436 /* Control fields. */
437 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
438 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
439 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
440 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
441 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
442 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
443 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
444 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
445 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
446 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
447 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
448 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
449 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
450 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
451 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
452 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
453 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
454 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
455 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
456 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
457 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
458 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
459 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
460 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
461 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
462 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
463 case VMX_VMCS64_CTRL_EPTP_FULL:
464 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
465 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
466 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
467 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
468 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
469 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
470 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
471 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
472 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
473 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
474 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
475 {
476 uint64_t const uVmFuncMsr = CPUMGetGuestIa32VmxVmFunc(pVCpu);
477 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
478 }
479 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
480 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
481 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
482 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
483 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
484 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
485 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
486 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
487 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
488 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
489 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
490 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
491
492 /* Read-only data fields. */
493 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
494 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
495
496 /* Guest-state fields. */
497 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
498 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
499 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
500 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
501 case VMX_VMCS64_GUEST_PAT_FULL:
502 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
503 case VMX_VMCS64_GUEST_EFER_FULL:
504 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
505 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
506 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
507 case VMX_VMCS64_GUEST_PDPTE0_FULL:
508 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
509 case VMX_VMCS64_GUEST_PDPTE1_FULL:
510 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
511 case VMX_VMCS64_GUEST_PDPTE2_FULL:
512 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
513 case VMX_VMCS64_GUEST_PDPTE3_FULL:
514 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
515 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
516 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
517
518 /* Host-state fields. */
519 case VMX_VMCS64_HOST_PAT_FULL:
520 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
521 case VMX_VMCS64_HOST_EFER_FULL:
522 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
523 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
524 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
525
526 /*
527 * 32-bit fields.
528 */
529 /* Control fields. */
530 case VMX_VMCS32_CTRL_PIN_EXEC:
531 case VMX_VMCS32_CTRL_PROC_EXEC:
532 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
533 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
534 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
535 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
536 case VMX_VMCS32_CTRL_EXIT:
537 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
538 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
539 case VMX_VMCS32_CTRL_ENTRY:
540 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
541 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
542 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
543 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
544 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
545 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
546 case VMX_VMCS32_CTRL_PLE_GAP:
547 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
548
549 /* Read-only data fields. */
550 case VMX_VMCS32_RO_VM_INSTR_ERROR:
551 case VMX_VMCS32_RO_EXIT_REASON:
552 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
553 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
554 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
555 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
556 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
557 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
558
559 /* Guest-state fields. */
560 case VMX_VMCS32_GUEST_ES_LIMIT:
561 case VMX_VMCS32_GUEST_CS_LIMIT:
562 case VMX_VMCS32_GUEST_SS_LIMIT:
563 case VMX_VMCS32_GUEST_DS_LIMIT:
564 case VMX_VMCS32_GUEST_FS_LIMIT:
565 case VMX_VMCS32_GUEST_GS_LIMIT:
566 case VMX_VMCS32_GUEST_LDTR_LIMIT:
567 case VMX_VMCS32_GUEST_TR_LIMIT:
568 case VMX_VMCS32_GUEST_GDTR_LIMIT:
569 case VMX_VMCS32_GUEST_IDTR_LIMIT:
570 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
571 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
572 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
573 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
574 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
575 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
576 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
577 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
578 case VMX_VMCS32_GUEST_INT_STATE:
579 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
580 case VMX_VMCS32_GUEST_SMBASE:
581 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
582 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
583
584 /* Host-state fields. */
585 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
586
587 /*
588 * Natural-width fields.
589 */
590 /* Control fields. */
591 case VMX_VMCS_CTRL_CR0_MASK:
592 case VMX_VMCS_CTRL_CR4_MASK:
593 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
594 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
595 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
596 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
597 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
598 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
599
600 /* Read-only data fields. */
601 case VMX_VMCS_RO_EXIT_QUALIFICATION:
602 case VMX_VMCS_RO_IO_RCX:
603 case VMX_VMCS_RO_IO_RSX:
604 case VMX_VMCS_RO_IO_RDI:
605 case VMX_VMCS_RO_IO_RIP:
606 case VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR: return true;
607
608 /* Guest-state fields. */
609 case VMX_VMCS_GUEST_CR0:
610 case VMX_VMCS_GUEST_CR3:
611 case VMX_VMCS_GUEST_CR4:
612 case VMX_VMCS_GUEST_ES_BASE:
613 case VMX_VMCS_GUEST_CS_BASE:
614 case VMX_VMCS_GUEST_SS_BASE:
615 case VMX_VMCS_GUEST_DS_BASE:
616 case VMX_VMCS_GUEST_FS_BASE:
617 case VMX_VMCS_GUEST_GS_BASE:
618 case VMX_VMCS_GUEST_LDTR_BASE:
619 case VMX_VMCS_GUEST_TR_BASE:
620 case VMX_VMCS_GUEST_GDTR_BASE:
621 case VMX_VMCS_GUEST_IDTR_BASE:
622 case VMX_VMCS_GUEST_DR7:
623 case VMX_VMCS_GUEST_RSP:
624 case VMX_VMCS_GUEST_RIP:
625 case VMX_VMCS_GUEST_RFLAGS:
626 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
627 case VMX_VMCS_GUEST_SYSENTER_ESP:
628 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
629
630 /* Host-state fields. */
631 case VMX_VMCS_HOST_CR0:
632 case VMX_VMCS_HOST_CR3:
633 case VMX_VMCS_HOST_CR4:
634 case VMX_VMCS_HOST_FS_BASE:
635 case VMX_VMCS_HOST_GS_BASE:
636 case VMX_VMCS_HOST_TR_BASE:
637 case VMX_VMCS_HOST_GDTR_BASE:
638 case VMX_VMCS_HOST_IDTR_BASE:
639 case VMX_VMCS_HOST_SYSENTER_ESP:
640 case VMX_VMCS_HOST_SYSENTER_EIP:
641 case VMX_VMCS_HOST_RSP:
642 case VMX_VMCS_HOST_RIP: return true;
643 }
644
645 return false;
646}
647
648
649/**
650 * Gets VM-exit instruction information along with any displacement for an
651 * instruction VM-exit.
652 *
653 * @returns The VM-exit instruction information.
654 * @param pVCpu The cross context virtual CPU structure.
655 * @param uExitReason The VM-exit reason.
656 * @param uInstrId The VM-exit instruction identity (VMX_INSTR_ID_XXX) if
657 * any. Pass VMX_INSTR_ID_NONE otherwise.
658 * @param fPrimaryOpRead Whether the primary operand of the ModR/M byte (bits 0:3)
659 * is a read or a write.
660 * @param pGCPtrDisp Where to store the displacement field. Optional, can be
661 * NULL.
662 */
663IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, bool fPrimaryOpRead,
664 PRTGCPTR pGCPtrDisp)
665{
666 RTGCPTR GCPtrDisp;
667 VMXEXITINSTRINFO ExitInstrInfo;
668 ExitInstrInfo.u = 0;
669
670 /*
671 * Get and parse the ModR/M byte from our decoded opcodes.
672 */
673 uint8_t bRm;
674 uint8_t const offModRm = pVCpu->iem.s.offModRm;
675 IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
676 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
677 {
678 /*
679 * ModR/M indicates register addressing.
680 *
681 * The primary/secondary register operands are reported in the iReg1 or iReg2
682 * fields depending on whether it is a read/write form.
683 */
684 uint8_t idxReg1;
685 uint8_t idxReg2;
686 if (fPrimaryOpRead)
687 {
688 idxReg1 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
689 idxReg2 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
690 }
691 else
692 {
693 idxReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
694 idxReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
695 }
696 ExitInstrInfo.All.u2Scaling = 0;
697 ExitInstrInfo.All.iReg1 = idxReg1;
698 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
699 ExitInstrInfo.All.fIsRegOperand = 1;
700 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
701 ExitInstrInfo.All.iSegReg = 0;
702 ExitInstrInfo.All.iIdxReg = 0;
703 ExitInstrInfo.All.fIdxRegInvalid = 1;
704 ExitInstrInfo.All.iBaseReg = 0;
705 ExitInstrInfo.All.fBaseRegInvalid = 1;
706 ExitInstrInfo.All.iReg2 = idxReg2;
707
708 /* Displacement not applicable for register addressing. */
709 GCPtrDisp = 0;
710 }
711 else
712 {
713 /*
714 * ModR/M indicates memory addressing.
715 */
716 uint8_t uScale = 0;
717 bool fBaseRegValid = false;
718 bool fIdxRegValid = false;
719 uint8_t iBaseReg = 0;
720 uint8_t iIdxReg = 0;
721 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
722 {
723 /*
724 * Parse the ModR/M, displacement for 16-bit addressing mode.
725 * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
726 */
727 uint16_t u16Disp = 0;
728 uint8_t const offDisp = offModRm + sizeof(bRm);
729 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
730 {
731 /* Displacement without any registers. */
732 IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
733 }
734 else
735 {
736 /* Register (index and base). */
737 switch (bRm & X86_MODRM_RM_MASK)
738 {
739 case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
740 case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
741 case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
742 case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
743 case 4: fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
744 case 5: fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
745 case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
746 case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
747 }
748
749 /* Register + displacement. */
750 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
751 {
752 case 0: break;
753 case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
754 case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp); break;
755 default:
756 {
757 /* Register addressing, handled at the beginning. */
758 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
759 break;
760 }
761 }
762 }
763
764 Assert(!uScale); /* There's no scaling/SIB byte for 16-bit addressing. */
765 GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
766 }
767 else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
768 {
769 /*
770 * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
771 * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
772 */
773 uint32_t u32Disp = 0;
774 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
775 {
776 /* Displacement without any registers. */
777 uint8_t const offDisp = offModRm + sizeof(bRm);
778 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
779 }
780 else
781 {
782 /* Register (and perhaps scale, index and base). */
783 uint8_t offDisp = offModRm + sizeof(bRm);
784 iBaseReg = (bRm & X86_MODRM_RM_MASK);
785 if (iBaseReg == 4)
786 {
787 /* An SIB byte follows the ModR/M byte, parse it. */
788 uint8_t bSib;
789 uint8_t const offSib = offModRm + sizeof(bRm);
790 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
791
792 /* A displacement may follow SIB, update its offset. */
793 offDisp += sizeof(bSib);
794
795 /* Get the scale. */
796 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
797
798 /* Get the index register. */
799 iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
800 fIdxRegValid = RT_BOOL(iIdxReg != 4);
801
802 /* Get the base register. */
803 iBaseReg = bSib & X86_SIB_BASE_MASK;
804 fBaseRegValid = true;
805 if (iBaseReg == 5)
806 {
807 if ((bRm & X86_MODRM_MOD_MASK) == 0)
808 {
809 /* Mod is 0 implies a 32-bit displacement with no base. */
810 fBaseRegValid = false;
811 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
812 }
813 else
814 {
815 /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
816 iBaseReg = X86_GREG_xBP;
817 }
818 }
819 }
820
821 /* Register + displacement. */
822 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
823 {
824 case 0: /* Handled above */ break;
825 case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
826 case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp); break;
827 default:
828 {
829 /* Register addressing, handled at the beginning. */
830 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
831 break;
832 }
833 }
834 }
835
836 GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
837 }
838 else
839 {
840 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
841
842 /*
843 * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
844 * See Intel instruction spec. 2.2 "IA-32e Mode".
845 */
846 uint64_t u64Disp = 0;
847 bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
848 if (fRipRelativeAddr)
849 {
850 /*
851 * RIP-relative addressing mode.
852 *
853 * The displacement is a signed 32-bit value, implying an offset range of +/-2G.
854 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
855 */
856 uint8_t const offDisp = offModRm + sizeof(bRm);
857 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
858 }
859 else
860 {
861 uint8_t offDisp = offModRm + sizeof(bRm);
862
863 /*
864 * Register (and perhaps scale, index and base).
865 *
866 * REX.B extends the most-significant bit of the base register. However, REX.B
867 * is ignored while determining whether an SIB follows the opcode. Hence, we
868 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
869 *
870 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
871 */
872 iBaseReg = (bRm & X86_MODRM_RM_MASK);
873 if (iBaseReg == 4)
874 {
875 /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
876 uint8_t bSib;
877 uint8_t const offSib = offModRm + sizeof(bRm);
878 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
879
880 /* Displacement may follow SIB, update its offset. */
881 offDisp += sizeof(bSib);
882
883 /* Get the scale. */
884 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
885
886 /* Get the index. */
887 iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
888 fIdxRegValid = RT_BOOL(iIdxReg != 4); /* R12 -can- be used as an index register. */
889
890 /* Get the base. */
891 iBaseReg = (bSib & X86_SIB_BASE_MASK);
892 fBaseRegValid = true;
893 if (iBaseReg == 5)
894 {
895 if ((bRm & X86_MODRM_MOD_MASK) == 0)
896 {
897 /* Mod is 0 implies a signed 32-bit displacement with no base. */
898 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
899 }
900 else
901 {
902 /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
903 iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
904 }
905 }
906 }
907 iBaseReg |= pVCpu->iem.s.uRexB;
908
909 /* Register + displacement. */
910 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
911 {
912 case 0: /* Handled above */ break;
913 case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp); break;
914 case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
915 default:
916 {
917 /* Register addressing, handled at the beginning. */
918 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
919 break;
920 }
921 }
922 }
923
924 GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
925 }
926
927 /*
928 * The primary or secondary register operand is reported in iReg2 depending
929 * on whether the primary operand is in read/write form.
930 */
931 uint8_t idxReg2;
932 if (fPrimaryOpRead)
933 {
934 idxReg2 = bRm & X86_MODRM_RM_MASK;
935 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
936 idxReg2 |= pVCpu->iem.s.uRexB;
937 }
938 else
939 {
940 idxReg2 = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
941 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
942 idxReg2 |= pVCpu->iem.s.uRexReg;
943 }
944 ExitInstrInfo.All.u2Scaling = uScale;
945 ExitInstrInfo.All.iReg1 = 0; /* Not applicable for memory addressing. */
946 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
947 ExitInstrInfo.All.fIsRegOperand = 0;
948 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
949 ExitInstrInfo.All.iSegReg = pVCpu->iem.s.iEffSeg;
950 ExitInstrInfo.All.iIdxReg = iIdxReg;
951 ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
952 ExitInstrInfo.All.iBaseReg = iBaseReg;
953 ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
954 ExitInstrInfo.All.iReg2 = idxReg2;
955 }
956
957 /*
958 * Handle exceptions for certain instructions.
959 * (e.g. some instructions convey an instruction identity).
960 */
961 switch (uExitReason)
962 {
963 case VMX_EXIT_GDTR_IDTR_ACCESS:
964 {
965 Assert(VMX_INSTR_ID_IS_VALID(uInstrId));
966 ExitInstrInfo.GdtIdt.u2InstrId = VMX_INSTR_ID_GET_ID(uInstrId);
967 ExitInstrInfo.GdtIdt.u2Undef0 = 0;
968 break;
969 }
970
971 case VMX_EXIT_LDTR_TR_ACCESS:
972 {
973 Assert(VMX_INSTR_ID_IS_VALID(uInstrId));
974 ExitInstrInfo.LdtTr.u2InstrId = VMX_INSTR_ID_GET_ID(uInstrId);
975 ExitInstrInfo.LdtTr.u2Undef0 = 0;
976 break;
977 }
978
979 case VMX_EXIT_RDRAND:
980 case VMX_EXIT_RDSEED:
981 {
982 Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
983 break;
984 }
985 }
986
987 /* Update displacement and return the constructed VM-exit instruction information field. */
988 if (pGCPtrDisp)
989 *pGCPtrDisp = GCPtrDisp;
990 return ExitInstrInfo.u;
991}
992
993
994/**
995 * Implements VMSucceed for VMX instruction success.
996 *
997 * @param pVCpu The cross context virtual CPU structure.
998 */
999DECLINLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
1000{
1001 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1002}
1003
1004
1005/**
1006 * Implements VMFailInvalid for VMX instruction failure.
1007 *
1008 * @param pVCpu The cross context virtual CPU structure.
1009 */
1010DECLINLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
1011{
1012 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1013 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_CF;
1014}
1015
1016
1017/**
1018 * Implements VMFailValid for VMX instruction failure.
1019 *
1020 * @param pVCpu The cross context virtual CPU structure.
1021 * @param enmInsErr The VM instruction error.
1022 */
1023DECLINLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1024{
1025 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1026 {
1027 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1028 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_ZF;
1029 /** @todo NSTVMX: VMWrite enmInsErr to VM-instruction error field. */
1030 RT_NOREF(enmInsErr);
1031 }
1032}
1033
1034
1035/**
1036 * Implements VMFail for VMX instruction failure.
1037 *
1038 * @param pVCpu The cross context virtual CPU structure.
1039 * @param enmInsErr The VM instruction error.
1040 */
1041DECLINLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1042{
1043 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1044 {
1045 iemVmxVmFailValid(pVCpu, enmInsErr);
1046 /** @todo Set VM-instruction error field in the current virtual-VMCS. */
1047 }
1048 else
1049 iemVmxVmFailInvalid(pVCpu);
1050}
1051
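/*
 * Summary of the VMX status-flag conventions implemented by the helpers above
 * (see the Intel SDM conventions for VMX instructions):
 *   VMsucceed:     CF=PF=AF=ZF=SF=OF=0.
 *   VMfailInvalid: CF=1, all other status flags cleared (no current VMCS to
 *                  record an error in).
 *   VMfailValid:   ZF=1, all other status flags cleared, and the VM-instruction
 *                  error field of the current VMCS receives the error code
 *                  (still marked as a @todo above).
 */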
1052
1053/**
1054 * Flushes the current VMCS contents back to guest memory and clears the current-VMCS pointer.
1055 *
1056 * @returns VBox status code.
1057 * @param pVCpu The cross context virtual CPU structure.
1058 */
1059DECLINLINE(int) iemVmxCommitCurrentVmcsToMemory(PVMCPU pVCpu)
1060{
1061 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1062 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
1063 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), sizeof(VMXVVMCS));
1064 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
1065 return rc;
1066}
1067
1068
1069/**
1070 * Implements VMSucceed for the VMREAD instruction and increments the guest RIP.
1071 *
1072 * @param pVCpu The cross context virtual CPU structure.
1073 */
1074DECLINLINE(void) iemVmxVmreadSuccess(PVMCPU pVCpu, uint8_t cbInstr)
1075{
1076 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmread_Success;
1077 iemVmxVmSucceed(pVCpu);
1078 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1079}
1080
1081
1082/**
1083 * VMREAD common (memory/register) instruction execution worker.
1084 *
1085 * @param pVCpu The cross context virtual CPU structure.
1086 * @param cbInstr The instruction length.
1087 * @param pu64Dst Where to write the VMCS value (only updated when
1088 * VINF_SUCCESS is returned).
1089 * @param uFieldEnc The VMCS field encoding.
1090 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1091 * be NULL.
1092 */
1093IEM_STATIC VBOXSTRICTRC iemVmxVmreadCommon(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint32_t uFieldEnc,
1094 PCVMXVEXITINFO pExitInfo)
1095{
1096 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1097 {
1098 RT_NOREF(pExitInfo); RT_NOREF(cbInstr);
1099 /** @todo NSTVMX: intercept. */
1100 /** @todo NSTVMX: VMCS shadowing intercept (VMREAD bitmap). */
1101 }
1102
1103 /* CPL. */
1104 if (CPUMGetGuestCPL(pVCpu) > 0)
1105 {
1106 Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1107 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmread_Cpl;
1108 return iemRaiseGeneralProtectionFault0(pVCpu);
1109 }
1110
1111 /* VMCS pointer in root mode. */
1112 if ( IEM_IS_VMX_ROOT_MODE(pVCpu)
1113 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1114 {
1115 Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
1116 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmread_PtrInvalid;
1117 iemVmxVmFailInvalid(pVCpu);
1118 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1119 return VINF_SUCCESS;
1120 }
1121
1122 /* VMCS-link pointer in non-root mode. */
1123 if ( IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
1124 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
1125 {
1126 Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
1127 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmread_LinkPtrInvalid;
1128 iemVmxVmFailInvalid(pVCpu);
1129 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1130 return VINF_SUCCESS;
1131 }
1132
1133 /* Supported VMCS field. */
1134 if (!iemVmxIsVmcsFieldValid(pVCpu, uFieldEnc))
1135 {
1136 Log(("vmread: VMCS field %#x invalid -> VMFail\n", uFieldEnc));
1137 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmread_FieldInvalid;
1138 iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
1139 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1140 return VINF_SUCCESS;
1141 }
1142
1143 /*
1144 * Setup reading from the current or shadow VMCS.
1145 */
1146 uint8_t *pbVmcs;
1147 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1148 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
1149 else
1150 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1151 Assert(pbVmcs);
1152
1153 PCVMXVMCSFIELDENC pFieldEnc = (PCVMXVMCSFIELDENC)&uFieldEnc;
1154 uint8_t const uWidth = pFieldEnc->n.u2Width;
1155 uint8_t const uType = pFieldEnc->n.u2Type;
1156 uint8_t const uWidthType = (uWidth << 2) | uType;
1157 uint8_t const uIndex = pFieldEnc->n.u8Index;
1158 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
1159 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
1160
1161 /*
1162 * Read the VMCS component based on the field's effective width.
1163 *
1164 * The effective width is the field's width, with 64-bit fields adjusted to 32 bits
1165 * when the access type indicates the high bits (little endian).
1166 *
1167 * Note! The caller is responsible for trimming the result and updating registers
1168 * or memory locations as required. Here we just zero-extend to the largest
1169 * type (i.e. 64 bits).
1170 */
1171 uint8_t *pbField = pbVmcs + offField;
1172 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(uFieldEnc);
1173 switch (uEffWidth)
1174 {
1175 case VMX_VMCS_ENC_WIDTH_64BIT:
1176 case VMX_VMCS_ENC_WIDTH_NATURAL: *pu64Dst = *(uint64_t *)pbField; break;
1177 case VMX_VMCS_ENC_WIDTH_32BIT: *pu64Dst = *(uint32_t *)pbField; break;
1178 case VMX_VMCS_ENC_WIDTH_16BIT: *pu64Dst = *(uint16_t *)pbField; break;
1179 }
1180 return VINF_SUCCESS;
1181}
1182
1183
1184/**
1185 * VMREAD (64-bit register) instruction execution worker.
1186 *
1187 * @param pVCpu The cross context virtual CPU structure.
1188 * @param cbInstr The instruction length.
1189 * @param pu64Dst Where to store the VMCS field's value.
1190 * @param uFieldEnc The VMCS field encoding.
1191 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1192 * be NULL.
1193 */
1194IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg64(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint32_t uFieldEnc,
1195 PCVMXVEXITINFO pExitInfo)
1196{
1197 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
1198 if (rcStrict == VINF_SUCCESS)
1199 {
1200 iemVmxVmreadSuccess(pVCpu, cbInstr);
1201 return VINF_SUCCESS;
1202 }
1203
1204 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1205 return rcStrict;
1206}
1207
1208
1209/**
1210 * VMREAD (32-bit register) instruction execution worker.
1211 *
1212 * @param pVCpu The cross context virtual CPU structure.
1213 * @param cbInstr The instruction length.
1214 * @param pu32Dst Where to store the VMCS field's value.
1215 * @param uFieldEnc The VMCS field encoding.
1216 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1217 * be NULL.
1218 */
1219IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg32(PVMCPU pVCpu, uint8_t cbInstr, uint32_t *pu32Dst, uint32_t uFieldEnc,
1220 PCVMXVEXITINFO pExitInfo)
1221{
1222 uint64_t u64Dst;
1223 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, uFieldEnc, pExitInfo);
1224 if (rcStrict == VINF_SUCCESS)
1225 {
1226 *pu32Dst = u64Dst;
1227 iemVmxVmreadSuccess(pVCpu, cbInstr);
1228 return VINF_SUCCESS;
1229 }
1230
1231 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1232 return rcStrict;
1233}
1234
1235
1236/**
1237 * VMREAD (memory) instruction execution worker.
1238 *
1239 * @param pVCpu The cross context virtual CPU structure.
1240 * @param cbInstr The instruction length.
1241 * @param iEffSeg The effective segment register to use with @a GCPtrDst.
1242 * Pass UINT8_MAX if it is a register access.
1243 * @param enmEffAddrMode The effective addressing mode (only used with memory
1244 * operand).
1245 * @param GCPtrDst The guest linear address to store the VMCS field's
1246 * value.
1247 * @param uFieldEnc The VMCS field encoding.
1248 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1249 * be NULL.
1250 */
1251IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode,
1252 RTGCPTR GCPtrDst, uint32_t uFieldEnc, PCVMXVEXITINFO pExitInfo)
1253{
1254 uint64_t u64Dst;
1255 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, uFieldEnc, pExitInfo);
1256 if (rcStrict == VINF_SUCCESS)
1257 {
1258 /*
1259 * Write the VMCS field's value to the location specified in guest-memory.
1260 *
1261 * The pointer size depends on the address size (address-size prefix allowed).
1262 * The operand size depends on IA-32e mode (operand-size prefix not allowed).
1263 */
1264 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
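 /* The mask table is indexed by enmEffAddrMode (IEMMODE): 16-bit = 0, 32-bit = 1, 64-bit = 2. */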
1265 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
1266 GCPtrDst &= s_auAddrSizeMasks[enmEffAddrMode];
1267
1268 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1269 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
1270 else
1271 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
1272 if (rcStrict == VINF_SUCCESS)
1273 {
1274 iemVmxVmreadSuccess(pVCpu, cbInstr);
1275 return VINF_SUCCESS;
1276 }
1277
1278 Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
1279 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmread_PtrMap;
1280 return rcStrict;
1281 }
1282
1283 Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1284 return rcStrict;
1285}
1286
1287
1288/**
1289 * VMWRITE instruction execution worker.
1290 *
1291 * @param pVCpu The cross context virtual CPU structure.
1292 * @param cbInstr The instruction length.
1293 * @param iEffSeg The effective segment register to use with @a u64Val.
1294 * Pass UINT8_MAX if it is a register access.
1295 * @param enmEffAddrMode The effective addressing mode (only used with memory
1296 * operand).
1297 * @param u64Val The value to write (or guest linear address to the
1298 * value), @a iEffSeg will indicate if it's a memory
1299 * operand.
1300 * @param uFieldEnc The VMCS field encoding.
1301 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1302 * be NULL.
1303 */
1304IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode, uint64_t u64Val,
1305 uint32_t uFieldEnc, PCVMXVEXITINFO pExitInfo)
1306{
1307 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1308 {
1309 RT_NOREF(pExitInfo);
1310 /** @todo NSTVMX: intercept. */
1311 /** @todo NSTVMX: VMCS shadowing intercept (VMWRITE bitmap). */
1312 }
1313
1314 /* CPL. */
1315 if (CPUMGetGuestCPL(pVCpu) > 0)
1316 {
1317 Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1318 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_Cpl;
1319 return iemRaiseGeneralProtectionFault0(pVCpu);
1320 }
1321
1322 /* VMCS pointer in root mode. */
1323 if ( IEM_IS_VMX_ROOT_MODE(pVCpu)
1324 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1325 {
1326 Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
1327 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_PtrInvalid;
1328 iemVmxVmFailInvalid(pVCpu);
1329 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1330 return VINF_SUCCESS;
1331 }
1332
1333 /* VMCS-link pointer in non-root mode. */
1334 if ( IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
1335 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
1336 {
1337 Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
1338 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_LinkPtrInvalid;
1339 iemVmxVmFailInvalid(pVCpu);
1340 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1341 return VINF_SUCCESS;
1342 }
1343
1344 /* If the VMWRITE instruction references memory, access the specified memory operand. */
1345 bool const fIsRegOperand = iEffSeg == UINT8_MAX;
1346 if (!fIsRegOperand)
1347 {
1348 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
1349 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
1350 RTGCPTR const GCPtrVal = u64Val & s_auAddrSizeMasks[enmEffAddrMode];
1351
1352 /* Read the value from the specified guest memory location. */
1353 VBOXSTRICTRC rcStrict;
1354 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1355 rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
1356 else
1357 {
1358 uint32_t u32Val;
1359 rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iEffSeg, GCPtrVal);
1360 u64Val = u32Val;
1361 }
1362 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1363 {
1364 Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
1365 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_PtrMap;
1366 return rcStrict;
1367 }
1368 }
1369 else
1370 Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
1371
1372 /* Supported VMCS field. */
1373 if (!iemVmxIsVmcsFieldValid(pVCpu, uFieldEnc))
1374 {
1375 Log(("vmwrite: VMCS field %#x invalid -> VMFail\n", uFieldEnc));
1376 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_FieldInvalid;
1377 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
1378 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1379 return VINF_SUCCESS;
1380 }
1381
1382 /* Read-only VMCS field. */
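 /* (The read-only fields are the VM-exit information fields; writing them is only
    architecturally permitted when IA32_VMX_MISC bit 29 is set, which is the
    capability the fVmxVmwriteAll feature flag checked below corresponds to.) */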
1383 bool const fReadOnlyField = HMVmxIsVmcsFieldReadOnly(uFieldEnc);
1384 if ( fReadOnlyField
1385 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
1386 {
1387 Log(("vmwrite: Write to read-only VMCS component %#x -> VMFail\n", uFieldEnc));
1388 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_FieldRo;
1389 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
1390 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1391 return VINF_SUCCESS;
1392 }
1393
1394 /*
1395 * Setup writing to the current or shadow VMCS.
1396 */
1397 uint8_t *pbVmcs;
1398 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1399 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
1400 else
1401 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1402 Assert(pbVmcs);
1403
1404 PCVMXVMCSFIELDENC pFieldEnc = (PCVMXVMCSFIELDENC)&uFieldEnc;
1405 uint8_t const uWidth = pFieldEnc->n.u2Width;
1406 uint8_t const uType = pFieldEnc->n.u2Type;
1407 uint8_t const uWidthType = (uWidth << 2) | uType;
1408 uint8_t const uIndex = pFieldEnc->n.u8Index;
1409 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
1410 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
1411
1412 /*
1413 * Write the VMCS component based on the field's effective width.
1414 *
1415 * The effective width is the field's width, with 64-bit fields adjusted to 32 bits
1416 * when the access type indicates the high bits (little endian).
1417 */
1418 uint8_t *pbField = pbVmcs + offField;
1419 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(uFieldEnc);
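    /* Note: for 32-bit and 16-bit effective widths the 64-bit source value is truncated
       to the field size by the casts below. */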
1420 switch (uEffWidth)
1421 {
1422 case VMX_VMCS_ENC_WIDTH_64BIT:
1423 case VMX_VMCS_ENC_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
1424 case VMX_VMCS_ENC_WIDTH_32BIT: *(uint32_t *)pbField = u64Val; break;
1425 case VMX_VMCS_ENC_WIDTH_16BIT: *(uint16_t *)pbField = u64Val; break;
1426 }
1427
1428 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_Success;
1429 iemVmxVmSucceed(pVCpu);
1430 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1431 return VINF_SUCCESS;
1432}
1433
1434
1435/**
1436 * VMCLEAR instruction execution worker.
1437 *
1438 * @param pVCpu The cross context virtual CPU structure.
1439 * @param cbInstr The instruction length.
1440 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
1441 * @param GCPtrVmcs The linear address of the VMCS pointer.
1442 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1443 * be NULL.
1444 *
1445 * @remarks Common VMX instruction checks are already expected to have been done by the
1446 * caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1447 */
1448IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmcs,
1449 PCVMXVEXITINFO pExitInfo)
1450{
1451 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1452 {
1453 RT_NOREF(pExitInfo);
1454 /** @todo NSTVMX: intercept. */
1455 }
1456 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1457
1458 /* CPL. */
1459 if (CPUMGetGuestCPL(pVCpu) > 0)
1460 {
1461 Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1462 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_Cpl;
1463 return iemRaiseGeneralProtectionFault0(pVCpu);
1464 }
1465
1466 /* Get the VMCS pointer from the location specified by the source memory operand. */
1467 RTGCPHYS GCPhysVmcs;
1468 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
1469 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1470 {
1471 Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
1472 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrMap;
1473 return rcStrict;
1474 }
1475
1476 /* VMCS pointer alignment. */
1477 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
1478 {
1479 Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
1480 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrAlign;
1481 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1482 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1483 return VINF_SUCCESS;
1484 }
1485
1486 /* VMCS physical-address width limits. */
1487 Assert(!VMX_V_VMCS_PHYSADDR_4G_LIMIT);
1488 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
1489 {
1490 Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
1491 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrWidth;
1492 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1493 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1494 return VINF_SUCCESS;
1495 }
1496
1497 /* VMCS is not the VMXON region. */
1498 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
1499 {
1500 Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
1501 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrVmxon;
1502 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
1503 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1504 return VINF_SUCCESS;
1505 }
1506
1507 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
1508 restriction imposed by our implementation. */
1509 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
1510 {
1511 Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
1512 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrAbnormal;
1513 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1514 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1515 return VINF_SUCCESS;
1516 }
1517
1518 /*
1519 * VMCLEAR allows committing and clearing any valid VMCS pointer.
1520 *
1521 * If the current VMCS is the one being cleared, set its state to 'clear' and commit
1522 * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
1523 * to 'clear'.
1524 */
1525 uint8_t const fVmcsStateClear = VMX_V_VMCS_STATE_CLEAR;
1526 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
1527 {
1528 Assert(GCPhysVmcs != NIL_RTGCPHYS); /* Paranoia. */
1529 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsStateClear;
1530 iemVmxCommitCurrentVmcsToMemory(pVCpu);
1531 Assert(!IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1532 }
1533 else
1534 {
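            /* Not the current VMCS: only the state field of the VMCS residing in guest
               memory needs updating, so a direct physical write suffices. */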
1535 rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_OFFSETOF(VMXVVMCS, fVmcsState),
1536 (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
1537 }
1538
1539 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_Success;
1540 iemVmxVmSucceed(pVCpu);
1541 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1542 return rcStrict;
1543}
1544
1545
1546/**
1547 * VMPTRST instruction execution worker.
1548 *
1549 * @param pVCpu The cross context virtual CPU structure.
1550 * @param cbInstr The instruction length.
1551 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
1552 * @param GCPtrVmcs The linear address of where to store the current VMCS
1553 * pointer.
1554 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1555 * be NULL.
1556 *
1557 * @remarks Common VMX instruction checks are already expected to have been done by the
1558 * caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1559 */
1560IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmcs,
1561 PCVMXVEXITINFO pExitInfo)
1562{
1563 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1564 {
1565 RT_NOREF(pExitInfo);
1566 /** @todo NSTVMX: intercept. */
1567 }
1568 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1569
1570 /* CPL. */
1571 if (CPUMGetGuestCPL(pVCpu) > 0)
1572 {
1573 Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1574 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrst_Cpl;
1575 return iemRaiseGeneralProtectionFault0(pVCpu);
1576 }
1577
1578 /* Set the VMCS pointer to the location specified by the destination memory operand. */
1579 AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
1580 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
1581 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1582 {
1583 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrst_Success;
1584 iemVmxVmSucceed(pVCpu);
1585 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1586 return rcStrict;
1587 }
1588
1589 Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand, rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1590 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrst_PtrMap;
1591 return rcStrict;
1592}
1593
1594
1595/**
1596 * VMPTRLD instruction execution worker.
1597 *
1598 * @param pVCpu The cross context virtual CPU structure.
1599 * @param cbInstr The instruction length.
 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
1600 * @param GCPtrVmcs The linear address of the current VMCS pointer.
1601 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1602 * be NULL.
1603 *
1604 * @remarks Common VMX instruction checks are already expected to have been done by the
1605 * caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1606 */
1607IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmcs,
1608 PCVMXVEXITINFO pExitInfo)
1609{
1610 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1611 {
1612 RT_NOREF(pExitInfo);
1613 /** @todo NSTVMX: intercept. */
1614 }
1615 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1616
1617 /* CPL. */
1618 if (CPUMGetGuestCPL(pVCpu) > 0)
1619 {
1620 Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1621 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_Cpl;
1622 return iemRaiseGeneralProtectionFault0(pVCpu);
1623 }
1624
1625 /* Get the VMCS pointer from the location specified by the source memory operand. */
1626 RTGCPHYS GCPhysVmcs;
1627 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
1628 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1629 {
1630 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
1631 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrMap;
1632 return rcStrict;
1633 }
1634
1635 /* VMCS pointer alignment. */
1636 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
1637 {
1638 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
1639 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrAlign;
1640 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1641 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1642 return VINF_SUCCESS;
1643 }
1644
1645 /* VMCS physical-address width limits. */
1646 Assert(!VMX_V_VMCS_PHYSADDR_4G_LIMIT);
1647 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
1648 {
1649 Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
1650 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrWidth;
1651 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1652 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1653 return VINF_SUCCESS;
1654 }
1655
1656 /* VMCS is not the VMXON region. */
1657 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
1658 {
1659 Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
1660 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrVmxon;
1661 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
1662 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1663 return VINF_SUCCESS;
1664 }
1665
1666 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
1667 restriction imposed by our implementation. */
1668 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
1669 {
1670 Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
1671 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrAbnormal;
1672 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1673 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1674 return VINF_SUCCESS;
1675 }
1676
1677 /* Read the VMCS revision ID from the VMCS. */
1678 VMXVMCSREVID VmcsRevId;
1679 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
1680 if (RT_FAILURE(rc))
1681 {
1682 Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
1683 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrReadPhys;
1684 return rc;
1685 }
1686
1687 /* Verify that the VMCS revision specified by the guest matches what we reported to the
1688 guest, and that a shadow VMCS is only used when VMCS shadowing is exposed to the guest. */
1689 if ( VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
1690 || ( VmcsRevId.n.fIsShadowVmcs
1691 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
1692 {
1693 if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
1694 {
1695 Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFail()\n", VMX_V_VMCS_REVISION_ID,
1696 VmcsRevId.n.u31RevisionId));
1697 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_VmcsRevId;
1698 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
1699 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1700 return VINF_SUCCESS;
1701 }
1702
1703 Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
1704 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_ShadowVmcs;
1705 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
1706 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1707 return VINF_SUCCESS;
1708 }
1709
1710 /*
1711 * We only maintain the current VMCS in our virtual CPU context (CPUMCTX). Therefore,
1712 * VMPTRLD shall always flush any existing current VMCS back to guest memory before loading
1713 * a new VMCS as current.
1714 */
1715 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
1716 {
1717 iemVmxCommitCurrentVmcsToMemory(pVCpu);
1718 IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
1719 }
1720 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_Success;
1721 iemVmxVmSucceed(pVCpu);
1722 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1723 return VINF_SUCCESS;
1724}
1725
1726
1727/**
1728 * VMXON instruction execution worker.
1729 *
1730 * @param pVCpu The cross context virtual CPU structure.
1731 * @param cbInstr The instruction length.
1732 * @param iEffSeg The effective segment register to use with @a
1733 * GCPtrVmxon.
1734 * @param GCPtrVmxon The linear address of the VMXON pointer.
1735 * @param pExitInfo Pointer to the VM-exit instruction information struct.
1736 * Optional, can be NULL.
1737 *
1738 * @remarks Common VMX instruction checks are already expected to have been done by the
1739 * caller, i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1740 */
1741IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmxon,
1742 PCVMXVEXITINFO pExitInfo)
1743{
1744#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1745 RT_NOREF5(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
1746 return VINF_EM_RAW_EMULATE_INSTR;
1747#else
1748 if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
1749 {
1750 /* CPL. */
1751 if (pVCpu->iem.s.uCpl > 0)
1752 {
1753 Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1754 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cpl;
1755 return iemRaiseGeneralProtectionFault0(pVCpu);
1756 }
1757
1758 /* A20M (A20 Masked) mode. */
1759 if (!PGMPhysIsA20Enabled(pVCpu))
1760 {
1761 Log(("vmxon: A20M mode -> #GP(0)\n"));
1762 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_A20M;
1763 return iemRaiseGeneralProtectionFault0(pVCpu);
1764 }
1765
1766 /* CR0 fixed bits. */
1767 bool const fUnrestrictedGuest = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxUnrestrictedGuest;
1768 uint64_t const uCr0Fixed0 = fUnrestrictedGuest ? VMX_V_CR0_FIXED0_UX : VMX_V_CR0_FIXED0;
1769 if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
1770 {
1771 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
1772 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cr0Fixed0;
1773 return iemRaiseGeneralProtectionFault0(pVCpu);
1774 }
1775
1776 /* CR4 fixed bits. */
1777 if ((pVCpu->cpum.GstCtx.cr4 & VMX_V_CR4_FIXED0) != VMX_V_CR4_FIXED0)
1778 {
1779 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
1780 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cr4Fixed0;
1781 return iemRaiseGeneralProtectionFault0(pVCpu);
1782 }
1783
1784 /* Feature control MSR's LOCK and VMXON bits. */
1785 uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
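        /* Outside SMX operation, both the lock bit and the enable-VMX-outside-SMX bit must
           be set in the feature-control MSR for VMXON to be permitted. */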
1786 if ((uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
     != (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
1787 {
1788 Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
1789 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_MsrFeatCtl;
1790 return iemRaiseGeneralProtectionFault0(pVCpu);
1791 }
1792
1793 /* Get the VMXON pointer from the location specified by the source memory operand. */
1794 RTGCPHYS GCPhysVmxon;
1795 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
1796 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1797 {
1798 Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
1799 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrMap;
1800 return rcStrict;
1801 }
1802
1803 /* VMXON region pointer alignment. */
1804 if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
1805 {
1806 Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
1807 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrAlign;
1808 iemVmxVmFailInvalid(pVCpu);
1809 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1810 return VINF_SUCCESS;
1811 }
1812
1813 /* VMXON physical-address width limits. */
1814 Assert(!VMX_V_VMCS_PHYSADDR_4G_LIMIT);
1815 if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
1816 {
1817 Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
1818 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrWidth;
1819 iemVmxVmFailInvalid(pVCpu);
1820 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1821 return VINF_SUCCESS;
1822 }
1823
1824 /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
1825 restriction imposed by our implementation. */
1826 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
1827 {
1828 Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
1829 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrAbnormal;
1830 iemVmxVmFailInvalid(pVCpu);
1831 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1832 return VINF_SUCCESS;
1833 }
1834
1835 /* Read the VMCS revision ID from the VMXON region. */
1836 VMXVMCSREVID VmcsRevId;
1837 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
1838 if (RT_FAILURE(rc))
1839 {
1840 Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
1841 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrReadPhys;
1842 return rc;
1843 }
1844
1845 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
1846 if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
1847 {
1848 /* Revision ID mismatch. */
1849 if (!VmcsRevId.n.fIsShadowVmcs)
1850 {
1851 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
1852 VmcsRevId.n.u31RevisionId));
1853 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmcsRevId;
1854 iemVmxVmFailInvalid(pVCpu);
1855 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1856 return VINF_SUCCESS;
1857 }
1858
1859 /* Shadow VMCS disallowed. */
1860 Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
1861 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_ShadowVmcs;
1862 iemVmxVmFailInvalid(pVCpu);
1863 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1864 return VINF_SUCCESS;
1865 }
1866
1867 /*
1868 * Record that we're in VMX operation, block INIT, block and disable A20M.
1869 */
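    /* Note: the VMXON region pointer recorded below is what VMCLEAR and VMPTRLD check a
       VMCS pointer against when rejecting aliasing of the VMXON region. */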
1870 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
1871 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
1872 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
1873 /** @todo NSTVMX: clear address-range monitoring. */
1874 /** @todo NSTVMX: Intel PT. */
1875 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Success;
1876 iemVmxVmSucceed(pVCpu);
1877 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1878# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
1879 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
1880# else
1881 return VINF_SUCCESS;
1882# endif
1883 }
1884 else if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1885 {
1886 RT_NOREF(pExitInfo);
1887 /** @todo NSTVMX: intercept. */
1888 }
1889
1890 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1891
1892 /* CPL. */
1893 if (pVCpu->iem.s.uCpl > 0)
1894 {
1895 Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1896 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmxRootCpl;
1897 return iemRaiseGeneralProtectionFault0(pVCpu);
1898 }
1899
1900 /* VMXON when already in VMX root mode. */
1901 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmxRoot;
1902 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
1903 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1904 return VINF_SUCCESS;
1905#endif
1906}
1907
1908
1909/**
1910 * Implements 'VMXON'.
1911 */
1912IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
1913{
1914 return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
1915}
1916
1917
1918/**
1919 * Implements 'VMXOFF'.
1920 */
1921IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
1922{
1923# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1924 RT_NOREF2(pVCpu, cbInstr);
1925 return VINF_EM_RAW_EMULATE_INSTR;
1926# else
1927 IEM_VMX_INSTR_COMMON_CHECKS(pVCpu, "vmxoff", kVmxVInstrDiag_Vmxoff);
1928 if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
1929 {
1930 Log(("vmxoff: Not in VMX root mode -> #GP(0)\n"));
1931 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_VmxRoot;
1932 return iemRaiseUndefinedOpcode(pVCpu);
1933 }
1934
1935 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1936 {
1937 /** @todo NSTVMX: intercept. */
1938 }
1939
1940 /* CPL. */
1941 if (pVCpu->iem.s.uCpl > 0)
1942 {
1943 Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1944 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_Cpl;
1945 return iemRaiseGeneralProtectionFault0(pVCpu);
1946 }
1947
1948 /* Dual monitor treatment of SMIs and SMM. */
1949 uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
1950 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
1951 {
1952 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
1953 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1954 return VINF_SUCCESS;
1955 }
1956
1957 /*
1958 * Record that we're no longer in VMX root operation, unblock INIT and re-enable A20M.
1959 */
1960 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
1961 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
1962
1963 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
1964 { /** @todo NSTVMX: Unblock SMI. */ }
1965 /** @todo NSTVMX: Unblock and enable A20M. */
1966 /** @todo NSTVMX: Clear address-range monitoring. */
1967
1968 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_Success;
1969 iemVmxVmSucceed(pVCpu);
1970 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1971# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
1972 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
1973# else
1974 return VINF_SUCCESS;
1975# endif
1976# endif
1977}
1978
1979
1980/**
1981 * Implements 'VMPTRLD'.
1982 */
1983IEM_CIMPL_DEF_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
1984{
1985 return iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
1986}
1987
1988
1989/**
1990 * Implements 'VMPTRST'.
1991 */
1992IEM_CIMPL_DEF_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
1993{
1994 return iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
1995}
1996
1997
1998/**
1999 * Implements 'VMCLEAR'.
2000 */
2001IEM_CIMPL_DEF_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
2002{
2003 return iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
2004}
2005
2006
2007/**
2008 * Implements 'VMWRITE' register.
2009 */
2010IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint32_t, uFieldEnc)
2011{
2012 return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, IEMMODE_64BIT /* N/A */, u64Val, uFieldEnc,
2013 NULL /* pExitInfo */);
2014}
2015
2016
2017/**
2018 * Implements 'VMWRITE' memory.
2019 */
2020IEM_CIMPL_DEF_4(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrVal, uint32_t, uFieldEnc)
2021{
2022 return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrVal, uFieldEnc, NULL /* pExitInfo */);
2023}
2024
2025
2026/**
2027 * Implements 'VMREAD' 64-bit register.
2028 */
2029IEM_CIMPL_DEF_2(iemCImpl_vmread64_reg, uint64_t *, pu64Dst, uint32_t, uFieldEnc)
2030{
2031 return iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, NULL /* pExitInfo */);
2032}
2033
2034
2035/**
2036 * Implements 'VMREAD' 32-bit register.
2037 */
2038IEM_CIMPL_DEF_2(iemCImpl_vmread32_reg, uint32_t *, pu32Dst, uint32_t, uFieldEnc)
2039{
2040 return iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, NULL /* pExitInfo */);
2041}
2042
2043
2044/**
2045 * Implements 'VMREAD' memory.
2046 */
2047IEM_CIMPL_DEF_4(iemCImpl_vmread_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrDst, uint32_t, uFieldEnc)
2048{
2049 return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, NULL /* pExitInfo */);
2050}
2051
2052
2053#endif
2054