VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h@74018

Last change on this file since 74018 was 74018, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 We already have CPL in IEM, avoid calling CPUMGetGuestCPL whenever possible.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
1/* $Id: IEMAllCImplVmxInstr.cpp.h 74018 2018-09-01 05:33:17Z vboxsync $ */
2/** @file
3 * IEM - VT-x instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/**
20 * Implements 'VMCALL'.
21 */
22IEM_CIMPL_DEF_0(iemCImpl_vmcall)
23{
24 /** @todo NSTVMX: intercept. */
25
26 /* Join forces with vmmcall. */
27 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
28}
29
30#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
31/**
32 * Map of VMCS field encodings to their virtual-VMCS structure offsets.
33 *
34 * The first array dimension is the VMCS field encoding's Width OR'ed with its Type,
35 * and the second dimension is the Index; see VMXVMCSFIELDENC.
36 */
37uint16_t const g_aoffVmcsMap[16][VMX_V_VMCS_MAX_INDEX + 1] =
38{
39 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
40 {
41 /* 0 */ RT_OFFSETOF(VMXVVMCS, u16Vpid),
42 /* 1 */ RT_OFFSETOF(VMXVVMCS, u16PostIntNotifyVector),
43 /* 2 */ RT_OFFSETOF(VMXVVMCS, u16EptpIndex),
44 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
45 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
46 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
47 },
48 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
49 {
50 /* 0-7 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
51 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
52 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
53 /* 24-25 */ UINT16_MAX, UINT16_MAX
54 },
55 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
56 {
57 /* 0 */ RT_OFFSETOF(VMXVVMCS, GuestEs),
58 /* 1 */ RT_OFFSETOF(VMXVVMCS, GuestCs),
59 /* 2 */ RT_OFFSETOF(VMXVVMCS, GuestSs),
60 /* 3 */ RT_OFFSETOF(VMXVVMCS, GuestDs),
61 /* 4 */ RT_OFFSETOF(VMXVVMCS, GuestFs),
62 /* 5 */ RT_OFFSETOF(VMXVVMCS, GuestGs),
63 /* 6 */ RT_OFFSETOF(VMXVVMCS, GuestLdtr),
64 /* 7 */ RT_OFFSETOF(VMXVVMCS, GuestTr),
65 /* 8 */ RT_OFFSETOF(VMXVVMCS, u16GuestIntStatus),
66 /* 9 */ RT_OFFSETOF(VMXVVMCS, u16PmlIndex),
67 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
68 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
69 },
70 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
71 {
72 /* 0 */ RT_OFFSETOF(VMXVVMCS, HostEs),
73 /* 1 */ RT_OFFSETOF(VMXVVMCS, HostCs),
74 /* 2 */ RT_OFFSETOF(VMXVVMCS, HostSs),
75 /* 3 */ RT_OFFSETOF(VMXVVMCS, HostDs),
76 /* 4 */ RT_OFFSETOF(VMXVVMCS, HostFs),
77 /* 5 */ RT_OFFSETOF(VMXVVMCS, HostGs),
78 /* 6 */ RT_OFFSETOF(VMXVVMCS, HostTr),
79 /* 7-14 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
80 /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
81 /* 23-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
82 },
83 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
84 {
85 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapA),
86 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapB),
87 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64AddrMsrBitmap),
88 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmExitMsrStore),
89 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmExitMsrLoad),
90 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmEntryMsrLoad),
91 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64ExecVmcsPtr),
92 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64AddrPml),
93 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64TscOffset),
94 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64AddrVirtApic),
95 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64AddrApicAccess),
96 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64AddrPostedIntDesc),
97 /* 12 */ RT_OFFSETOF(VMXVVMCS, u64VmFuncCtls),
98 /* 13 */ RT_OFFSETOF(VMXVVMCS, u64EptpPtr),
99 /* 14 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap0),
100 /* 15 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap1),
101 /* 16 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap2),
102 /* 17 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap3),
103 /* 18 */ RT_OFFSETOF(VMXVVMCS, u64AddrEptpList),
104 /* 19 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmreadBitmap),
105 /* 20 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmwriteBitmap),
106 /* 21 */ RT_OFFSETOF(VMXVVMCS, u64AddrXcptVeInfo),
107 /* 22 */ RT_OFFSETOF(VMXVVMCS, u64AddrXssBitmap),
108 /* 23 */ RT_OFFSETOF(VMXVVMCS, u64AddrEnclsBitmap),
109 /* 24 */ UINT16_MAX,
110 /* 25 */ RT_OFFSETOF(VMXVVMCS, u64TscMultiplier)
111 },
112 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
113 {
114 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64GuestPhysAddr),
115 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
116 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
117 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
118 /* 25 */ UINT16_MAX
119 },
120 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
121 {
122 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64VmcsLinkPtr),
123 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64GuestDebugCtlMsr),
124 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64GuestPatMsr),
125 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEferMsr),
126 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
127 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte0),
128 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte1),
129 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte2),
130 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte3),
131 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64GuestBndcfgsMsr),
132 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
133 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
134 },
135 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
136 {
137 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64HostPatMsr),
138 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64HostEferMsr),
139 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64HostPerfGlobalCtlMsr),
140 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
141 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
142 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
143 },
144 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
145 {
146 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32PinCtls),
147 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls),
148 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32XcptBitmap),
149 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMask),
150 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMatch),
151 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32Cr3TargetCount),
152 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32ExitCtls),
153 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrStoreCount),
154 /* 8 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrLoadCount),
155 /* 9 */ RT_OFFSETOF(VMXVVMCS, u32EntryCtls),
156 /* 10 */ RT_OFFSETOF(VMXVVMCS, u32EntryMsrLoadCount),
157 /* 11 */ RT_OFFSETOF(VMXVVMCS, u32EntryIntInfo),
158 /* 12 */ RT_OFFSETOF(VMXVVMCS, u32EntryXcptErrCode),
159 /* 13 */ RT_OFFSETOF(VMXVVMCS, u32EntryInstrLen),
160 /* 14 */ RT_OFFSETOF(VMXVVMCS, u32TprTreshold),
161 /* 15 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls2),
162 /* 16 */ RT_OFFSETOF(VMXVVMCS, u32PleGap),
163 /* 17 */ RT_OFFSETOF(VMXVVMCS, u32PleWindow),
164 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
165 },
166 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
167 {
168 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32RoVmInstrError),
169 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitReason),
170 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitIntInfo),
171 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitErrCode),
172 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
173 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
174 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitInstrLen),
175 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitInstrInfo),
176 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
177 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
178 /* 24-25 */ UINT16_MAX, UINT16_MAX
179 },
180 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
181 {
182 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsLimit),
183 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsLimit),
184 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsLimit),
185 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsLimit),
186 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsLimit),
187 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsLimit),
188 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrLimit),
189 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrLimit),
190 /* 8 */ RT_OFFSETOF(VMXVVMCS, u32GuestGdtrLimit),
191 /* 9 */ RT_OFFSETOF(VMXVVMCS, u32GuestIdtrLimit),
192 /* 10 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsAttr),
193 /* 11 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsAttr),
194 /* 12 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsAttr),
195 /* 13 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsAttr),
196 /* 14 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsAttr),
197 /* 15 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsAttr),
198 /* 16 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrAttr),
199 /* 17 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrAttr),
200 /* 18 */ RT_OFFSETOF(VMXVVMCS, u32GuestIntrState),
201 /* 19 */ RT_OFFSETOF(VMXVVMCS, u32GuestActivityState),
202 /* 20 */ RT_OFFSETOF(VMXVVMCS, u32GuestSmBase),
203 /* 21 */ RT_OFFSETOF(VMXVVMCS, u32GuestSysenterCS),
204 /* 22 */ UINT16_MAX,
205 /* 23 */ RT_OFFSETOF(VMXVVMCS, u32PreemptTimer),
206 /* 24-25 */ UINT16_MAX, UINT16_MAX
207 },
208 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
209 {
210 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32HostSysenterCs),
211 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
212 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
213 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
214 /* 25 */ UINT16_MAX
215 },
216 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_CONTROL: */
217 {
218 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64Cr0Mask),
219 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64Cr4Mask),
220 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64Cr0ReadShadow),
221 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64Cr4ReadShadow),
222 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target0),
223 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target1),
224 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target2),
225 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target3),
226 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
227 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
228 /* 24-25 */ UINT16_MAX, UINT16_MAX
229 },
230 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
231 {
232 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64ExitQual),
233 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64IoRcx),
234 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64IoRsi),
235 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64IoRdi),
236 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64IoRip),
237 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestLinearAddr),
238 /* 6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
239 /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
240 /* 22-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
241 },
242 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
243 {
244 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr0),
245 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr3),
246 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr4),
247 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEsBase),
248 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64GuestCsBase),
249 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestSsBase),
250 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64GuestDsBase),
251 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64GuestFsBase),
252 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64GuestGsBase),
253 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64GuestLdtrBase),
254 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64GuestTrBase),
255 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64GuestGdtrBase),
256 /* 12 */ RT_OFFSETOF(VMXVVMCS, u64GuestIdtrBase),
257 /* 13 */ RT_OFFSETOF(VMXVVMCS, u64GuestDr7),
258 /* 14 */ RT_OFFSETOF(VMXVVMCS, u64GuestRsp),
259 /* 15 */ RT_OFFSETOF(VMXVVMCS, u64GuestRip),
260 /* 16 */ RT_OFFSETOF(VMXVVMCS, u64GuestRFlags),
261 /* 17 */ RT_OFFSETOF(VMXVVMCS, u64GuestPendingDbgXcpt),
262 /* 18 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEsp),
263 /* 19 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEip),
264 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
265 },
266 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_HOST_STATE: */
267 {
268 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64HostCr0),
269 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64HostCr3),
270 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64HostCr4),
271 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64HostFsBase),
272 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64HostGsBase),
273 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64HostTrBase),
274 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64HostGdtrBase),
275 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64HostIdtrBase),
276 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEsp),
277 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEip),
278 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64HostRsp),
279 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64HostRip),
280 /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
281 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
282 }
283};
284
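For reference, a VMCS field encoding packs the access type (bit 0), index (bits 9:1), type (bits 11:10) and width (bits 14:13) into one 32-bit value, and the table above is indexed by (width << 2) | type in the first dimension and by the index in the second. The short standalone sketch below walks through that decomposition using the bit layout from the Intel SDM; the example encoding 0x4802 (guest CS limit) and all variable names are illustrative only and are not taken from VirtualBox headers.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: decompose a VMCS field encoding the same way the
       VMREAD/VMWRITE workers further down do before indexing g_aoffVmcsMap. */
    int main(void)
    {
        uint32_t const uFieldEnc  = 0x4802;                    /* Guest CS limit (example). */
        unsigned const uIndex     = (uFieldEnc >>  1) & 0x1ff; /* Bits  9:1  - index. */
        unsigned const uType      = (uFieldEnc >> 10) & 0x3;   /* Bits 11:10 - type (2 = guest state). */
        unsigned const uWidth     = (uFieldEnc >> 13) & 0x3;   /* Bits 14:13 - width (2 = 32-bit). */
        unsigned const uWidthType = (uWidth << 2) | uType;     /* First dimension of the map. */
        printf("width=%u type=%u index=%u -> g_aoffVmcsMap[%u][%u]\n",
               uWidth, uType, uIndex, uWidthType, uIndex);     /* -> [10][1], i.e. u32GuestCsLimit. */
        return 0;
    }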
285
286/**
287 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
288 * relative offsets.
289 */
290# ifdef IEM_WITH_CODE_TLB
291# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) do { } while (0)
292# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) do { } while (0)
293# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
294# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
295# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
296# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
297# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
298# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
299# error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
300# else /* !IEM_WITH_CODE_TLB */
301# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
302 do \
303 { \
304 Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
305 (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
306 } while (0)
307
308# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
309
310# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
311 do \
312 { \
313 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
314 uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
315 uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
316 (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
317 } while (0)
318
319# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
320 do \
321 { \
322 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
323 (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
324 } while (0)
325
326# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
327 do \
328 { \
329 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
330 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
331 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
332 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
333 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
334 (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
335 } while (0)
336
337# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
338 do \
339 { \
340 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
341 (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
342 } while (0)
343
344# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
345 do \
346 { \
347 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
348 (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
349 } while (0)
350
351# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
352 do \
353 { \
354 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
355 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
356 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
357 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
358 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
359 (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
360 } while (0)
361# endif /* !IEM_WITH_CODE_TLB */
362
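As a quick aside on how these getters behave: multi-byte displacements are reassembled little endian from the decoded opcode bytes, and the _SX_ variants sign-extend to the wider type. The standalone sketch below mirrors what IEM_DISP_GET_S32_SX_U64 computes, over a made-up byte sequence (0F 78 is VMREAD; the ModR/M 0x83 selects a disp32 form); it is an illustration, not VirtualBox code.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 0F 78 /r (VMREAD) with ModR/M 0x83: mod=10, rm=011 -> [rbx/ebx + disp32]. */
        uint8_t  const abOpcode[] = { 0x0f, 0x78, 0x83, 0x10, 0xff, 0xff, 0xff };
        unsigned const offDisp    = 3;                           /* Displacement follows the ModR/M byte. */
        uint32_t const u32Disp    = (uint32_t)abOpcode[offDisp]
                                  | ((uint32_t)abOpcode[offDisp + 1] <<  8)
                                  | ((uint32_t)abOpcode[offDisp + 2] << 16)
                                  | ((uint32_t)abOpcode[offDisp + 3] << 24);
        uint64_t const u64Disp    = (uint64_t)(int64_t)(int32_t)u32Disp;  /* Sign-extend to 64 bits. */
        printf("disp32=%#x -> disp64=%#llx\n", u32Disp, (unsigned long long)u64Disp);
        return 0;
    }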
363/** Whether a shadow VMCS is present for the given VCPU. */
364#define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
365
366/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
367#define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->u64VmcsLinkPtr.u)
368
369/** Whether a current VMCS is present for the given VCPU. */
370#define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
371
372/** Gets the guest-physical address of the current VMCS for the given VCPU. */
373#define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
374
375/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
376#define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
377 do \
378 { \
379 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
380 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
381 } while (0)
382
383/** Clears any current VMCS for the given VCPU. */
384#define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
385 do \
386 { \
387 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
388 } while (0)
389
390/** Check the common VMX instruction preconditions.
391 * @note Any changes here, also check if IEMOP_HLP_VMX_INSTR needs updating.
392 */
393#define IEM_VMX_INSTR_CHECKS(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
394 do { \
395 if ( !IEM_IS_REAL_OR_V86_MODE(a_pVCpu) \
396 && ( !IEM_IS_LONG_MODE(a_pVCpu) \
397 || IEM_IS_64BIT_CODE(a_pVCpu))) \
398 { /* likely */ } \
399 else \
400 { \
401 if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
402 { \
403 Log((a_szInstr ": Real or v8086 mode -> #UD\n")); \
404 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = a_InsDiagPrefix##_RealOrV86Mode; \
405 return iemRaiseUndefinedOpcode(a_pVCpu); \
406 } \
407 if (IEM_IS_LONG_MODE(a_pVCpu) && !IEM_IS_64BIT_CODE(a_pVCpu)) \
408 { \
409 Log((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
410 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = a_InsDiagPrefix##_LongModeCS; \
411 return iemRaiseUndefinedOpcode(a_pVCpu); \
412 } \
413 } \
414 } while (0)
415
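Put differently, the macro above reduces to: #UD in real or virtual-8086 mode, and #UD in long mode unless the current code segment is 64-bit; protected mode and 64-bit mode pass. A minimal standalone restatement of that predicate, with the mode queries taken as plain booleans rather than the IEM_IS_* accessors:

    #include <stdbool.h>

    /* Returns true when the common VMX instruction mode checks pass, false when they #UD. */
    bool vmxInstrModeOk(bool fRealOrV86Mode, bool fLongMode, bool f64BitCode)
    {
        if (fRealOrV86Mode)             /* Real or v8086 mode -> #UD. */
            return false;
        if (fLongMode && !f64BitCode)   /* Compatibility mode (long mode, non-64-bit CS) -> #UD. */
            return false;
        return true;                    /* Protected mode or 64-bit mode. */
    }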
416/** Check for VMX instructions that require the CPU to be in VMX operation.
417 * @note Any changes here, also check if IEMOP_HLP_IN_VMX_OPERATION needs updating. */
418#define IEM_VMX_IN_VMX_OPERATION(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
419 do \
420 { \
421 if (IEM_IS_VMX_ROOT_MODE(a_pVCpu)) \
422 { /* likely */ } \
423 else \
424 { \
425 Log((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
426 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = a_InsDiagPrefix##_VmxRoot; \
427 return iemRaiseUndefinedOpcode(a_pVCpu); \
428 } \
429 } while (0)
430
431
432/**
433 * Returns whether the given VMCS field is valid and supported by our emulation.
434 *
435 * @param pVCpu The cross context virtual CPU structure.
436 * @param uFieldEnc The VMCS field encoding.
437 *
438 * @remarks This takes into account the CPU features exposed to the guest.
439 */
440IEM_STATIC bool iemVmxIsVmcsFieldValid(PVMCPU pVCpu, uint32_t uFieldEnc)
441{
442 PCCPUMFEATURES pFeat = IEM_GET_GUEST_CPU_FEATURES(pVCpu);
443 switch (uFieldEnc)
444 {
445 /*
446 * 16-bit fields.
447 */
448 /* Control fields. */
449 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
450 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
451 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
452
453 /* Guest-state fields. */
454 case VMX_VMCS16_GUEST_ES_SEL:
455 case VMX_VMCS16_GUEST_CS_SEL:
456 case VMX_VMCS16_GUEST_SS_SEL:
457 case VMX_VMCS16_GUEST_DS_SEL:
458 case VMX_VMCS16_GUEST_FS_SEL:
459 case VMX_VMCS16_GUEST_GS_SEL:
460 case VMX_VMCS16_GUEST_LDTR_SEL:
461 case VMX_VMCS16_GUEST_TR_SEL:
462 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
463 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
464
465 /* Host-state fields. */
466 case VMX_VMCS16_HOST_ES_SEL:
467 case VMX_VMCS16_HOST_CS_SEL:
468 case VMX_VMCS16_HOST_SS_SEL:
469 case VMX_VMCS16_HOST_DS_SEL:
470 case VMX_VMCS16_HOST_FS_SEL:
471 case VMX_VMCS16_HOST_GS_SEL:
472 case VMX_VMCS16_HOST_TR_SEL: return true;
473
474 /*
475 * 64-bit fields.
476 */
477 /* Control fields. */
478 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
479 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
480 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
481 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
482 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
483 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
484 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
485 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
486 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
487 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
488 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
489 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
490 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
491 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
492 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
493 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
494 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
495 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
496 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
497 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
498 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
499 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
500 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
501 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
502 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
503 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
504 case VMX_VMCS64_CTRL_EPTP_FULL:
505 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
506 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
507 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
508 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
509 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
510 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
511 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
512 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
513 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
514 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
515 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
516 {
517 uint64_t const uVmFuncMsr = CPUMGetGuestIa32VmxVmFunc(pVCpu);
518 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
519 }
520 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
521 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
522 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
523 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
524 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
525 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
526 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
527 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
528 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
529 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
530 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
531 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
532
533 /* Read-only data fields. */
534 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
535 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
536
537 /* Guest-state fields. */
538 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
539 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
540 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
541 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
542 case VMX_VMCS64_GUEST_PAT_FULL:
543 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
544 case VMX_VMCS64_GUEST_EFER_FULL:
545 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
546 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
547 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
548 case VMX_VMCS64_GUEST_PDPTE0_FULL:
549 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
550 case VMX_VMCS64_GUEST_PDPTE1_FULL:
551 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
552 case VMX_VMCS64_GUEST_PDPTE2_FULL:
553 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
554 case VMX_VMCS64_GUEST_PDPTE3_FULL:
555 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
556 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
557 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
558
559 /* Host-state fields. */
560 case VMX_VMCS64_HOST_PAT_FULL:
561 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
562 case VMX_VMCS64_HOST_EFER_FULL:
563 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
564 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
565 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
566
567 /*
568 * 32-bit fields.
569 */
570 /* Control fields. */
571 case VMX_VMCS32_CTRL_PIN_EXEC:
572 case VMX_VMCS32_CTRL_PROC_EXEC:
573 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
574 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
575 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
576 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
577 case VMX_VMCS32_CTRL_EXIT:
578 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
579 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
580 case VMX_VMCS32_CTRL_ENTRY:
581 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
582 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
583 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
584 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
585 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
586 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
587 case VMX_VMCS32_CTRL_PLE_GAP:
588 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
589
590 /* Read-only data fields. */
591 case VMX_VMCS32_RO_VM_INSTR_ERROR:
592 case VMX_VMCS32_RO_EXIT_REASON:
593 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
594 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
595 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
596 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
597 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
598 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
599
600 /* Guest-state fields. */
601 case VMX_VMCS32_GUEST_ES_LIMIT:
602 case VMX_VMCS32_GUEST_CS_LIMIT:
603 case VMX_VMCS32_GUEST_SS_LIMIT:
604 case VMX_VMCS32_GUEST_DS_LIMIT:
605 case VMX_VMCS32_GUEST_FS_LIMIT:
606 case VMX_VMCS32_GUEST_GS_LIMIT:
607 case VMX_VMCS32_GUEST_LDTR_LIMIT:
608 case VMX_VMCS32_GUEST_TR_LIMIT:
609 case VMX_VMCS32_GUEST_GDTR_LIMIT:
610 case VMX_VMCS32_GUEST_IDTR_LIMIT:
611 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
612 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
613 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
614 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
615 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
616 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
617 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
618 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
619 case VMX_VMCS32_GUEST_INT_STATE:
620 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
621 case VMX_VMCS32_GUEST_SMBASE:
622 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
623 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
624
625 /* Host-state fields. */
626 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
627
628 /*
629 * Natural-width fields.
630 */
631 /* Control fields. */
632 case VMX_VMCS_CTRL_CR0_MASK:
633 case VMX_VMCS_CTRL_CR4_MASK:
634 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
635 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
636 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
637 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
638 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
639 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
640
641 /* Read-only data fields. */
642 case VMX_VMCS_RO_EXIT_QUALIFICATION:
643 case VMX_VMCS_RO_IO_RCX:
644 case VMX_VMCS_RO_IO_RSX:
645 case VMX_VMCS_RO_IO_RDI:
646 case VMX_VMCS_RO_IO_RIP:
647 case VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR: return true;
648
649 /* Guest-state fields. */
650 case VMX_VMCS_GUEST_CR0:
651 case VMX_VMCS_GUEST_CR3:
652 case VMX_VMCS_GUEST_CR4:
653 case VMX_VMCS_GUEST_ES_BASE:
654 case VMX_VMCS_GUEST_CS_BASE:
655 case VMX_VMCS_GUEST_SS_BASE:
656 case VMX_VMCS_GUEST_DS_BASE:
657 case VMX_VMCS_GUEST_FS_BASE:
658 case VMX_VMCS_GUEST_GS_BASE:
659 case VMX_VMCS_GUEST_LDTR_BASE:
660 case VMX_VMCS_GUEST_TR_BASE:
661 case VMX_VMCS_GUEST_GDTR_BASE:
662 case VMX_VMCS_GUEST_IDTR_BASE:
663 case VMX_VMCS_GUEST_DR7:
664 case VMX_VMCS_GUEST_RSP:
665 case VMX_VMCS_GUEST_RIP:
666 case VMX_VMCS_GUEST_RFLAGS:
667 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
668 case VMX_VMCS_GUEST_SYSENTER_ESP:
669 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
670
671 /* Host-state fields. */
672 case VMX_VMCS_HOST_CR0:
673 case VMX_VMCS_HOST_CR3:
674 case VMX_VMCS_HOST_CR4:
675 case VMX_VMCS_HOST_FS_BASE:
676 case VMX_VMCS_HOST_GS_BASE:
677 case VMX_VMCS_HOST_TR_BASE:
678 case VMX_VMCS_HOST_GDTR_BASE:
679 case VMX_VMCS_HOST_IDTR_BASE:
680 case VMX_VMCS_HOST_SYSENTER_ESP:
681 case VMX_VMCS_HOST_SYSENTER_EIP:
682 case VMX_VMCS_HOST_RSP:
683 case VMX_VMCS_HOST_RIP: return true;
684 }
685
686 return false;
687}
688
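A hedged sketch of the call pattern: the VMREAD/VMWRITE workers further down call this validity check before touching the VMCS map and fail the instruction on an unsupported field. The fragment below assumes the usual IEM worker context (pVCpu, cbInstr) and uses only identifiers that appear elsewhere in this file; it is not standalone code.

    uint32_t const uFieldEnc = VMX_VMCS16_VPID;   /* Only valid when the guest sees the VPID feature. */
    if (!iemVmxIsVmcsFieldValid(pVCpu, uFieldEnc))
    {
        iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
        return VINF_SUCCESS;
    }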
689
690/**
691 * Gets VM-exit instruction information along with any displacement for an
692 * instruction VM-exit.
693 *
694 * @returns The VM-exit instruction information.
695 * @param pVCpu The cross context virtual CPU structure.
696 * @param uExitReason The VM-exit reason.
697 * @param uInstrId The VM-exit instruction identity (VMX_INSTR_ID_XXX) if
698 * any. Pass VMX_INSTR_ID_NONE otherwise.
699 * @param fPrimaryOpRead Whether the primary operand of the ModR/M byte (bits 0:3)
700 * is a read operand (true) or a write operand (false).
701 * @param pGCPtrDisp Where to store the displacement field. Optional, can be
702 * NULL.
703 */
704IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, bool fPrimaryOpRead,
705 PRTGCPTR pGCPtrDisp)
706{
707 RTGCPTR GCPtrDisp;
708 VMXEXITINSTRINFO ExitInstrInfo;
709 ExitInstrInfo.u = 0;
710
711 /*
712 * Get and parse the ModR/M byte from our decoded opcodes.
713 */
714 uint8_t bRm;
715 uint8_t const offModRm = pVCpu->iem.s.offModRm;
716 IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
717 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
718 {
719 /*
720 * ModR/M indicates register addressing.
721 *
722 * The primary/secondary register operands are reported in the iReg1 or iReg2
723 * fields depending on whether it is a read/write form.
724 */
725 uint8_t idxReg1;
726 uint8_t idxReg2;
727 if (fPrimaryOpRead)
728 {
729 idxReg1 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
730 idxReg2 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
731 }
732 else
733 {
734 idxReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
735 idxReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
736 }
737 ExitInstrInfo.All.u2Scaling = 0;
738 ExitInstrInfo.All.iReg1 = idxReg1;
739 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
740 ExitInstrInfo.All.fIsRegOperand = 1;
741 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
742 ExitInstrInfo.All.iSegReg = 0;
743 ExitInstrInfo.All.iIdxReg = 0;
744 ExitInstrInfo.All.fIdxRegInvalid = 1;
745 ExitInstrInfo.All.iBaseReg = 0;
746 ExitInstrInfo.All.fBaseRegInvalid = 1;
747 ExitInstrInfo.All.iReg2 = idxReg2;
748
749 /* Displacement not applicable for register addressing. */
750 GCPtrDisp = 0;
751 }
752 else
753 {
754 /*
755 * ModR/M indicates memory addressing.
756 */
757 uint8_t uScale = 0;
758 bool fBaseRegValid = false;
759 bool fIdxRegValid = false;
760 uint8_t iBaseReg = 0;
761 uint8_t iIdxReg = 0;
762 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
763 {
764 /*
765 * Parse the ModR/M, displacement for 16-bit addressing mode.
766 * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
767 */
768 uint16_t u16Disp = 0;
769 uint8_t const offDisp = offModRm + sizeof(bRm);
770 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
771 {
772 /* Displacement without any registers. */
773 IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
774 }
775 else
776 {
777 /* Register (index and base). */
778 switch (bRm & X86_MODRM_RM_MASK)
779 {
780 case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
781 case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
782 case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
783 case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
784 case 4: fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
785 case 5: fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
786 case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
787 case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
788 }
789
790 /* Register + displacement. */
791 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
792 {
793 case 0: break;
794 case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
795 case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp); break;
796 default:
797 {
798 /* Register addressing, handled at the beginning. */
799 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
800 break;
801 }
802 }
803 }
804
805 Assert(!uScale); /* There's no scaling/SIB byte for 16-bit addressing. */
806 GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
807 }
808 else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
809 {
810 /*
811 * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
812 * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
813 */
814 uint32_t u32Disp = 0;
815 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
816 {
817 /* Displacement without any registers. */
818 uint8_t const offDisp = offModRm + sizeof(bRm);
819 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
820 }
821 else
822 {
823 /* Register (and perhaps scale, index and base). */
824 uint8_t offDisp = offModRm + sizeof(bRm);
825 iBaseReg = (bRm & X86_MODRM_RM_MASK);
826 if (iBaseReg == 4)
827 {
828 /* An SIB byte follows the ModR/M byte, parse it. */
829 uint8_t bSib;
830 uint8_t const offSib = offModRm + sizeof(bRm);
831 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
832
833 /* A displacement may follow SIB, update its offset. */
834 offDisp += sizeof(bSib);
835
836 /* Get the scale. */
837 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
838
839 /* Get the index register. */
840 iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
841 fIdxRegValid = RT_BOOL(iIdxReg != 4);
842
843 /* Get the base register. */
844 iBaseReg = bSib & X86_SIB_BASE_MASK;
845 fBaseRegValid = true;
846 if (iBaseReg == 5)
847 {
848 if ((bRm & X86_MODRM_MOD_MASK) == 0)
849 {
850 /* Mod is 0 implies a 32-bit displacement with no base. */
851 fBaseRegValid = false;
852 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
853 }
854 else
855 {
856 /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
857 iBaseReg = X86_GREG_xBP;
858 }
859 }
860 }
861
862 /* Register + displacement. */
863 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
864 {
865 case 0: /* Handled above */ break;
866 case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
867 case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp); break;
868 default:
869 {
870 /* Register addressing, handled at the beginning. */
871 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
872 break;
873 }
874 }
875 }
876
877 GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
878 }
879 else
880 {
881 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
882
883 /*
884 * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
885 * See Intel instruction spec. 2.2 "IA-32e Mode".
886 */
887 uint64_t u64Disp = 0;
888 bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
889 if (fRipRelativeAddr)
890 {
891 /*
892 * RIP-relative addressing mode.
893 *
894 * The displacement is a signed 32-bit value, implying an offset range of +/-2G.
895 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
896 */
897 uint8_t const offDisp = offModRm + sizeof(bRm);
898 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
899 }
900 else
901 {
902 uint8_t offDisp = offModRm + sizeof(bRm);
903
904 /*
905 * Register (and perhaps scale, index and base).
906 *
907 * REX.B extends the most-significant bit of the base register. However, REX.B
908 * is ignored while determining whether an SIB follows the opcode. Hence, we
909 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
910 *
911 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
912 */
913 iBaseReg = (bRm & X86_MODRM_RM_MASK);
914 if (iBaseReg == 4)
915 {
916 /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
917 uint8_t bSib;
918 uint8_t const offSib = offModRm + sizeof(bRm);
919 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
920
921 /* Displacement may follow SIB, update its offset. */
922 offDisp += sizeof(bSib);
923
924 /* Get the scale. */
925 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
926
927 /* Get the index. */
928 iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
929 fIdxRegValid = RT_BOOL(iIdxReg != 4); /* R12 -can- be used as an index register. */
930
931 /* Get the base. */
932 iBaseReg = (bSib & X86_SIB_BASE_MASK);
933 fBaseRegValid = true;
934 if (iBaseReg == 5)
935 {
936 if ((bRm & X86_MODRM_MOD_MASK) == 0)
937 {
938 /* Mod is 0 implies a signed 32-bit displacement with no base. */
939 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
940 }
941 else
942 {
943 /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
944 iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
945 }
946 }
947 }
948 iBaseReg |= pVCpu->iem.s.uRexB;
949
950 /* Register + displacement. */
951 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
952 {
953 case 0: /* Handled above */ break;
954 case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp); break;
955 case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
956 default:
957 {
958 /* Register addressing, handled at the beginning. */
959 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
960 break;
961 }
962 }
963 }
964
965 GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
966 }
967
968 /*
969 * The primary or secondary register operand is reported in iReg2 depending
970 * on whether the primary operand is in read/write form.
971 */
972 uint8_t idxReg2;
973 if (fPrimaryOpRead)
974 {
975 idxReg2 = bRm & X86_MODRM_RM_MASK;
976 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
977 idxReg2 |= pVCpu->iem.s.uRexB;
978 }
979 else
980 {
981 idxReg2 = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
982 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
983 idxReg2 |= pVCpu->iem.s.uRexReg;
984 }
985 ExitInstrInfo.All.u2Scaling = uScale;
986 ExitInstrInfo.All.iReg1 = 0; /* Not applicable for memory addressing. */
987 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
988 ExitInstrInfo.All.fIsRegOperand = 0;
989 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
990 ExitInstrInfo.All.iSegReg = pVCpu->iem.s.iEffSeg;
991 ExitInstrInfo.All.iIdxReg = iIdxReg;
992 ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
993 ExitInstrInfo.All.iBaseReg = iBaseReg;
994 ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
995 ExitInstrInfo.All.iReg2 = idxReg2;
996 }
997
998 /*
999 * Handle exceptions for certain instructions.
1000 * (e.g. some instructions convey an instruction identity).
1001 */
1002 switch (uExitReason)
1003 {
1004 case VMX_EXIT_GDTR_IDTR_ACCESS:
1005 {
1006 Assert(VMX_INSTR_ID_IS_VALID(uInstrId));
1007 ExitInstrInfo.GdtIdt.u2InstrId = VMX_INSTR_ID_GET_ID(uInstrId);
1008 ExitInstrInfo.GdtIdt.u2Undef0 = 0;
1009 break;
1010 }
1011
1012 case VMX_EXIT_LDTR_TR_ACCESS:
1013 {
1014 Assert(VMX_INSTR_ID_IS_VALID(uInstrId));
1015 ExitInstrInfo.LdtTr.u2InstrId = VMX_INSTR_ID_GET_ID(uInstrId);
1016 ExitInstrInfo.LdtTr.u2Undef0 = 0;
1017 break;
1018 }
1019
1020 case VMX_EXIT_RDRAND:
1021 case VMX_EXIT_RDSEED:
1022 {
1023 Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
1024 break;
1025 }
1026 }
1027
1028 /* Update displacement and return the constructed VM-exit instruction information field. */
1029 if (pGCPtrDisp)
1030 *pGCPtrDisp = GCPtrDisp;
1031 return ExitInstrInfo.u;
1032}
1033
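A worked example of the register-form branch above: assume a 64-bit "vmread rbx, r8", i.e. REX.R set and ModR/M 0xC3 (mod=11, reg=000, rm=011), with fPrimaryOpRead passed as false because the r/m operand is written. iReg1 then comes from the r/m field and iReg2 from the reg field. The standalone sketch below redoes just that bit arithmetic (REX bits modelled as 0/1 here and shifted into place, matching how the code above ORs them in); it is an illustration, not VirtualBox code.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t  const bRm     = 0xc3;   /* mod=11 (register form), reg=000, rm=011. */
        unsigned const fRexR   = 1;      /* REX.R extends the reg field. */
        unsigned const fRexB   = 0;      /* REX.B extends the r/m field. */
        unsigned const idxReg1 = (bRm & 0x7)        | (fRexB << 3);  /* Written operand: RBX = 3. */
        unsigned const idxReg2 = ((bRm >> 3) & 0x7) | (fRexR << 3);  /* Read operand:    R8  = 8. */
        printf("iReg1=%u iReg2=%u (fIsRegOperand=1, displacement=0)\n", idxReg1, idxReg2);
        return 0;
    }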
1034
1035/**
1036 * Implements VMSucceed for VMX instruction success.
1037 *
1038 * @param pVCpu The cross context virtual CPU structure.
1039 */
1040DECLINLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
1041{
1042 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1043}
1044
1045
1046/**
1047 * Implements VMFailInvalid for VMX instruction failure.
1048 *
1049 * @param pVCpu The cross context virtual CPU structure.
1050 */
1051DECLINLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
1052{
1053 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1054 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_CF;
1055}
1056
1057
1058/**
1059 * Implements VMFailValid for VMX instruction failure.
1060 *
1061 * @param pVCpu The cross context virtual CPU structure.
1062 * @param enmInsErr The VM instruction error.
1063 */
1064DECLINLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1065{
1066 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1067 {
1068 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1069 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_ZF;
1070 /** @todo NSTVMX: VMWrite enmInsErr to VM-instruction error field. */
1071 RT_NOREF(enmInsErr);
1072 }
1073}
1074
1075
1076/**
1077 * Implements VMFail for VMX instruction failure.
1078 *
1079 * @param pVCpu The cross context virtual CPU structure.
1080 * @param enmInsErr The VM instruction error.
1081 */
1082DECLINLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1083{
1084 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1085 {
1086 iemVmxVmFailValid(pVCpu, enmInsErr);
1087 /** @todo Set VM-instruction error field in the current virtual-VMCS. */
1088 }
1089 else
1090 iemVmxVmFailInvalid(pVCpu);
1091}
1092
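Taken together, the four helpers above implement the VMX status conventions: VMsucceed clears CF, PF, AF, ZF, SF and OF; VMfailInvalid additionally sets CF; VMfailValid additionally sets ZF (writing the VM-instruction error field is still a to-do above); and VMfail picks between the latter two depending on whether a current VMCS exists. A standalone restatement with plain EFLAGS masks (numeric values per the x86 EFLAGS layout, not the X86_EFL_* symbols):

    #include <stdint.h>
    #include <stdio.h>

    #define EFL_CF  0x0001u
    #define EFL_PF  0x0004u
    #define EFL_AF  0x0010u
    #define EFL_ZF  0x0040u
    #define EFL_SF  0x0080u
    #define EFL_OF  0x0800u
    #define EFL_VMX_MASK (EFL_CF | EFL_PF | EFL_AF | EFL_ZF | EFL_SF | EFL_OF)

    static uint32_t vmSucceed(uint32_t fEfl)     { return fEfl & ~EFL_VMX_MASK; }
    static uint32_t vmFailInvalid(uint32_t fEfl) { return (fEfl & ~EFL_VMX_MASK) | EFL_CF; }
    static uint32_t vmFailValid(uint32_t fEfl)   { return (fEfl & ~EFL_VMX_MASK) | EFL_ZF; }

    int main(void)
    {
        uint32_t const fEfl = 0x0202;   /* IF set, everything else clear. */
        printf("succeed=%#x failinvalid=%#x failvalid=%#x\n",
               vmSucceed(fEfl), vmFailInvalid(fEfl), vmFailValid(fEfl));
        return 0;
    }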
1093
1094/**
1095 * Flushes the current VMCS contents back to guest memory.
1096 *
1097 * @returns VBox status code.
1098 * @param pVCpu The cross context virtual CPU structure.
1099 */
1100DECLINLINE(int) iemVmxCommitCurrentVmcsToMemory(PVMCPU pVCpu)
1101{
1102 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1103 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
1104 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), sizeof(VMXVVMCS));
1105 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
1106 return rc;
1107}
1108
1109
1110/**
1111 * Implements VMSucceed for the VMREAD instruction and increments the guest RIP.
1112 *
1113 * @param pVCpu The cross context virtual CPU structure.
1114 */
1115DECLINLINE(void) iemVmxVmreadSuccess(PVMCPU pVCpu, uint8_t cbInstr)
1116{
1117 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmread_Success;
1118 iemVmxVmSucceed(pVCpu);
1119 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1120}
1121
1122
1123/**
1124 * VMREAD common (memory/register) instruction execution worker.
1125 *
1126 * @param pVCpu The cross context virtual CPU structure.
1127 * @param cbInstr The instruction length.
1128 * @param pu64Dst Where to write the VMCS value (only updated when
1129 * VINF_SUCCESS is returned).
1130 * @param uFieldEnc The VMCS field encoding.
1131 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1132 * be NULL.
1133 */
1134IEM_STATIC VBOXSTRICTRC iemVmxVmreadCommon(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint32_t uFieldEnc,
1135 PCVMXVEXITINFO pExitInfo)
1136{
1137 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1138 {
1139 RT_NOREF(pExitInfo); RT_NOREF(cbInstr);
1140 /** @todo NSTVMX: intercept. */
1141 /** @todo NSTVMX: VMCS shadowing intercept (VMREAD bitmap). */
1142 }
1143
1144 /* CPL. */
1145 if (pVCpu->iem.s.uCpl > 0)
1146 {
1147 Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1148 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmread_Cpl;
1149 return iemRaiseGeneralProtectionFault0(pVCpu);
1150 }
1151
1152 /* VMCS pointer in root mode. */
1153 if ( IEM_IS_VMX_ROOT_MODE(pVCpu)
1154 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1155 {
1156 Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
1157 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmread_PtrInvalid;
1158 iemVmxVmFailInvalid(pVCpu);
1159 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1160 return VINF_SUCCESS;
1161 }
1162
1163 /* VMCS-link pointer in non-root mode. */
1164 if ( IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
1165 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
1166 {
1167 Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
1168 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmread_LinkPtrInvalid;
1169 iemVmxVmFailInvalid(pVCpu);
1170 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1171 return VINF_SUCCESS;
1172 }
1173
1174 /* Supported VMCS field. */
1175 if (!iemVmxIsVmcsFieldValid(pVCpu, uFieldEnc))
1176 {
1177 Log(("vmread: VMCS field %#x invalid -> VMFail\n", uFieldEnc));
1178 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmread_FieldInvalid;
1179 iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
1180 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1181 return VINF_SUCCESS;
1182 }
1183
1184 /*
1185 * Setup reading from the current or shadow VMCS.
1186 */
1187 uint8_t *pbVmcs;
1188 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1189 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
1190 else
1191 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1192 Assert(pbVmcs);
1193
1194 PCVMXVMCSFIELDENC pFieldEnc = (PCVMXVMCSFIELDENC)&uFieldEnc;
1195 uint8_t const uWidth = pFieldEnc->n.u2Width;
1196 uint8_t const uType = pFieldEnc->n.u2Type;
1197 uint8_t const uWidthType = (uWidth << 2) | uType;
1198 uint8_t const uIndex = pFieldEnc->n.u8Index;
1199 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
1200 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
1201
1202 /*
1203 * Read the VMCS component based on the field's effective width.
1204 *
1205 * The effective width is the field width, with 64-bit fields adjusted to 32 bits
1206 * when the access type indicates the high part (little endian).
1207 *
1208 * Note! The caller is responsible for trimming the result and updating registers
1209 * or memory locations as required. Here we just zero-extend to the largest
1210 * type (i.e. 64 bits).
1211 */
1212 uint8_t *pbField = pbVmcs + offField;
1213 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(uFieldEnc);
1214 switch (uEffWidth)
1215 {
1216 case VMX_VMCS_ENC_WIDTH_64BIT:
1217 case VMX_VMCS_ENC_WIDTH_NATURAL: *pu64Dst = *(uint64_t *)pbField; break;
1218 case VMX_VMCS_ENC_WIDTH_32BIT: *pu64Dst = *(uint32_t *)pbField; break;
1219 case VMX_VMCS_ENC_WIDTH_16BIT: *pu64Dst = *(uint16_t *)pbField; break;
1220 }
1221 return VINF_SUCCESS;
1222}
1223
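The width switch at the end of the worker just reinterprets the bytes at the field's offset and zero-extends them to 64 bits. A standalone analogue over a plain byte buffer, using memcpy instead of the pointer casts and byte counts instead of the VMX_VMCS_ENC_WIDTH_* values (illustration only):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Zero-extend a 2/4/8-byte field at offField inside a raw VMCS image. */
    static uint64_t readVmcsField(uint8_t const *pbVmcs, uint16_t offField, unsigned cbField)
    {
        uint64_t uVal = 0;
        memcpy(&uVal, pbVmcs + offField, cbField);   /* Little-endian host assumed, as in the code above. */
        return uVal;
    }

    int main(void)
    {
        uint8_t abVmcs[64] = { 0 };
        abVmcs[16] = 0x34;                           /* Pretend a 16-bit field lives at offset 16. */
        abVmcs[17] = 0x12;
        printf("%#llx\n", (unsigned long long)readVmcsField(abVmcs, 16, 2));   /* 0x1234 */
        return 0;
    }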
1224
1225/**
1226 * VMREAD (64-bit register) instruction execution worker.
1227 *
1228 * @param pVCpu The cross context virtual CPU structure.
1229 * @param cbInstr The instruction length.
1230 * @param pu64Dst Where to store the VMCS field's value.
1231 * @param uFieldEnc The VMCS field encoding.
1232 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1233 * be NULL.
1234 */
1235IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg64(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint32_t uFieldEnc,
1236 PCVMXVEXITINFO pExitInfo)
1237{
1238 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
1239 if (rcStrict == VINF_SUCCESS)
1240 {
1241 iemVmxVmreadSuccess(pVCpu, cbInstr);
1242 return VINF_SUCCESS;
1243 }
1244
1245 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1246 return rcStrict;
1247}
1248
1249
1250/**
1251 * VMREAD (32-bit register) instruction execution worker.
1252 *
1253 * @param pVCpu The cross context virtual CPU structure.
1254 * @param cbInstr The instruction length.
1255 * @param pu32Dst Where to store the VMCS field's value.
1256 * @param uFieldEnc The VMCS field encoding.
1257 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1258 * be NULL.
1259 */
1260IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg32(PVMCPU pVCpu, uint8_t cbInstr, uint32_t *pu32Dst, uint32_t uFieldEnc,
1261 PCVMXVEXITINFO pExitInfo)
1262{
1263 uint64_t u64Dst;
1264 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, uFieldEnc, pExitInfo);
1265 if (rcStrict == VINF_SUCCESS)
1266 {
1267 *pu32Dst = u64Dst;
1268 iemVmxVmreadSuccess(pVCpu, cbInstr);
1269 return VINF_SUCCESS;
1270 }
1271
1272 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1273 return rcStrict;
1274}
1275
1276
1277/**
1278 * VMREAD (memory) instruction execution worker.
1279 *
1280 * @param pVCpu The cross context virtual CPU structure.
1281 * @param cbInstr The instruction length.
1282 * @param iEffSeg The effective segment register to use with @a u64Val.
1283 * Pass UINT8_MAX if it is a register access.
1284 * @param enmEffAddrMode The effective addressing mode (only used with memory
1285 * operand).
1286 * @param GCPtrDst The guest linear address to store the VMCS field's
1287 * value.
1288 * @param uFieldEnc The VMCS field encoding.
1289 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1290 * be NULL.
1291 */
1292IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode,
1293 RTGCPTR GCPtrDst, uint32_t uFieldEnc, PCVMXVEXITINFO pExitInfo)
1294{
1295 uint64_t u64Dst;
1296 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, uFieldEnc, pExitInfo);
1297 if (rcStrict == VINF_SUCCESS)
1298 {
1299 /*
1300 * Write the VMCS field's value to the location specified in guest-memory.
1301 *
1302 * The pointer size depends on the address size (address-size prefix allowed).
1303 * The operand size depends on IA-32e mode (operand-size prefix not allowed).
1304 */
1305 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
1306 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
1307 GCPtrDst &= s_auAddrSizeMasks[enmEffAddrMode];
1308
1309 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1310 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
1311 else
1312 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
1313 if (rcStrict == VINF_SUCCESS)
1314 {
1315 iemVmxVmreadSuccess(pVCpu, cbInstr);
1316 return VINF_SUCCESS;
1317 }
1318
1319 Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
1320 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmread_PtrMap;
1321 return rcStrict;
1322 }
1323
1324 Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1325 return rcStrict;
1326}
1327
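The address-size masking used here (and again by the VMWRITE worker below) truncates the effective address to 16, 32 or 64 bits according to the effective addressing mode. A standalone restatement, assuming the IEMMODE_* values index the mask table as 0/1/2 for 16/32/64-bit, as the array use above implies:

    #include <stdint.h>
    #include <stdio.h>

    /* Mask an effective address down to the current address size (0=16, 1=32, 2=64-bit). */
    static uint64_t maskByAddrMode(uint64_t GCPtr, unsigned enmAddrMode)
    {
        static uint64_t const s_auAddrSizeMasks[] =
        { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
        return GCPtr & s_auAddrSizeMasks[enmAddrMode];
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)maskByAddrMode(UINT64_C(0x12345678), 0));   /* 0x5678 */
        printf("%#llx\n", (unsigned long long)maskByAddrMode(UINT64_C(0x123456789a), 1)); /* 0x3456789a */
        return 0;
    }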
1328
1329/**
1330 * VMWRITE instruction execution worker.
1331 *
1332 * @param pVCpu The cross context virtual CPU structure.
1333 * @param cbInstr The instruction length.
1334 * @param iEffSeg The effective segment register to use with @a u64Val.
1335 * Pass UINT8_MAX if it is a register access.
1336 * @param enmEffAddrMode The effective addressing mode (only used with memory
1337 * operand).
1338 * @param u64Val The value to write (or guest linear address to the
1339 * value), @a iEffSeg will indicate if it's a memory
1340 * operand.
1341 * @param uFieldEnc The VMCS field encoding.
1342 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1343 * be NULL.
1344 */
1345IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode, uint64_t u64Val,
1346 uint32_t uFieldEnc, PCVMXVEXITINFO pExitInfo)
1347{
1348 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1349 {
1350 RT_NOREF(pExitInfo);
1351 /** @todo NSTVMX: intercept. */
1352 /** @todo NSTVMX: VMCS shadowing intercept (VMWRITE bitmap). */
1353 }
1354
1355 /* CPL. */
1356 if (pVCpu->iem.s.uCpl > 0)
1357 {
1358 Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1359 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_Cpl;
1360 return iemRaiseGeneralProtectionFault0(pVCpu);
1361 }
1362
1363 /* VMCS pointer in root mode. */
1364 if ( IEM_IS_VMX_ROOT_MODE(pVCpu)
1365 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1366 {
1367 Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
1368 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_PtrInvalid;
1369 iemVmxVmFailInvalid(pVCpu);
1370 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1371 return VINF_SUCCESS;
1372 }
1373
1374 /* VMCS-link pointer in non-root mode. */
1375 if ( IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
1376 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
1377 {
1378 Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
1379 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_LinkPtrInvalid;
1380 iemVmxVmFailInvalid(pVCpu);
1381 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1382 return VINF_SUCCESS;
1383 }
1384
1385 /* If the VMWRITE instruction references memory, access the specified memory operand. */
1386 bool const fIsRegOperand = iEffSeg == UINT8_MAX;
1387 if (!fIsRegOperand)
1388 {
1389 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
1390 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
1391 RTGCPTR const GCPtrVal = u64Val & s_auAddrSizeMasks[enmEffAddrMode];
1392
1393 /* Read the value from the specified guest memory location. */
1394 VBOXSTRICTRC rcStrict;
1395 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1396 rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
1397 else
1398 {
1399 uint32_t u32Val;
1400 rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iEffSeg, GCPtrVal);
1401 u64Val = u32Val;
1402 }
1403 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1404 {
1405 Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
1406 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_PtrMap;
1407 return rcStrict;
1408 }
1409 }
1410 else
1411 Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
1412
1413 /* Supported VMCS field. */
1414 if (!iemVmxIsVmcsFieldValid(pVCpu, uFieldEnc))
1415 {
1416 Log(("vmwrite: VMCS field %#x invalid -> VMFail\n", uFieldEnc));
1417 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_FieldInvalid;
1418 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
1419 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1420 return VINF_SUCCESS;
1421 }
1422
1423 /* Read-only VMCS field. */
1424 bool const fReadOnlyField = HMVmxIsVmcsFieldReadOnly(uFieldEnc);
1425 if ( fReadOnlyField
1426 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
1427 {
1428 Log(("vmwrite: Write to read-only VMCS component %#x -> VMFail\n", uFieldEnc));
1429 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_FieldRo;
1430 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
1431 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1432 return VINF_SUCCESS;
1433 }
1434
1435 /*
1436 * Setup writing to the current or shadow VMCS.
1437 */
1438 uint8_t *pbVmcs;
1439 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1440 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
1441 else
1442 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1443 Assert(pbVmcs);
1444
1445 PCVMXVMCSFIELDENC pFieldEnc = (PCVMXVMCSFIELDENC)&uFieldEnc;
1446 uint8_t const uWidth = pFieldEnc->n.u2Width;
1447 uint8_t const uType = pFieldEnc->n.u2Type;
1448 uint8_t const uWidthType = (uWidth << 2) | uType;
1449 uint8_t const uIndex = pFieldEnc->n.u8Index;
1450 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
1451 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
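 /* offField is the byte offset of the selected field within the virtual VMCS. */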
1452
1453 /*
1454 * Write the VMCS component based on the field's effective width.
1455 *
1456 * The effective width of a 64-bit field is adjusted to 32 bits when the
1457 * access type selects the high part of the field (little endian).
1458 */
1459 uint8_t *pbField = pbVmcs + offField;
1460 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(uFieldEnc);
1461 switch (uEffWidth)
1462 {
1463 case VMX_VMCS_ENC_WIDTH_64BIT:
1464 case VMX_VMCS_ENC_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
1465 case VMX_VMCS_ENC_WIDTH_32BIT: *(uint32_t *)pbField = u64Val; break;
1466 case VMX_VMCS_ENC_WIDTH_16BIT: *(uint16_t *)pbField = u64Val; break;
1467 }
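 /* Note: for 32-bit and 16-bit fields only the corresponding low bits of u64Val
    are written; the remaining bits of the source operand are ignored. */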
1468
1469 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmwrite_Success;
1470 iemVmxVmSucceed(pVCpu);
1471 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1472 return VINF_SUCCESS;
1473}
1474
1475
1476/**
1477 * VMCLEAR instruction execution worker.
1478 *
1479 * @param pVCpu The cross context virtual CPU structure.
1480 * @param cbInstr The instruction length.
1481 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
1482 * @param GCPtrVmcs The linear address of the VMCS pointer.
1483 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1484 * be NULL.
1485 *
1486 * @remarks Common VMX instruction checks are already expected to have been done
1487 * by the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1488 */
1489IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmcs,
1490 PCVMXVEXITINFO pExitInfo)
1491{
1492 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1493 {
1494 RT_NOREF(pExitInfo);
1495 /** @todo NSTVMX: intercept. */
1496 }
1497 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1498
1499 /* CPL. */
1500 if (pVCpu->iem.s.uCpl > 0)
1501 {
1502 Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1503 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_Cpl;
1504 return iemRaiseGeneralProtectionFault0(pVCpu);
1505 }
1506
1507 /* Get the VMCS pointer from the location specified by the source memory operand. */
1508 RTGCPHYS GCPhysVmcs;
1509 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
1510 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1511 {
1512 Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
1513 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrMap;
1514 return rcStrict;
1515 }
1516
1517 /* VMCS pointer alignment. */
1518 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
1519 {
1520 Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
1521 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrAlign;
1522 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1523 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1524 return VINF_SUCCESS;
1525 }
1526
1527 /* VMCS physical-address width limits. */
1528 Assert(!VMX_V_VMCS_PHYSADDR_4G_LIMIT);
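 /* With no 4G physical-address limit advertised, only the CPU's maximum
    physical-address width constrains the VMCS pointer (checked below). */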
1529 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
1530 {
1531 Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
1532 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrWidth;
1533 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1534 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1535 return VINF_SUCCESS;
1536 }
1537
1538 /* VMCS is not the VMXON region. */
1539 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
1540 {
1541 Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
1542 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrVmxon;
1543 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
1544 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1545 return VINF_SUCCESS;
1546 }
1547
1548 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
1549 restriction imposed by our implementation. */
1550 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
1551 {
1552 Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
1553 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrAbnormal;
1554 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
1555 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1556 return VINF_SUCCESS;
1557 }
1558
1559 /*
1560 * VMCLEAR allows committing and clearing any valid VMCS pointer.
1561 *
1562 * If the current VMCS is the one being cleared, set its state to 'clear' and commit
1563 * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
1564 * to 'clear'.
1565 */
1566 uint8_t const fVmcsStateClear = VMX_V_VMCS_STATE_CLEAR;
1567 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
1568 {
1569 Assert(GCPhysVmcs != NIL_RTGCPHYS); /* Paranoia. */
1570 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsStateClear;
1571 iemVmxCommitCurrentVmcsToMemory(pVCpu);
1572 Assert(!IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1573 }
1574 else
1575 {
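 /* The VMCS is not the current VMCS; only its state byte in guest memory is
    set to 'clear'. */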
1576 rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_OFFSETOF(VMXVVMCS, fVmcsState),
1577 (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
1578 }
1579
1580 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_Success;
1581 iemVmxVmSucceed(pVCpu);
1582 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1583 return rcStrict;
1584}
1585
1586
1587/**
1588 * VMPTRST instruction execution worker.
1589 *
1590 * @param pVCpu The cross context virtual CPU structure.
1591 * @param cbInstr The instruction length.
1592 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
1593 * @param GCPtrVmcs The linear address of where to store the current VMCS
1594 * pointer.
1595 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1596 * be NULL.
1597 *
1598 * @remarks Common VMX instruction checks are already expected to have been done
1599 * by the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1600 */
1601IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmcs,
1602 PCVMXVEXITINFO pExitInfo)
1603{
1604 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1605 {
1606 RT_NOREF(pExitInfo);
1607 /** @todo NSTVMX: intercept. */
1608 }
1609 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1610
1611 /* CPL. */
1612 if (pVCpu->iem.s.uCpl > 0)
1613 {
1614 Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1615 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrst_Cpl;
1616 return iemRaiseGeneralProtectionFault0(pVCpu);
1617 }
1618
1619 /* Set the VMCS pointer to the location specified by the destination memory operand. */
1620 AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
1621 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
1622 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1623 {
1624 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrst_Success;
1625 iemVmxVmSucceed(pVCpu);
1626 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1627 return rcStrict;
1628 }
1629
1630 Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand, rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1631 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrst_PtrMap;
1632 return rcStrict;
1633}
1634
1635
1636/**
1637 * VMPTRLD instruction execution worker.
1638 *
1639 * @param pVCpu The cross context virtual CPU structure.
1640 * @param cbInstr The instruction length.
 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
1641 * @param GCPtrVmcs The linear address of the current VMCS pointer.
1642 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
1643 * be NULL.
1644 *
1645 * @remarks Common VMX instruction checks are already expected to have been done
1646 * by the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1647 */
1648IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmcs,
1649 PCVMXVEXITINFO pExitInfo)
1650{
1651 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1652 {
1653 RT_NOREF(pExitInfo);
1654 /** @todo NSTVMX: intercept. */
1655 }
1656 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1657
1658 /* CPL. */
1659 if (pVCpu->iem.s.uCpl > 0)
1660 {
1661 Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1662 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_Cpl;
1663 return iemRaiseGeneralProtectionFault0(pVCpu);
1664 }
1665
1666 /* Get the VMCS pointer from the location specified by the source memory operand. */
1667 RTGCPHYS GCPhysVmcs;
1668 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
1669 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1670 {
1671 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
1672 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrMap;
1673 return rcStrict;
1674 }
1675
1676 /* VMCS pointer alignment. */
1677 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
1678 {
1679 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
1680 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrAlign;
1681 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1682 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1683 return VINF_SUCCESS;
1684 }
1685
1686 /* VMCS physical-address width limits. */
1687 Assert(!VMX_V_VMCS_PHYSADDR_4G_LIMIT);
1688 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
1689 {
1690 Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
1691 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrWidth;
1692 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1693 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1694 return VINF_SUCCESS;
1695 }
1696
1697 /* VMCS is not the VMXON region. */
1698 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
1699 {
1700 Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
1701 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrVmxon;
1702 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
1703 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1704 return VINF_SUCCESS;
1705 }
1706
1707 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
1708 restriction imposed by our implementation. */
1709 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
1710 {
1711 Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
1712 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrAbnormal;
1713 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
1714 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1715 return VINF_SUCCESS;
1716 }
1717
1718 /* Read the VMCS revision ID from the VMCS. */
1719 VMXVMCSREVID VmcsRevId;
1720 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
1721 if (RT_FAILURE(rc))
1722 {
1723 Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
1724 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_PtrReadPhys;
1725 return rc;
1726 }
1727
1728 /* Verify the VMCS revision specified by the guest matches what we reported to the guest,
1729 also check VMCS shadowing feature. */
1730 if ( VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
1731 || ( VmcsRevId.n.fIsShadowVmcs
1732 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
1733 {
1734 if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
1735 {
1736 Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFail()\n", VMX_V_VMCS_REVISION_ID,
1737 VmcsRevId.n.u31RevisionId));
1738 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_VmcsRevId;
1739 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
1740 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1741 return VINF_SUCCESS;
1742 }
1743
1744 Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
1745 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_ShadowVmcs;
1746 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
1747 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1748 return VINF_SUCCESS;
1749 }
1750
1751 /*
1752 * We only maintain the current VMCS in our virtual CPU context (CPUMCTX). Therefore,
1753 * VMPTRLD shall always flush any existing current VMCS back to guest memory before loading
1754 * a new VMCS as current.
1755 */
1756 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
1757 {
1758 iemVmxCommitCurrentVmcsToMemory(pVCpu);
1759 IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
1760 }
1761 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_Success;
1762 iemVmxVmSucceed(pVCpu);
1763 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1764 return VINF_SUCCESS;
1765}
1766
1767
1768/**
1769 * VMXON instruction execution worker.
1770 *
1771 * @param pVCpu The cross context virtual CPU structure.
1772 * @param cbInstr The instruction length.
1773 * @param iEffSeg The effective segment register to use with @a
1774 * GCPtrVmxon.
1775 * @param GCPtrVmxon The linear address of the VMXON pointer.
1776 * @param pExitInfo Pointer to the VM-exit instruction information struct.
1777 * Optional, can be NULL.
1778 *
1779 * @remarks Common VMX instruction checks are already expected to have been done
1780 * by the caller, i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1781 */
1782IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmxon,
1783 PCVMXVEXITINFO pExitInfo)
1784{
1785#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
1786 RT_NOREF5(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
1787 return VINF_EM_RAW_EMULATE_INSTR;
1788#else
1789 if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
1790 {
1791 /* CPL. */
1792 if (pVCpu->iem.s.uCpl > 0)
1793 {
1794 Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1795 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cpl;
1796 return iemRaiseGeneralProtectionFault0(pVCpu);
1797 }
1798
1799 /* A20M (A20 Masked) mode. */
1800 if (!PGMPhysIsA20Enabled(pVCpu))
1801 {
1802 Log(("vmxon: A20M mode -> #GP(0)\n"));
1803 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_A20M;
1804 return iemRaiseGeneralProtectionFault0(pVCpu);
1805 }
1806
1807 /* CR0 fixed bits. */
1808 bool const fUnrestrictedGuest = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxUnrestrictedGuest;
1809 uint64_t const uCr0Fixed0 = fUnrestrictedGuest ? VMX_V_CR0_FIXED0_UX : VMX_V_CR0_FIXED0;
1810 if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
1811 {
1812 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
1813 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cr0Fixed0;
1814 return iemRaiseGeneralProtectionFault0(pVCpu);
1815 }
1816
1817 /* CR4 fixed bits. */
1818 if ((pVCpu->cpum.GstCtx.cr4 & VMX_V_CR4_FIXED0) != VMX_V_CR4_FIXED0)
1819 {
1820 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
1821 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Cr4Fixed0;
1822 return iemRaiseGeneralProtectionFault0(pVCpu);
1823 }
1824
1825 /* Feature control MSR's LOCK and VMXON bits. */
1826 uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
1827 if ( (uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
 != (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
1828 {
1829 Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
1830 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_MsrFeatCtl;
1831 return iemRaiseGeneralProtectionFault0(pVCpu);
1832 }
1833
1834 /* Get the VMXON pointer from the location specified by the source memory operand. */
1835 RTGCPHYS GCPhysVmxon;
1836 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
1837 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1838 {
1839 Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
1840 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrMap;
1841 return rcStrict;
1842 }
1843
1844 /* VMXON region pointer alignment. */
1845 if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
1846 {
1847 Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
1848 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrAlign;
1849 iemVmxVmFailInvalid(pVCpu);
1850 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1851 return VINF_SUCCESS;
1852 }
1853
1854 /* VMXON physical-address width limits. */
1855 Assert(!VMX_V_VMCS_PHYSADDR_4G_LIMIT);
1856 if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
1857 {
1858 Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
1859 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrWidth;
1860 iemVmxVmFailInvalid(pVCpu);
1861 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1862 return VINF_SUCCESS;
1863 }
1864
1865 /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
1866 restriction imposed by our implementation. */
1867 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
1868 {
1869 Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
1870 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrAbnormal;
1871 iemVmxVmFailInvalid(pVCpu);
1872 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1873 return VINF_SUCCESS;
1874 }
1875
1876 /* Read the VMCS revision ID from the VMXON region. */
1877 VMXVMCSREVID VmcsRevId;
1878 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
1879 if (RT_FAILURE(rc))
1880 {
1881 Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
1882 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_PtrReadPhys;
1883 return rc;
1884 }
1885
1886 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
1887 if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
1888 {
1889 /* Revision ID mismatch. */
1890 if (!VmcsRevId.n.fIsShadowVmcs)
1891 {
1892 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
1893 VmcsRevId.n.u31RevisionId));
1894 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmcsRevId;
1895 iemVmxVmFailInvalid(pVCpu);
1896 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1897 return VINF_SUCCESS;
1898 }
1899
1900 /* Shadow VMCS disallowed. */
1901 Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
1902 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_ShadowVmcs;
1903 iemVmxVmFailInvalid(pVCpu);
1904 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1905 return VINF_SUCCESS;
1906 }
1907
1908 /*
1909 * Record that we're in VMX operation, block INIT, block and disable A20M.
1910 */
1911 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
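 /* VMXON invalidates the current-VMCS pointer (sets it to ~0). */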
1912 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
1913 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
1914 /** @todo NSTVMX: clear address-range monitoring. */
1915 /** @todo NSTVMX: Intel PT. */
1916 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_Success;
1917 iemVmxVmSucceed(pVCpu);
1918 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1919# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
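 /* Nested hardware-virtualization is only implemented in IEM here, so switch the
    VM to execute everything in IEM from now on (undone again by VMXOFF). */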
1920 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
1921# else
1922 return VINF_SUCCESS;
1923# endif
1924 }
1925 else if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1926 {
1927 RT_NOREF(pExitInfo);
1928 /** @todo NSTVMX: intercept. */
1929 }
1930
1931 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1932
1933 /* CPL. */
1934 if (pVCpu->iem.s.uCpl > 0)
1935 {
1936 Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1937 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmxRootCpl;
1938 return iemRaiseGeneralProtectionFault0(pVCpu);
1939 }
1940
1941 /* VMXON when already in VMX root mode. */
1942 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
1943 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxon_VmxAlreadyRoot;
1944 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1945 return VINF_SUCCESS;
1946#endif
1947}
1948
1949
1950/**
1951 * VMLAUNCH instruction execution worker.
1952 *
1953 * @param pVCpu The cross context virtual CPU structure.
1954 * @param cbInstr The instruction length.
1955 * @param pExitInfo Pointer to the VM-exit instruction information struct.
1956 * Optional, can be NULL.
1957 *
1958 * @remarks Common VMX instruction checks are already expected to have been done
1959 * by the caller, i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1960 */
1961IEM_STATIC VBOXSTRICTRC iemVmxVmlaunch(PVMCPU pVCpu, uint8_t cbInstr, PCVMXVEXITINFO pExitInfo)
1962{
1963 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
1964 {
1965 RT_NOREF(pExitInfo);
1966 /** @todo NSTVMX: intercept. */
1967 }
1968 Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
1969
1970 /* CPL. */
1971 if (pVCpu->iem.s.uCpl > 0)
1972 {
1973 Log(("vmlaunch: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
1974 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmlaunch_Cpl;
1975 return iemRaiseGeneralProtectionFault0(pVCpu);
1976 }
1977
1978 /** @todo NSTVMX: VMLAUNCH impl. */
1979 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1980 return VERR_IEM_IPE_2;
1981}
1982
1983
1984/**
1985 * Implements 'VMXON'.
1986 */
1987IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
1988{
1989 return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
1990}
1991
1992
1993/**
1994 * Implements 'VMXOFF'.
1995 *
1996 * @remarks Common VMX instruction checks are already expected to have been done
1997 * by the caller, i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
1998 */
1999IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
2000{
2001# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
2002 RT_NOREF2(pVCpu, cbInstr);
2003 return VINF_EM_RAW_EMULATE_INSTR;
2004# else
2005 if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
2006 {
2007 /** @todo NSTVMX: intercept. */
2008 }
2009
2010 /* CPL. */
2011 if (pVCpu->iem.s.uCpl > 0)
2012 {
2013 Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
2014 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_Cpl;
2015 return iemRaiseGeneralProtectionFault0(pVCpu);
2016 }
2017
2018 /* Dual monitor treatment of SMIs and SMM. */
2019 uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
2020 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
2021 {
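 /* VMXOFF is not allowed while the dual-monitor treatment of SMIs and SMM is active. */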
2022 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
2023 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2024 return VINF_SUCCESS;
2025 }
2026
2027 /*
2028 * Record that we're no longer in VMX root operation, unblock INIT and re-enable A20M.
2029 */
2030 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
2031 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
2032
2033 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
2034 { /** @todo NSTVMX: Unblock SMI. */ }
2035 /** @todo NSTVMX: Unblock and enable A20M. */
2036 /** @todo NSTVMX: Clear address-range monitoring. */
2037
2038 pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmxoff_Success;
2039 iemVmxVmSucceed(pVCpu);
2040 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2041# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
2042 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
2043# else
2044 return VINF_SUCCESS;
2045# endif
2046# endif
2047}
2048
2049
2050/**
2051 * Implements 'VMLAUNCH'.
2052 */
2053IEM_CIMPL_DEF_0(iemCImpl_vmlaunch)
2054{
2055 return iemVmxVmlaunch(pVCpu, cbInstr, NULL /* pExitInfo */);
2056}
2057
2058
2059/**
2060 * Implements 'VMPTRLD'.
2061 */
2062IEM_CIMPL_DEF_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
2063{
2064 return iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
2065}
2066
2067
2068/**
2069 * Implements 'VMPTRST'.
2070 */
2071IEM_CIMPL_DEF_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
2072{
2073 return iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
2074}
2075
2076
2077/**
2078 * Implements 'VMCLEAR'.
2079 */
2080IEM_CIMPL_DEF_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
2081{
2082 return iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
2083}
2084
2085
2086/**
2087 * Implements 'VMWRITE' register.
2088 */
2089IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint32_t, uFieldEnc)
2090{
2091 return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, IEMMODE_64BIT /* N/A */, u64Val, uFieldEnc,
2092 NULL /* pExitInfo */);
2093}
2094
2095
2096/**
2097 * Implements 'VMWRITE' memory.
2098 */
2099IEM_CIMPL_DEF_4(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrVal, uint32_t, uFieldEnc)
2100{
2101 return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrVal, uFieldEnc, NULL /* pExitInfo */);
2102}
2103
2104
2105/**
2106 * Implements 'VMREAD' 64-bit register.
2107 */
2108IEM_CIMPL_DEF_2(iemCImpl_vmread64_reg, uint64_t *, pu64Dst, uint32_t, uFieldEnc)
2109{
2110 return iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, NULL /* pExitInfo */);
2111}
2112
2113
2114/**
2115 * Implements 'VMREAD' 32-bit register.
2116 */
2117IEM_CIMPL_DEF_2(iemCImpl_vmread32_reg, uint32_t *, pu32Dst, uint32_t, uFieldEnc)
2118{
2119 return iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, NULL /* pExitInfo */);
2120}
2121
2122
2123/**
2124 * Implements 'VMREAD' memory.
2125 */
2126IEM_CIMPL_DEF_4(iemCImpl_vmread_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrDst, uint32_t, uFieldEnc)
2127{
2128 return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, NULL /* pExitInfo */);
2129}
2130
2131#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
2132