VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h@ 74591

Last change on this file since 74591 was 74591, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 VM-exit bits; LGDT, SGDT intercepts.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 241.2 KB
1/* $Id: IEMAllCImplVmxInstr.cpp.h 74591 2018-10-03 05:04:09Z vboxsync $ */
2/** @file
3 * IEM - VT-x instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
20/** @todo NSTVMX: The following VM-exit intercepts are pending:
21 * VMX_EXIT_XCPT_OR_NMI
22 * VMX_EXIT_EXT_INT
23 * VMX_EXIT_TRIPLE_FAULT
24 * VMX_EXIT_INIT_SIGNAL
25 * VMX_EXIT_SIPI
26 * VMX_EXIT_IO_SMI
27 * VMX_EXIT_SMI
28 * VMX_EXIT_INT_WINDOW
29 * VMX_EXIT_NMI_WINDOW
30 * VMX_EXIT_TASK_SWITCH
31 * VMX_EXIT_GETSEC
32 * VMX_EXIT_INVD
33 * VMX_EXIT_RSM
34 * VMX_EXIT_MOV_CRX
35 * VMX_EXIT_MOV_DRX
36 * VMX_EXIT_IO_INSTR
37 * VMX_EXIT_MWAIT
38 * VMX_EXIT_MTF
39 * VMX_EXIT_MONITOR (APIC access VM-exit caused by MONITOR pending)
40 * VMX_EXIT_PAUSE
41 * VMX_EXIT_ERR_MACHINE_CHECK
42 * VMX_EXIT_TPR_BELOW_THRESHOLD
43 * VMX_EXIT_APIC_ACCESS
44 * VMX_EXIT_VIRTUALIZED_EOI
45 * VMX_EXIT_LDTR_TR_ACCESS
46 * VMX_EXIT_EPT_VIOLATION
47 * VMX_EXIT_EPT_MISCONFIG
48 * VMX_EXIT_INVEPT
49 * VMX_EXIT_PREEMPT_TIMER
50 * VMX_EXIT_INVVPID
51 * VMX_EXIT_WBINVD
52 * VMX_EXIT_XSETBV
53 * VMX_EXIT_APIC_WRITE
54 * VMX_EXIT_RDRAND
55 * VMX_EXIT_VMFUNC
56 * VMX_EXIT_ENCLS
57 * VMX_EXIT_RDSEED
58 * VMX_EXIT_PML_FULL
59 * VMX_EXIT_XSAVES
60 * VMX_EXIT_XRSTORS
61 */
62
63/**
64 * Map of VMCS field encodings to their virtual-VMCS structure offsets.
65 *
66 * The first array dimension is the VMCS field encoding's Width OR'ed with its Type, and the
67 * second dimension is the field's Index, see VMXVMCSFIELDENC.
68 */
69uint16_t const g_aoffVmcsMap[16][VMX_V_VMCS_MAX_INDEX + 1] =
70{
71 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
72 {
73 /* 0 */ RT_OFFSETOF(VMXVVMCS, u16Vpid),
74 /* 1 */ RT_OFFSETOF(VMXVVMCS, u16PostIntNotifyVector),
75 /* 2 */ RT_OFFSETOF(VMXVVMCS, u16EptpIndex),
76 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
77 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
78 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
79 },
80 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
81 {
82 /* 0-7 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
83 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
84 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
85 /* 24-25 */ UINT16_MAX, UINT16_MAX
86 },
87 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
88 {
89 /* 0 */ RT_OFFSETOF(VMXVVMCS, GuestEs),
90 /* 1 */ RT_OFFSETOF(VMXVVMCS, GuestCs),
91 /* 2 */ RT_OFFSETOF(VMXVVMCS, GuestSs),
92 /* 3 */ RT_OFFSETOF(VMXVVMCS, GuestDs),
93 /* 4 */ RT_OFFSETOF(VMXVVMCS, GuestFs),
94 /* 5 */ RT_OFFSETOF(VMXVVMCS, GuestGs),
95 /* 6 */ RT_OFFSETOF(VMXVVMCS, GuestLdtr),
96 /* 7 */ RT_OFFSETOF(VMXVVMCS, GuestTr),
97 /* 8 */ RT_OFFSETOF(VMXVVMCS, u16GuestIntStatus),
98 /* 9 */ RT_OFFSETOF(VMXVVMCS, u16PmlIndex),
99 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
100 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
101 },
102 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
103 {
104 /* 0 */ RT_OFFSETOF(VMXVVMCS, HostEs),
105 /* 1 */ RT_OFFSETOF(VMXVVMCS, HostCs),
106 /* 2 */ RT_OFFSETOF(VMXVVMCS, HostSs),
107 /* 3 */ RT_OFFSETOF(VMXVVMCS, HostDs),
108 /* 4 */ RT_OFFSETOF(VMXVVMCS, HostFs),
109 /* 5 */ RT_OFFSETOF(VMXVVMCS, HostGs),
110 /* 6 */ RT_OFFSETOF(VMXVVMCS, HostTr),
111 /* 7-14 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
112 /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
113 /* 23-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
114 },
115 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
116 {
117 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapA),
118 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapB),
119 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64AddrMsrBitmap),
120 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64AddrExitMsrStore),
121 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64AddrExitMsrLoad),
122 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64AddrEntryMsrLoad),
123 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64ExecVmcsPtr),
124 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64AddrPml),
125 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64TscOffset),
126 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64AddrVirtApic),
127 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64AddrApicAccess),
128 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64AddrPostedIntDesc),
129 /* 12 */ RT_OFFSETOF(VMXVVMCS, u64VmFuncCtls),
130 /* 13 */ RT_OFFSETOF(VMXVVMCS, u64EptpPtr),
131 /* 14 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap0),
132 /* 15 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap1),
133 /* 16 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap2),
134 /* 17 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap3),
135 /* 18 */ RT_OFFSETOF(VMXVVMCS, u64AddrEptpList),
136 /* 19 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmreadBitmap),
137 /* 20 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmwriteBitmap),
138 /* 21 */ RT_OFFSETOF(VMXVVMCS, u64AddrXcptVeInfo),
139 /* 22 */ RT_OFFSETOF(VMXVVMCS, u64AddrXssBitmap),
140 /* 23 */ RT_OFFSETOF(VMXVVMCS, u64AddrEnclsBitmap),
141 /* 24 */ UINT16_MAX,
142 /* 25 */ RT_OFFSETOF(VMXVVMCS, u64TscMultiplier)
143 },
144 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
145 {
146 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64RoGuestPhysAddr),
147 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
148 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
149 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
150 /* 25 */ UINT16_MAX
151 },
152 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
153 {
154 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64VmcsLinkPtr),
155 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64GuestDebugCtlMsr),
156 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64GuestPatMsr),
157 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEferMsr),
158 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
159 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte0),
160 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte1),
161 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte2),
162 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte3),
163 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64GuestBndcfgsMsr),
164 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
165 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
166 },
167 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
168 {
169 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64HostPatMsr),
170 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64HostEferMsr),
171 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64HostPerfGlobalCtlMsr),
172 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
173 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
174 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
175 },
176 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
177 {
178 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32PinCtls),
179 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls),
180 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32XcptBitmap),
181 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMask),
182 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMatch),
183 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32Cr3TargetCount),
184 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32ExitCtls),
185 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrStoreCount),
186 /* 8 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrLoadCount),
187 /* 9 */ RT_OFFSETOF(VMXVVMCS, u32EntryCtls),
188 /* 10 */ RT_OFFSETOF(VMXVVMCS, u32EntryMsrLoadCount),
189 /* 11 */ RT_OFFSETOF(VMXVVMCS, u32EntryIntInfo),
190 /* 12 */ RT_OFFSETOF(VMXVVMCS, u32EntryXcptErrCode),
191 /* 13 */ RT_OFFSETOF(VMXVVMCS, u32EntryInstrLen),
192 /* 14 */ RT_OFFSETOF(VMXVVMCS, u32TprThreshold),
193 /* 15 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls2),
194 /* 16 */ RT_OFFSETOF(VMXVVMCS, u32PleGap),
195 /* 17 */ RT_OFFSETOF(VMXVVMCS, u32PleWindow),
196 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
197 },
198 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
199 {
200 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32RoVmInstrError),
201 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32RoExitReason),
202 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32RoExitIntInfo),
203 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32RoExitErrCode),
204 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
205 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
206 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32RoExitInstrLen),
207 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32RoExitInstrInfo),
208 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
209 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
210 /* 24-25 */ UINT16_MAX, UINT16_MAX
211 },
212 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
213 {
214 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsLimit),
215 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsLimit),
216 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsLimit),
217 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsLimit),
218 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsLimit),
219 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsLimit),
220 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrLimit),
221 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrLimit),
222 /* 8 */ RT_OFFSETOF(VMXVVMCS, u32GuestGdtrLimit),
223 /* 9 */ RT_OFFSETOF(VMXVVMCS, u32GuestIdtrLimit),
224 /* 10 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsAttr),
225 /* 11 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsAttr),
226 /* 12 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsAttr),
227 /* 13 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsAttr),
228 /* 14 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsAttr),
229 /* 15 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsAttr),
230 /* 16 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrAttr),
231 /* 17 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrAttr),
232 /* 18 */ RT_OFFSETOF(VMXVVMCS, u32GuestIntrState),
233 /* 19 */ RT_OFFSETOF(VMXVVMCS, u32GuestActivityState),
234 /* 20 */ RT_OFFSETOF(VMXVVMCS, u32GuestSmBase),
235 /* 21 */ RT_OFFSETOF(VMXVVMCS, u32GuestSysenterCS),
236 /* 22 */ UINT16_MAX,
237 /* 23 */ RT_OFFSETOF(VMXVVMCS, u32PreemptTimer),
238 /* 24-25 */ UINT16_MAX, UINT16_MAX
239 },
240 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
241 {
242 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32HostSysenterCs),
243 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
244 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
245 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
246 /* 25 */ UINT16_MAX
247 },
248 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_CONTROL: */
249 {
250 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64Cr0Mask),
251 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64Cr4Mask),
252 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64Cr0ReadShadow),
253 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64Cr4ReadShadow),
254 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target0),
255 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target1),
256 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target2),
257 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target3),
258 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
259 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
260 /* 24-25 */ UINT16_MAX, UINT16_MAX
261 },
262 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
263 {
264 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64RoExitQual),
265 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64RoIoRcx),
266 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64RoIoRsi),
267 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64RoIoRdi),
268 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64RoIoRip),
269 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64RoGuestLinearAddr),
270 /* 6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
271 /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
272 /* 22-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
273 },
274 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
275 {
276 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr0),
277 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr3),
278 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr4),
279 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEsBase),
280 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64GuestCsBase),
281 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestSsBase),
282 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64GuestDsBase),
283 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64GuestFsBase),
284 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64GuestGsBase),
285 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64GuestLdtrBase),
286 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64GuestTrBase),
287 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64GuestGdtrBase),
288 /* 12 */ RT_OFFSETOF(VMXVVMCS, u64GuestIdtrBase),
289 /* 13 */ RT_OFFSETOF(VMXVVMCS, u64GuestDr7),
290 /* 14 */ RT_OFFSETOF(VMXVVMCS, u64GuestRsp),
291 /* 15 */ RT_OFFSETOF(VMXVVMCS, u64GuestRip),
292 /* 16 */ RT_OFFSETOF(VMXVVMCS, u64GuestRFlags),
293 /* 17 */ RT_OFFSETOF(VMXVVMCS, u64GuestPendingDbgXcpt),
294 /* 18 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEsp),
295 /* 19 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEip),
296 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
297 },
298 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_HOST_STATE: */
299 {
300 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64HostCr0),
301 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64HostCr3),
302 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64HostCr4),
303 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64HostFsBase),
304 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64HostGsBase),
305 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64HostTrBase),
306 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64HostGdtrBase),
307 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64HostIdtrBase),
308 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEsp),
309 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEip),
310 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64HostRsp),
311 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64HostRip),
312 /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
313 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
314 }
315};
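/*
 * A minimal illustrative sketch of how a VMCS field encoding is decomposed and used to
 * look up its offset in the virtual VMCS via g_aoffVmcsMap above. The raw shifts/masks
 * here assume the Intel SDM field-encoding layout (access type in bit 0, index in bits
 * 9:1, type in bits 11:10, width in bits 14:13); the code in this file uses the
 * VMX_BF_VMCS_ENC_XXX bitfields for the same purpose.
 *
 *     uint32_t const uFieldEnc = 0x4802;                     // e.g. 32-bit guest CS limit
 *     uint8_t  const uWidth    = (uFieldEnc >> 13) & 0x3;    // 2 = 32-bit
 *     uint8_t  const uType     = (uFieldEnc >> 10) & 0x3;    // 2 = guest-state
 *     uint8_t  const uIndex    = (uFieldEnc >>  1) & 0x1ff;  // 1
 *     uint16_t const offField  = g_aoffVmcsMap[(uWidth << 2) | uType][uIndex];
 *     // offField == RT_OFFSETOF(VMXVVMCS, u32GuestCsLimit); UINT16_MAX would mean "unsupported".
 */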
316
317
318/**
319 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
320 * relative offsets.
321 */
322# ifdef IEM_WITH_CODE_TLB
323# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) do { } while (0)
324# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) do { } while (0)
325# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
326# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
327# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
328# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
329# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
330# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
331# error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
332# else /* !IEM_WITH_CODE_TLB */
333# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
334 do \
335 { \
336 Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
337 (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
338 } while (0)
339
340# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
341
342# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
343 do \
344 { \
345 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
346 uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
347 uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
348 (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
349 } while (0)
350
351# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
352 do \
353 { \
354 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
355 (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
356 } while (0)
357
358# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
359 do \
360 { \
361 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
362 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
363 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
364 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
365 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
366 (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
367 } while (0)
368
369# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
370 do \
371 { \
372 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
373 (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
374 } while (0)
375
376# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
377 do \
378 { \
379 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
380 (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
381 } while (0)
382
383# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
384 do \
385 { \
386 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
387 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
388 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
389 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
390 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
391 (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
392 } while (0)
393# endif /* !IEM_WITH_CODE_TLB */
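/*
 * Illustrative sketch: the displacement getters above assemble little-endian bytes from
 * the decoded opcode buffer. Assuming a decoded byte sequence of
 * { 0x0f, 0x78, 0x96, 0x34, 0x12 } in abOpcode with the displacement starting at offset 3:
 *
 *     uint16_t u16Disp;
 *     IEM_DISP_GET_U16(pVCpu, u16Disp, 3);    // yields u16Disp == 0x1234
 */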
394
395/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
396#define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)
397
398/** Whether a shadow VMCS is present for the given VCPU. */
399#define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
400
401/** Gets the VMXON region pointer. */
402#define IEM_VMX_GET_VMXON_PTR(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
403
404/** Gets the guest-physical address of the current VMCS for the given VCPU. */
405#define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
406
407/** Whether a current VMCS is present for the given VCPU. */
408#define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
409
410/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
411#define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
412 do \
413 { \
414 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
415 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
416 } while (0)
417
418/** Clears any current VMCS for the given VCPU. */
419#define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
420 do \
421 { \
422 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
423 } while (0)
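/*
 * Illustrative sketch of how the current-VMCS macros above fit together in a
 * VMPTRLD-like flow; the validation and mapping of the region at GCPhysVmcs is elided
 * and GCPhysVmcs is a placeholder for the operand's guest-physical address.
 *
 *     if (   !IEM_VMX_HAS_CURRENT_VMCS(pVCpu)
 *         || IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
 *     {
 *         // ... validate and load the VMCS at GCPhysVmcs (elided) ...
 *         IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
 *     }
 */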
424
425/** Checks, for VMX instructions that require it, that the CPU is in VMX operation; raises \#UD otherwise.
 426 * @note Any changes here, check if IEMOP_HLP_IN_VMX_OPERATION needs updating. */
427#define IEM_VMX_IN_VMX_OPERATION(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
428 do \
429 { \
430 if (IEM_VMX_IS_ROOT_MODE(a_pVCpu)) \
431 { /* likely */ } \
432 else \
433 { \
434 Log((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
435 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
436 return iemRaiseUndefinedOpcode(a_pVCpu); \
437 } \
438 } while (0)
439
440/** Marks a VM-entry failure with a diagnostic reason, logs and returns. */
441#define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_VmxDiag) \
442 do \
443 { \
444 Log(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_VmxDiag), \
445 HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
446 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
447 return VERR_VMX_VMENTRY_FAILED; \
448 } while (0)
449
450/** Marks a VM-exit failure with a diagnostic reason, logs and returns. */
451#define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
452 do \
453 { \
454 Log(("VM-exit failed! uExitReason=%u enmDiag=%u (%s) -> %s\n", (a_uExitReason), (a_VmxDiag), \
455 HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
456 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
457 return VERR_VMX_VMEXIT_FAILED; \
458 } while (0)
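/*
 * Illustrative sketch of how the failure macros above are typically used from a
 * VM-entry check; the condition, the failure string and the pairing with this
 * particular diagnostic value are placeholders (the diagnostic itself is one used
 * later in this file).
 *
 *     if (!fCheckPassed)   // fCheckPassed: placeholder for an actual VM-entry check
 *         IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, "seg-base", kVmxVDiag_Vmentry_GuestSegBaseCs);
 */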
459
460
461
462/**
463 * Returns whether the given VMCS field is valid and supported by our emulation.
464 *
465 * @param pVCpu The cross context virtual CPU structure.
466 * @param u64FieldEnc The VMCS field encoding.
467 *
468 * @remarks This takes into account the CPU features exposed to the guest.
469 */
470IEM_STATIC bool iemVmxIsVmcsFieldValid(PVMCPU pVCpu, uint64_t u64FieldEnc)
471{
472 uint32_t const uFieldEncHi = RT_HI_U32(u64FieldEnc);
473 uint32_t const uFieldEncLo = RT_LO_U32(u64FieldEnc);
474 if (!uFieldEncHi)
475 { /* likely */ }
476 else
477 return false;
478
479 PCCPUMFEATURES pFeat = IEM_GET_GUEST_CPU_FEATURES(pVCpu);
480 switch (uFieldEncLo)
481 {
482 /*
483 * 16-bit fields.
484 */
485 /* Control fields. */
486 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
487 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
488 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
489
490 /* Guest-state fields. */
491 case VMX_VMCS16_GUEST_ES_SEL:
492 case VMX_VMCS16_GUEST_CS_SEL:
493 case VMX_VMCS16_GUEST_SS_SEL:
494 case VMX_VMCS16_GUEST_DS_SEL:
495 case VMX_VMCS16_GUEST_FS_SEL:
496 case VMX_VMCS16_GUEST_GS_SEL:
497 case VMX_VMCS16_GUEST_LDTR_SEL:
498 case VMX_VMCS16_GUEST_TR_SEL:
499 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
500 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
501
502 /* Host-state fields. */
503 case VMX_VMCS16_HOST_ES_SEL:
504 case VMX_VMCS16_HOST_CS_SEL:
505 case VMX_VMCS16_HOST_SS_SEL:
506 case VMX_VMCS16_HOST_DS_SEL:
507 case VMX_VMCS16_HOST_FS_SEL:
508 case VMX_VMCS16_HOST_GS_SEL:
509 case VMX_VMCS16_HOST_TR_SEL: return true;
510
511 /*
512 * 64-bit fields.
513 */
514 /* Control fields. */
515 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
516 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
517 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
518 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
519 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
520 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
521 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
522 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
523 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
524 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
525 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
526 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
527 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
528 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
529 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
530 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
531 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
532 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
533 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
534 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
535 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
536 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
537 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
538 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
539 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
540 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
541 case VMX_VMCS64_CTRL_EPTP_FULL:
542 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
543 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
544 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
545 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
546 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
547 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
548 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
549 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
550 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
551 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
552 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
553 {
554 uint64_t const uVmFuncMsr = CPUMGetGuestIa32VmxVmFunc(pVCpu);
555 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
556 }
557 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
558 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
559 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
560 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
561 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
562 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
563 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
564 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
565 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
566 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
567 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
568 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
569
570 /* Read-only data fields. */
571 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
572 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
573
574 /* Guest-state fields. */
575 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
576 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
577 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
578 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
579 case VMX_VMCS64_GUEST_PAT_FULL:
580 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
581 case VMX_VMCS64_GUEST_EFER_FULL:
582 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
583 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
584 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
585 case VMX_VMCS64_GUEST_PDPTE0_FULL:
586 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
587 case VMX_VMCS64_GUEST_PDPTE1_FULL:
588 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
589 case VMX_VMCS64_GUEST_PDPTE2_FULL:
590 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
591 case VMX_VMCS64_GUEST_PDPTE3_FULL:
592 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
593 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
594 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
595
596 /* Host-state fields. */
597 case VMX_VMCS64_HOST_PAT_FULL:
598 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
599 case VMX_VMCS64_HOST_EFER_FULL:
600 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
601 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
602 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
603
604 /*
605 * 32-bit fields.
606 */
607 /* Control fields. */
608 case VMX_VMCS32_CTRL_PIN_EXEC:
609 case VMX_VMCS32_CTRL_PROC_EXEC:
610 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
611 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
612 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
613 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
614 case VMX_VMCS32_CTRL_EXIT:
615 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
616 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
617 case VMX_VMCS32_CTRL_ENTRY:
618 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
619 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
620 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
621 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
622 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
623 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
624 case VMX_VMCS32_CTRL_PLE_GAP:
625 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
626
627 /* Read-only data fields. */
628 case VMX_VMCS32_RO_VM_INSTR_ERROR:
629 case VMX_VMCS32_RO_EXIT_REASON:
630 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
631 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
632 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
633 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
634 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
635 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
636
637 /* Guest-state fields. */
638 case VMX_VMCS32_GUEST_ES_LIMIT:
639 case VMX_VMCS32_GUEST_CS_LIMIT:
640 case VMX_VMCS32_GUEST_SS_LIMIT:
641 case VMX_VMCS32_GUEST_DS_LIMIT:
642 case VMX_VMCS32_GUEST_FS_LIMIT:
643 case VMX_VMCS32_GUEST_GS_LIMIT:
644 case VMX_VMCS32_GUEST_LDTR_LIMIT:
645 case VMX_VMCS32_GUEST_TR_LIMIT:
646 case VMX_VMCS32_GUEST_GDTR_LIMIT:
647 case VMX_VMCS32_GUEST_IDTR_LIMIT:
648 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
649 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
650 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
651 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
652 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
653 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
654 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
655 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
656 case VMX_VMCS32_GUEST_INT_STATE:
657 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
658 case VMX_VMCS32_GUEST_SMBASE:
659 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
660 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
661
662 /* Host-state fields. */
663 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
664
665 /*
666 * Natural-width fields.
667 */
668 /* Control fields. */
669 case VMX_VMCS_CTRL_CR0_MASK:
670 case VMX_VMCS_CTRL_CR4_MASK:
671 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
672 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
673 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
674 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
675 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
676 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
677
678 /* Read-only data fields. */
679 case VMX_VMCS_RO_EXIT_QUALIFICATION:
680 case VMX_VMCS_RO_IO_RCX:
681 case VMX_VMCS_RO_IO_RSI:
682 case VMX_VMCS_RO_IO_RDI:
683 case VMX_VMCS_RO_IO_RIP:
684 case VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR: return true;
685
686 /* Guest-state fields. */
687 case VMX_VMCS_GUEST_CR0:
688 case VMX_VMCS_GUEST_CR3:
689 case VMX_VMCS_GUEST_CR4:
690 case VMX_VMCS_GUEST_ES_BASE:
691 case VMX_VMCS_GUEST_CS_BASE:
692 case VMX_VMCS_GUEST_SS_BASE:
693 case VMX_VMCS_GUEST_DS_BASE:
694 case VMX_VMCS_GUEST_FS_BASE:
695 case VMX_VMCS_GUEST_GS_BASE:
696 case VMX_VMCS_GUEST_LDTR_BASE:
697 case VMX_VMCS_GUEST_TR_BASE:
698 case VMX_VMCS_GUEST_GDTR_BASE:
699 case VMX_VMCS_GUEST_IDTR_BASE:
700 case VMX_VMCS_GUEST_DR7:
701 case VMX_VMCS_GUEST_RSP:
702 case VMX_VMCS_GUEST_RIP:
703 case VMX_VMCS_GUEST_RFLAGS:
704 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
705 case VMX_VMCS_GUEST_SYSENTER_ESP:
706 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
707
708 /* Host-state fields. */
709 case VMX_VMCS_HOST_CR0:
710 case VMX_VMCS_HOST_CR3:
711 case VMX_VMCS_HOST_CR4:
712 case VMX_VMCS_HOST_FS_BASE:
713 case VMX_VMCS_HOST_GS_BASE:
714 case VMX_VMCS_HOST_TR_BASE:
715 case VMX_VMCS_HOST_GDTR_BASE:
716 case VMX_VMCS_HOST_IDTR_BASE:
717 case VMX_VMCS_HOST_SYSENTER_ESP:
718 case VMX_VMCS_HOST_SYSENTER_EIP:
719 case VMX_VMCS_HOST_RSP:
720 case VMX_VMCS_HOST_RIP: return true;
721 }
722
723 return false;
724}
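/*
 * Illustrative usage sketch: a VMREAD/VMWRITE implementation would consult the validity
 * check above before touching the virtual VMCS; the error value passed to iemVmxVmFail()
 * below is a placeholder.
 *
 *     if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
 *         iemVmxVmFail(pVCpu, enmInsErr);   // enmInsErr: the appropriate VM-instruction error
 */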
725
726
727/**
728 * Gets a host selector from the VMCS.
729 *
730 * @param pVmcs Pointer to the virtual VMCS.
731 * @param iSegReg The index of the segment register (X86_SREG_XXX).
732 */
733DECLINLINE(RTSEL) iemVmxVmcsGetHostSelReg(PCVMXVVMCS pVmcs, uint8_t iSegReg)
734{
735 Assert(iSegReg < X86_SREG_COUNT);
736 RTSEL HostSel;
737 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
738 uint8_t const uType = VMX_VMCS_ENC_TYPE_HOST_STATE;
 739 uint8_t const uWidthType = (uWidth << 2) | uType;
 740 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_HOST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
741 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
742 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
743 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
744 uint8_t const *pbField = pbVmcs + offField;
745 HostSel = *(uint16_t *)pbField;
746 return HostSel;
747}
748
749
750/**
751 * Sets a guest segment register in the VMCS.
752 *
753 * @param pVmcs Pointer to the virtual VMCS.
754 * @param iSegReg The index of the segment register (X86_SREG_XXX).
755 * @param pSelReg Pointer to the segment register.
756 */
757IEM_STATIC void iemVmxVmcsSetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCCPUMSELREG pSelReg)
758{
759 Assert(pSelReg);
760 Assert(iSegReg < X86_SREG_COUNT);
761
762 /* Selector. */
763 {
764 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
765 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
766 uint8_t const uWidthType = (uWidth << 2) | uType;
767 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
768 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
769 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
770 uint8_t *pbVmcs = (uint8_t *)pVmcs;
771 uint8_t *pbField = pbVmcs + offField;
772 *(uint16_t *)pbField = pSelReg->Sel;
773 }
774
775 /* Limit. */
776 {
777 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
778 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
779 uint8_t const uWidthType = (uWidth << 2) | uType;
780 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
781 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
782 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
783 uint8_t *pbVmcs = (uint8_t *)pVmcs;
784 uint8_t *pbField = pbVmcs + offField;
785 *(uint32_t *)pbField = pSelReg->u32Limit;
786 }
787
788 /* Base. */
789 {
790 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
791 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
792 uint8_t const uWidthType = (uWidth << 2) | uType;
793 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
 794 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
 795 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
 796 uint8_t *pbVmcs = (uint8_t *)pVmcs;
 797 uint8_t *pbField = pbVmcs + offField;
798 *(uint64_t *)pbField = pSelReg->u64Base;
799 }
800
801 /* Attributes. */
802 {
803 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
804 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
805 | X86DESCATTR_UNUSABLE;
806 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
807 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
808 uint8_t const uWidthType = (uWidth << 2) | uType;
809 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
810 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
811 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
812 uint8_t *pbVmcs = (uint8_t *)pVmcs;
813 uint8_t *pbField = pbVmcs + offField;
814 *(uint32_t *)pbField = pSelReg->Attr.u & fValidAttrMask;
815 }
816}
817
818
819/**
820 * Gets a guest segment register from the VMCS.
821 *
822 * @returns VBox status code.
823 * @param pVmcs Pointer to the virtual VMCS.
824 * @param iSegReg The index of the segment register (X86_SREG_XXX).
825 * @param pSelReg Where to store the segment register (only updated when
826 * VINF_SUCCESS is returned).
827 *
828 * @remarks Warning! This does not validate the contents of the retrieved segment
829 * register.
830 */
831IEM_STATIC int iemVmxVmcsGetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCPUMSELREG pSelReg)
832{
833 Assert(pSelReg);
834 Assert(iSegReg < X86_SREG_COUNT);
835
836 /* Selector. */
837 uint16_t u16Sel;
838 {
839 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
840 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
841 uint8_t const uWidthType = (uWidth << 2) | uType;
842 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
843 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
844 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
845 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
846 uint8_t const *pbField = pbVmcs + offField;
847 u16Sel = *(uint16_t *)pbField;
848 }
849
850 /* Limit. */
851 uint32_t u32Limit;
852 {
853 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
854 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
855 uint8_t const uWidthType = (uWidth << 2) | uType;
856 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
857 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
858 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
859 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
860 uint8_t const *pbField = pbVmcs + offField;
861 u32Limit = *(uint32_t *)pbField;
862 }
863
864 /* Base. */
865 uint64_t u64Base;
866 {
867 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
868 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
869 uint8_t const uWidthType = (uWidth << 2) | uType;
870 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
871 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
872 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
873 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
874 uint8_t const *pbField = pbVmcs + offField;
875 u64Base = *(uint64_t *)pbField;
876 /** @todo NSTVMX: Should we zero out high bits here for 32-bit virtual CPUs? */
877 }
878
879 /* Attributes. */
880 uint32_t u32Attr;
881 {
882 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
883 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
884 uint8_t const uWidthType = (uWidth << 2) | uType;
885 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
886 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
887 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
888 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
889 uint8_t const *pbField = pbVmcs + offField;
890 u32Attr = *(uint32_t *)pbField;
891 }
892
893 pSelReg->Sel = u16Sel;
894 pSelReg->ValidSel = u16Sel;
895 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
896 pSelReg->u32Limit = u32Limit;
897 pSelReg->u64Base = u64Base;
898 pSelReg->Attr.u = u32Attr;
899 return VINF_SUCCESS;
900}
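/*
 * Illustrative sketch: the getter above and iemVmxVmcsSetGuestSegReg() are symmetric,
 * so a segment register can be round-tripped through the virtual VMCS (attributes are
 * masked to the valid bits on the way in).
 *
 *     CPUMSELREG SsReg;
 *     int rc = iemVmxVmcsGetGuestSegReg(pVmcs, X86_SREG_SS, &SsReg);
 *     if (RT_SUCCESS(rc))
 *         iemVmxVmcsSetGuestSegReg(pVmcs, X86_SREG_SS, &SsReg);
 */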
901
902
903/**
904 * Gets VM-exit instruction information along with any displacement for an
905 * instruction VM-exit.
906 *
907 * @returns The VM-exit instruction information.
908 * @param pVCpu The cross context virtual CPU structure.
909 * @param uExitReason The VM-exit reason.
910 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_XXX).
911 * @param pGCPtrDisp Where to store the displacement field. Optional, can be
912 * NULL.
913 */
914IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, PRTGCPTR pGCPtrDisp)
915{
916 RTGCPTR GCPtrDisp;
917 VMXEXITINSTRINFO ExitInstrInfo;
918 ExitInstrInfo.u = 0;
919
920 /*
921 * Get and parse the ModR/M byte from our decoded opcodes.
922 */
923 uint8_t bRm;
924 uint8_t const offModRm = pVCpu->iem.s.offModRm;
925 IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
926 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
927 {
928 /*
929 * ModR/M indicates register addressing.
930 *
931 * The primary/secondary register operands are reported in the iReg1 or iReg2
932 * fields depending on whether it is a read/write form.
933 */
934 uint8_t idxReg1;
935 uint8_t idxReg2;
936 if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
937 {
938 idxReg1 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
939 idxReg2 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
940 }
941 else
942 {
943 idxReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
944 idxReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
945 }
946 ExitInstrInfo.All.u2Scaling = 0;
947 ExitInstrInfo.All.iReg1 = idxReg1;
948 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
949 ExitInstrInfo.All.fIsRegOperand = 1;
950 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
951 ExitInstrInfo.All.iSegReg = 0;
952 ExitInstrInfo.All.iIdxReg = 0;
953 ExitInstrInfo.All.fIdxRegInvalid = 1;
954 ExitInstrInfo.All.iBaseReg = 0;
955 ExitInstrInfo.All.fBaseRegInvalid = 1;
956 ExitInstrInfo.All.iReg2 = idxReg2;
957
958 /* Displacement not applicable for register addressing. */
959 GCPtrDisp = 0;
960 }
961 else
962 {
963 /*
964 * ModR/M indicates memory addressing.
965 */
966 uint8_t uScale = 0;
967 bool fBaseRegValid = false;
968 bool fIdxRegValid = false;
969 uint8_t iBaseReg = 0;
970 uint8_t iIdxReg = 0;
971 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
972 {
973 /*
974 * Parse the ModR/M, displacement for 16-bit addressing mode.
975 * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
976 */
977 uint16_t u16Disp = 0;
978 uint8_t const offDisp = offModRm + sizeof(bRm);
979 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
980 {
981 /* Displacement without any registers. */
982 IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
983 }
984 else
985 {
986 /* Register (index and base). */
987 switch (bRm & X86_MODRM_RM_MASK)
988 {
989 case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
990 case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
991 case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
992 case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
993 case 4: fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
994 case 5: fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
995 case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
996 case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
997 }
998
999 /* Register + displacement. */
1000 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
1001 {
1002 case 0: break;
1003 case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
1004 case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp); break;
1005 default:
1006 {
1007 /* Register addressing, handled at the beginning. */
1008 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
1009 break;
1010 }
1011 }
1012 }
1013
1014 Assert(!uScale); /* There's no scaling/SIB byte for 16-bit addressing. */
1015 GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
1016 }
1017 else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
1018 {
1019 /*
1020 * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
1021 * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
1022 */
1023 uint32_t u32Disp = 0;
1024 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
1025 {
1026 /* Displacement without any registers. */
1027 uint8_t const offDisp = offModRm + sizeof(bRm);
1028 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
1029 }
1030 else
1031 {
1032 /* Register (and perhaps scale, index and base). */
1033 uint8_t offDisp = offModRm + sizeof(bRm);
1034 iBaseReg = (bRm & X86_MODRM_RM_MASK);
1035 if (iBaseReg == 4)
1036 {
1037 /* An SIB byte follows the ModR/M byte, parse it. */
1038 uint8_t bSib;
1039 uint8_t const offSib = offModRm + sizeof(bRm);
1040 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
1041
1042 /* A displacement may follow SIB, update its offset. */
1043 offDisp += sizeof(bSib);
1044
1045 /* Get the scale. */
1046 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
1047
1048 /* Get the index register. */
1049 iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
1050 fIdxRegValid = RT_BOOL(iIdxReg != 4);
1051
1052 /* Get the base register. */
1053 iBaseReg = bSib & X86_SIB_BASE_MASK;
1054 fBaseRegValid = true;
1055 if (iBaseReg == 5)
1056 {
1057 if ((bRm & X86_MODRM_MOD_MASK) == 0)
1058 {
1059 /* Mod is 0 implies a 32-bit displacement with no base. */
1060 fBaseRegValid = false;
1061 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
1062 }
1063 else
1064 {
1065 /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
1066 iBaseReg = X86_GREG_xBP;
1067 }
1068 }
1069 }
1070
1071 /* Register + displacement. */
1072 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
1073 {
1074 case 0: /* Handled above */ break;
1075 case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
1076 case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp); break;
1077 default:
1078 {
1079 /* Register addressing, handled at the beginning. */
1080 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
1081 break;
1082 }
1083 }
1084 }
1085
1086 GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
1087 }
1088 else
1089 {
1090 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
1091
1092 /*
1093 * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
1094 * See Intel instruction spec. 2.2 "IA-32e Mode".
1095 */
1096 uint64_t u64Disp = 0;
1097 bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
1098 if (fRipRelativeAddr)
1099 {
1100 /*
1101 * RIP-relative addressing mode.
1102 *
1103 * The displacement is 32-bit signed implying an offset range of +/-2G.
1104 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
1105 */
1106 uint8_t const offDisp = offModRm + sizeof(bRm);
1107 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
1108 }
1109 else
1110 {
1111 uint8_t offDisp = offModRm + sizeof(bRm);
1112
1113 /*
1114 * Register (and perhaps scale, index and base).
1115 *
1116 * REX.B extends the most-significant bit of the base register. However, REX.B
1117 * is ignored while determining whether an SIB follows the opcode. Hence, we
1118 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
1119 *
1120 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
1121 */
1122 iBaseReg = (bRm & X86_MODRM_RM_MASK);
1123 if (iBaseReg == 4)
1124 {
1125 /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
1126 uint8_t bSib;
1127 uint8_t const offSib = offModRm + sizeof(bRm);
1128 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
1129
1130 /* Displacement may follow SIB, update its offset. */
1131 offDisp += sizeof(bSib);
1132
1133 /* Get the scale. */
1134 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
1135
1136 /* Get the index. */
1137 iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
1138 fIdxRegValid = RT_BOOL(iIdxReg != 4); /* R12 -can- be used as an index register. */
1139
1140 /* Get the base. */
1141 iBaseReg = (bSib & X86_SIB_BASE_MASK);
1142 fBaseRegValid = true;
1143 if (iBaseReg == 5)
1144 {
1145 if ((bRm & X86_MODRM_MOD_MASK) == 0)
1146 {
1147 /* Mod is 0 implies a signed 32-bit displacement with no base. */
1148 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
1149 }
1150 else
1151 {
1152 /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
1153 iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
1154 }
1155 }
1156 }
1157 iBaseReg |= pVCpu->iem.s.uRexB;
1158
1159 /* Register + displacement. */
1160 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
1161 {
1162 case 0: /* Handled above */ break;
1163 case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp); break;
1164 case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
1165 default:
1166 {
1167 /* Register addressing, handled at the beginning. */
1168 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
1169 break;
1170 }
1171 }
1172 }
1173
1174 GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
1175 }
1176
1177 /*
1178 * The primary or secondary register operand is reported in iReg2 depending
1179 * on whether the primary operand is in read/write form.
1180 */
1181 uint8_t idxReg2;
1182 if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
1183 {
1184 idxReg2 = bRm & X86_MODRM_RM_MASK;
1185 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
1186 idxReg2 |= pVCpu->iem.s.uRexB;
1187 }
1188 else
1189 {
1190 idxReg2 = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
1191 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
1192 idxReg2 |= pVCpu->iem.s.uRexReg;
1193 }
1194 ExitInstrInfo.All.u2Scaling = uScale;
1195 ExitInstrInfo.All.iReg1 = 0; /* Not applicable for memory addressing. */
1196 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
1197 ExitInstrInfo.All.fIsRegOperand = 0;
1198 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
1199 ExitInstrInfo.All.iSegReg = pVCpu->iem.s.iEffSeg;
1200 ExitInstrInfo.All.iIdxReg = iIdxReg;
1201 ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
1202 ExitInstrInfo.All.iBaseReg = iBaseReg;
1203 ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
1204 ExitInstrInfo.All.iReg2 = idxReg2;
1205 }
1206
1207 /*
1208 * Handle exceptions to the norm for certain instructions.
1209 * (e.g. some instructions convey an instruction identity in place of iReg2).
1210 */
1211 switch (uExitReason)
1212 {
1213 case VMX_EXIT_GDTR_IDTR_ACCESS:
1214 {
1215 Assert(VMXINSTRID_IS_VALID(uInstrId));
1216 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
1217 ExitInstrInfo.GdtIdt.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
1218 ExitInstrInfo.GdtIdt.u2Undef0 = 0;
1219 break;
1220 }
1221
1222 case VMX_EXIT_LDTR_TR_ACCESS:
1223 {
1224 Assert(VMXINSTRID_IS_VALID(uInstrId));
1225 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
1226 ExitInstrInfo.LdtTr.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
1227 ExitInstrInfo.LdtTr.u2Undef0 = 0;
1228 break;
1229 }
1230
1231 case VMX_EXIT_RDRAND:
1232 case VMX_EXIT_RDSEED:
1233 {
1234 Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
1235 break;
1236 }
1237 }
1238
1239 /* Update displacement and return the constructed VM-exit instruction information field. */
1240 if (pGCPtrDisp)
1241 *pGCPtrDisp = GCPtrDisp;
1242
1243 return ExitInstrInfo.u;
1244}
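/*
 * Illustrative sketch of how an instruction-intercept VM-exit path might feed the result
 * of iemVmxGetExitInstrInfo() into the VMCS setters that follow; per the SDM, for these
 * intercepts the operand displacement is what lands in the exit qualification. The exit
 * reason shown is an example only and uInstrId is whatever the caller passed in.
 *
 *     RTGCPTR GCPtrDisp;
 *     uint32_t const uInstrInfo = iemVmxGetExitInstrInfo(pVCpu, VMX_EXIT_LDTR_TR_ACCESS,
 *                                                        uInstrId, &GCPtrDisp);
 *     iemVmxVmcsSetExitInstrInfo(pVCpu, uInstrInfo);
 *     iemVmxVmcsSetExitQual(pVCpu, GCPtrDisp);
 */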
1245
1246
1247/**
1248 * Sets the VM-instruction error VMCS field.
1249 *
1250 * @param pVCpu The cross context virtual CPU structure.
1251 * @param enmInsErr The VM-instruction error.
1252 */
1253DECL_FORCE_INLINE(void) iemVmxVmcsSetVmInstrErr(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1254{
1255 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1256 pVmcs->u32RoVmInstrError = enmInsErr;
1257}
1258
1259
1260/**
1261 * Sets the VM-exit qualification VMCS field.
1262 *
1263 * @param pVCpu The cross context virtual CPU structure.
1264 * @param uExitQual The VM-exit qualification field.
1265 */
1266DECL_FORCE_INLINE(void) iemVmxVmcsSetExitQual(PVMCPU pVCpu, uint64_t uExitQual)
1267{
1268 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1269 pVmcs->u64RoExitQual.u = uExitQual;
1270}
1271
1272
1273/**
1274 * Sets the VM-exit guest-linear address VMCS field.
1275 *
1276 * @param pVCpu The cross context virtual CPU structure.
1277 * @param uGuestLinearAddr The VM-exit guest-linear address field.
1278 */
1279DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestLinearAddr(PVMCPU pVCpu, uint64_t uGuestLinearAddr)
1280{
1281 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1282 pVmcs->u64RoGuestLinearAddr.u = uGuestLinearAddr;
1283}
1284
1285
1286/**
1287 * Sets the VM-exit guest-physical address VMCS field.
1288 *
1289 * @param pVCpu The cross context virtual CPU structure.
1290 * @param uGuestPhysAddr The VM-exit guest-physical address field.
1291 */
1292DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestPhysAddr(PVMCPU pVCpu, uint64_t uGuestPhysAddr)
1293{
1294 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1295 pVmcs->u64RoGuestPhysAddr.u = uGuestPhysAddr;
1296}
1297
1298
1299/**
1300 * Sets the VM-exit instruction length VMCS field.
1301 *
1302 * @param pVCpu The cross context virtual CPU structure.
1303 * @param cbInstr The VM-exit instruction length (in bytes).
1304 */
1305DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrLen(PVMCPU pVCpu, uint32_t cbInstr)
1306{
1307 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1308 pVmcs->u32RoExitInstrLen = cbInstr;
1309}
1310
1311
1312/**
1313 * Sets the VM-exit instruction info. VMCS field.
1314 *
1315 * @param pVCpu The cross context virtual CPU structure.
1316 * @param uExitInstrInfo The VM-exit instruction info. field.
1317 */
1318DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitInstrInfo)
1319{
1320 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1321 pVmcs->u32RoExitInstrInfo = uExitInstrInfo;
1322}
1323
1324
1325/**
1326 * Implements VMSucceed for VMX instruction success.
1327 *
1328 * @param pVCpu The cross context virtual CPU structure.
1329 */
1330DECL_FORCE_INLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
1331{
1332 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1333}
1334
1335
1336/**
1337 * Implements VMFailInvalid for VMX instruction failure.
1338 *
1339 * @param pVCpu The cross context virtual CPU structure.
1340 */
1341DECL_FORCE_INLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
1342{
1343 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1344 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_CF;
1345}
1346
1347
1348/**
1349 * Implements VMFailValid for VMX instruction failure.
1350 *
1351 * @param pVCpu The cross context virtual CPU structure.
1352 * @param enmInsErr The VM instruction error.
1353 */
1354DECL_FORCE_INLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1355{
1356 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1357 {
1358 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1359 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_ZF;
1360 iemVmxVmcsSetVmInstrErr(pVCpu, enmInsErr);
1361 }
1362}
1363
1364
1365/**
1366 * Implements VMFail for VMX instruction failure.
1367 *
1368 * @param pVCpu The cross context virtual CPU structure.
1369 * @param enmInsErr The VM instruction error.
1370 */
1371DECL_FORCE_INLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1372{
1373 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1374 iemVmxVmFailValid(pVCpu, enmInsErr);
1375 else
1376 iemVmxVmFailInvalid(pVCpu);
1377}
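/*
 * Summary of the RFLAGS conventions implemented by the helpers above, matching the
 * VMsucceed/VMfailInvalid/VMfailValid pseudo-functions in the Intel SDM:
 *
 *     iemVmxVmSucceed(pVCpu);          // CF, PF, AF, ZF, SF, OF all cleared
 *     iemVmxVmFailInvalid(pVCpu);      // CF set, the other five cleared
 *     iemVmxVmFail(pVCpu, enmInsErr);  // with a current VMCS: ZF set, others cleared and
 *                                      // the VM-instruction error field set to enmInsErr;
 *                                      // otherwise behaves like iemVmxVmFailInvalid().
 */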
1378
1379
1380/**
1381 * Checks if the given auto-load/store MSR area count is valid for the
1382 * implementation.
1383 *
1384 * @returns @c true if it's within the valid limit, @c false otherwise.
1385 * @param pVCpu The cross context virtual CPU structure.
1386 * @param uMsrCount The MSR area count to check.
1387 */
1388DECL_FORCE_INLINE(bool) iemVmxIsAutoMsrCountValid(PVMCPU pVCpu, uint32_t uMsrCount)
1389{
1390 uint64_t const u64VmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
1391 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(u64VmxMiscMsr);
1392 Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
1393 if (uMsrCount <= cMaxSupportedMsrs)
1394 return true;
1395 return false;
1396}
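/*
 * Worked example: assuming the virtual IA32_VMX_MISC MSR reports the architectural
 * minimum of 512 auto-load/store MSRs (i.e. VMX_MISC_MAX_MSRS() yields 512):
 *
 *     iemVmxIsAutoMsrCountValid(pVCpu, 512);   // true  - at the limit
 *     iemVmxIsAutoMsrCountValid(pVCpu, 513);   // false - exceeds the limit
 */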
1397
1398
1399/**
1400 * Flushes the current VMCS contents back to guest memory.
1401 *
1402 * @returns VBox status code.
1403 * @param pVCpu The cross context virtual CPU structure.
1404 */
1405DECL_FORCE_INLINE(int) iemVmxCommitCurrentVmcsToMemory(PVMCPU pVCpu)
1406{
1407 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1408 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
1409 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), sizeof(VMXVVMCS));
1410 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
1411 return rc;
1412}
1413
1414
1415/**
1416 * Implements VMSucceed for the VMREAD instruction and increments the guest RIP.
1417 *
1418 * @param pVCpu The cross context virtual CPU structure.
1419 */
1420DECL_FORCE_INLINE(void) iemVmxVmreadSuccess(PVMCPU pVCpu, uint8_t cbInstr)
1421{
1422 iemVmxVmSucceed(pVCpu);
1423 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1424}
1425
1426
1427/**
1428 * Gets the instruction diagnostic for segment base checks during VM-entry of a
1429 * nested-guest.
1430 *
1431 * @param iSegReg The segment index (X86_SREG_XXX).
1432 */
1433IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBase(unsigned iSegReg)
1434{
1435 switch (iSegReg)
1436 {
1437 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseCs;
1438 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseDs;
1439 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseEs;
1440 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseFs;
1441 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseGs;
1442 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseSs;
1443 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_1);
1444 }
1445}
1446
1447
1448/**
1449 * Gets the instruction diagnostic for segment base checks during VM-entry of a
1450 * nested-guest that is in Virtual-8086 mode.
1451 *
1452 * @param iSegReg The segment index (X86_SREG_XXX).
1453 */
1454IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBaseV86(unsigned iSegReg)
1455{
1456 switch (iSegReg)
1457 {
1458 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseV86Cs;
1459 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ds;
1460 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseV86Es;
1461 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseV86Fs;
1462 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseV86Gs;
1463 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ss;
1464 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
1465 }
1466}
1467
1468
1469/**
1470 * Gets the instruction diagnostic for segment limit checks during VM-entry of a
1471 * nested-guest that is in Virtual-8086 mode.
1472 *
1473 * @param iSegReg The segment index (X86_SREG_XXX).
1474 */
1475IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegLimitV86(unsigned iSegReg)
1476{
1477 switch (iSegReg)
1478 {
1479 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegLimitV86Cs;
1480 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ds;
1481 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegLimitV86Es;
1482 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegLimitV86Fs;
1483 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegLimitV86Gs;
1484 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ss;
1485 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_3);
1486 }
1487}
1488
1489
1490/**
1491 * Gets the instruction diagnostic for segment attribute checks during VM-entry of a
1492 * nested-guest that is in Virtual-8086 mode.
1493 *
1494 * @param iSegReg The segment index (X86_SREG_XXX).
1495 */
1496IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrV86(unsigned iSegReg)
1497{
1498 switch (iSegReg)
1499 {
1500 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrV86Cs;
1501 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ds;
1502 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrV86Es;
1503 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrV86Fs;
1504 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrV86Gs;
1505 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ss;
1506 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_4);
1507 }
1508}
1509
1510
1511/**
1512 * Gets the instruction diagnostic for segment attributes reserved bits failure
1513 * during VM-entry of a nested-guest.
1514 *
1515 * @param iSegReg The segment index (X86_SREG_XXX).
1516 */
1517IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrRsvd(unsigned iSegReg)
1518{
1519 switch (iSegReg)
1520 {
1521 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdCs;
1522 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdDs;
1523 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrRsvdEs;
1524 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdFs;
1525 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdGs;
1526 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdSs;
1527 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_5);
1528 }
1529}
1530
1531
1532/**
1533 * Gets the instruction diagnostic for segment attributes descriptor-type
1534 * (code/segment or system) failure during VM-entry of a nested-guest.
1535 *
1536 * @param iSegReg The segment index (X86_SREG_XXX).
1537 */
1538IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDescType(unsigned iSegReg)
1539{
1540 switch (iSegReg)
1541 {
1542 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeCs;
1543 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeDs;
1544 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeEs;
1545 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeFs;
1546 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeGs;
1547 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeSs;
1548 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_6);
1549 }
1550}
1551
1552
1553/**
1554 * Gets the instruction diagnostic for segment attribute present-bit (P) failure
1555 * during VM-entry of a nested-guest.
1556 *
1557 * @param iSegReg The segment index (X86_SREG_XXX).
1558 */
1559IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrPresent(unsigned iSegReg)
1560{
1561 switch (iSegReg)
1562 {
1563 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrPresentCs;
1564 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrPresentDs;
1565 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrPresentEs;
1566 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrPresentFs;
1567 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrPresentGs;
1568 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrPresentSs;
1569 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_7);
1570 }
1571}
1572
1573
1574/**
1575 * Gets the instruction diagnostic for segment attribute granularity failure during
1576 * VM-entry of a nested-guest.
1577 *
1578 * @param iSegReg The segment index (X86_SREG_XXX).
1579 */
1580IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrGran(unsigned iSegReg)
1581{
1582 switch (iSegReg)
1583 {
1584 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrGranCs;
1585 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrGranDs;
1586 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrGranEs;
1587 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrGranFs;
1588 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrGranGs;
1589 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrGranSs;
1590 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_8);
1591 }
1592}
1593
1594/**
1595 * Gets the instruction diagnostic for segment attribute DPL/RPL failure during
1596 * VM-entry of a nested-guest.
1597 *
1598 * @param iSegReg The segment index (X86_SREG_XXX).
1599 */
1600IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDplRpl(unsigned iSegReg)
1601{
1602 switch (iSegReg)
1603 {
1604 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplCs;
1605 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplDs;
1606 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDplRplEs;
1607 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplFs;
1608 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplGs;
1609 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplSs;
1610 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_9);
1611 }
1612}
1613
1614
1615/**
1616 * Gets the instruction diagnostic for segment attribute type accessed failure
1617 * during VM-entry of a nested-guest.
1618 *
1619 * @param iSegReg The segment index (X86_SREG_XXX).
1620 */
1621IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrTypeAcc(unsigned iSegReg)
1622{
1623 switch (iSegReg)
1624 {
1625 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccCs;
1626 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccDs;
1627 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccEs;
1628 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccFs;
1629 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccGs;
1630 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccSs;
1631 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_10);
1632 }
1633}
1634
1635
1636/**
1637 * Gets the instruction diagnostic for guest CR3 referenced PDPTE reserved bits
1638 * failure during VM-entry of a nested-guest.
1639 *
1640 * @param iPdpte The PDPTE entry index.
1641 */
1642IEM_STATIC VMXVDIAG iemVmxGetDiagVmentryPdpteRsvd(unsigned iPdpte)
1643{
1644 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
1645 switch (iPdpte)
1646 {
1647 case 0: return kVmxVDiag_Vmentry_GuestPdpte0Rsvd;
1648 case 1: return kVmxVDiag_Vmentry_GuestPdpte1Rsvd;
1649 case 2: return kVmxVDiag_Vmentry_GuestPdpte2Rsvd;
1650 case 3: return kVmxVDiag_Vmentry_GuestPdpte3Rsvd;
1651 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_11);
1652 }
1653}
1654
1655
1656/**
1657 * Gets the instruction diagnostic for host CR3 referenced PDPTE reserved bits
1658 * failure during VM-exit of a nested-guest.
1659 *
1660 * @param iPdpte The PDPTE entry index.
1661 */
1662IEM_STATIC VMXVDIAG iemVmxGetDiagVmexitPdpteRsvd(unsigned iPdpte)
1663{
1664 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
1665 switch (iPdpte)
1666 {
1667 case 0: return kVmxVDiag_Vmexit_HostPdpte0Rsvd;
1668 case 1: return kVmxVDiag_Vmexit_HostPdpte1Rsvd;
1669 case 2: return kVmxVDiag_Vmexit_HostPdpte2Rsvd;
1670 case 3: return kVmxVDiag_Vmexit_HostPdpte3Rsvd;
1671 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_12);
1672 }
1673}
1674
1675
1676/**
1677 * Saves the guest control registers, debug registers and some MSRs as part of
1678 * VM-exit.
1679 *
1680 * @param pVCpu The cross context virtual CPU structure.
1681 */
1682IEM_STATIC void iemVmxVmexitSaveGuestControlRegsMsrs(PVMCPU pVCpu)
1683{
1684 /*
1685 * Saves the guest control registers, debug registers and some MSRs.
1686 * See Intel spec. 27.3.1 "Saving Control Registers, Debug Registers and MSRs".
1687 */
1688 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1689
1690 /* Save control registers. */
1691 pVmcs->u64GuestCr0.u = pVCpu->cpum.GstCtx.cr0;
1692 pVmcs->u64GuestCr3.u = pVCpu->cpum.GstCtx.cr3;
1693 pVmcs->u64GuestCr4.u = pVCpu->cpum.GstCtx.cr4;
1694
1695 /* Save SYSENTER CS, ESP, EIP. */
1696 pVmcs->u32GuestSysenterCS = pVCpu->cpum.GstCtx.SysEnter.cs;
1697 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1698 {
1699 pVmcs->u64GuestSysenterEsp.u = pVCpu->cpum.GstCtx.SysEnter.esp;
1700 pVmcs->u64GuestSysenterEip.u = pVCpu->cpum.GstCtx.SysEnter.eip;
1701 }
1702 else
1703 {
1704 pVmcs->u64GuestSysenterEsp.s.Lo = pVCpu->cpum.GstCtx.SysEnter.esp;
1705 pVmcs->u64GuestSysenterEip.s.Lo = pVCpu->cpum.GstCtx.SysEnter.eip;
1706 }
1707
1708 /* Save debug registers (DR7 and IA32_DEBUGCTL MSR). */
1709 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_DEBUG)
1710 {
1711 pVmcs->u64GuestDr7.u = pVCpu->cpum.GstCtx.dr[7];
1712 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
1713 }
1714
1715 /* Save PAT MSR. */
1716 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PAT_MSR)
1717 pVmcs->u64GuestPatMsr.u = pVCpu->cpum.GstCtx.msrPAT;
1718
1719 /* Save EFER MSR. */
1720 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_EFER_MSR)
1721 pVmcs->u64GuestEferMsr.u = pVCpu->cpum.GstCtx.msrEFER;
1722
1723 /* We don't support clearing IA32_BNDCFGS MSR yet. */
1724 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR));
1725
1726 /* Nothing to do for SMBASE register - We don't support SMM yet. */
1727}
1728
1729
1730/**
1731 * Saves the guest force-flags in preparation for entering the nested-guest.
1732 *
1733 * @param pVCpu The cross context virtual CPU structure.
1734 */
1735IEM_STATIC void iemVmxVmentrySaveForceFlags(PVMCPU pVCpu)
1736{
1737 /* We shouldn't be called multiple times during VM-entry. */
1738 Assert(pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions == 0);
1739
1740 /* MTF should not be set outside VMX non-root mode. */
1741 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_MTF));
1742
1743 /*
1744 * Preserve the required force-flags.
1745 *
1746 * We cache and clear force-flags that would affect the execution of the
1747 * nested-guest. Cached flags are then restored while returning to the guest
1748 * if necessary.
1749 *
1750 * - VMCPU_FF_INHIBIT_INTERRUPTS need not be cached as it only affects
1751 * interrupts until the completion of the current VMLAUNCH/VMRESUME
1752 * instruction. Interrupt inhibition for any nested-guest instruction
1753 * will be set later while loading the guest-interruptibility state.
1754 *
1755 * - VMCPU_FF_BLOCK_NMIS needs to be cached as VM-exits caused before
1756 * successful VM-entry needs to continue blocking NMIs if it was in effect
1757 * during VM-entry.
1758 *
1759 * - MTF need not be preserved as it's used only in VMX non-root mode and
1760 * is supplied on VM-entry through the VM-execution controls.
1761 *
1762 * The remaining FFs (e.g. timers, APIC updates) must stay in place so that
1763 * we will be able to generate interrupts that may cause VM-exits for
1764 * the nested-guest.
1765 */
1766 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
1767
1768 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_BLOCK_NMIS))
1769 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_BLOCK_NMIS);
1770}
1771
1772
1773/**
1774 * Restores the guest force-flags in preparation for exiting the nested-guest.
1775 *
1776 * @param pVCpu The cross context virtual CPU structure.
1777 */
1778IEM_STATIC void iemVmxVmexitRestoreForceFlags(PVMCPU pVCpu)
1779{
1780 if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
1781 {
1782 VMCPU_FF_SET(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions);
1783 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0;
1784 }
1785}
1786
1787
1788/**
1789 * Performs a VMX transition, updating PGM, IEM and CPUM.
1790 *
1791 * @param pVCpu The cross context virtual CPU structure.
1792 */
1793IEM_STATIC int iemVmxWorldSwitch(PVMCPU pVCpu)
1794{
1795 /*
1796 * Inform PGM about paging mode changes.
1797 * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet,
1798 * see comment in iemMemPageTranslateAndCheckAccess().
1799 */
1800 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
1801# ifdef IN_RING3
1802 Assert(rc != VINF_PGM_CHANGE_MODE);
1803# endif
1804 AssertRCReturn(rc, rc);
1805
1806 /* Inform CPUM (recompiler), can later be removed. */
1807 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1808
1809 /*
1810 * Flush the TLB with new CR3. This is required in case the PGM mode change
1811 * above doesn't actually change anything.
1812 */
1813 if (rc == VINF_SUCCESS)
1814 {
1815 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true);
1816 AssertRCReturn(rc, rc);
1817 }
1818
1819 /* Re-initialize IEM cache/state after the drastic mode switch. */
1820 iemReInitExec(pVCpu);
1821 return rc;
1822}
1823
1824
1825/**
1826 * Saves guest segment registers, GDTR, IDTR, LDTR, TR as part of VM-exit.
1827 *
1828 * @param pVCpu The cross context virtual CPU structure.
1829 */
1830IEM_STATIC void iemVmxVmexitSaveGuestSegRegs(PVMCPU pVCpu)
1831{
1832 /*
1833 * Save guest segment registers, GDTR, IDTR, LDTR, TR.
1834 * See Intel spec 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
1835 */
1836 /* CS, SS, ES, DS, FS, GS. */
1837 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1838 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
1839 {
1840 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1841 if (!pSelReg->Attr.n.u1Unusable)
1842 iemVmxVmcsSetGuestSegReg(pVmcs, iSegReg, pSelReg);
1843 else
1844 {
1845 /*
1846 * For unusable segments the attributes are undefined except for CS and SS.
1847 * For the rest we don't bother preserving anything but the unusable bit.
1848 */
1849 switch (iSegReg)
1850 {
1851 case X86_SREG_CS:
1852 pVmcs->GuestCs = pSelReg->Sel;
1853 pVmcs->u64GuestCsBase.u = pSelReg->u64Base;
1854 pVmcs->u32GuestCsLimit = pSelReg->u32Limit;
1855 pVmcs->u32GuestCsAttr = pSelReg->Attr.u & ( X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
1856 | X86DESCATTR_UNUSABLE);
1857 break;
1858
1859 case X86_SREG_SS:
1860 pVmcs->GuestSs = pSelReg->Sel;
1861 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1862 pVmcs->u64GuestSsBase.u &= UINT32_C(0xffffffff);
1863 pVmcs->u32GuestSsAttr = pSelReg->Attr.u & (X86DESCATTR_DPL | X86DESCATTR_UNUSABLE);
1864 break;
1865
1866 case X86_SREG_DS:
1867 pVmcs->GuestDs = pSelReg->Sel;
1868 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1869 pVmcs->u64GuestDsBase.u &= UINT32_C(0xffffffff);
1870 pVmcs->u32GuestDsAttr = X86DESCATTR_UNUSABLE;
1871 break;
1872
1873 case X86_SREG_ES:
1874 pVmcs->GuestEs = pSelReg->Sel;
1875 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1876 pVmcs->u64GuestEsBase.u &= UINT32_C(0xffffffff);
1877 pVmcs->u32GuestEsAttr = X86DESCATTR_UNUSABLE;
1878 break;
1879
1880 case X86_SREG_FS:
1881 pVmcs->GuestFs = pSelReg->Sel;
1882 pVmcs->u64GuestFsBase.u = pSelReg->u64Base;
1883 pVmcs->u32GuestFsAttr = X86DESCATTR_UNUSABLE;
1884 break;
1885
1886 case X86_SREG_GS:
1887 pVmcs->GuestGs = pSelReg->Sel;
1888 pVmcs->u64GuestGsBase.u = pSelReg->u64Base;
1889 pVmcs->u32GuestGsAttr = X86DESCATTR_UNUSABLE;
1890 break;
1891 }
1892 }
1893 }
1894
1895 /* Segment attribute bits 31:17 and 11:8 MBZ. */
1896 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
1897 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_UNUSABLE;
1898 /* LDTR. */
1899 {
1900 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.ldtr;
1901 pVmcs->GuestLdtr = pSelReg->Sel;
1902 pVmcs->u64GuestLdtrBase.u = pSelReg->u64Base;
1903 Assert(X86_IS_CANONICAL(pSelReg->u64Base));
1904 pVmcs->u32GuestLdtrLimit = pSelReg->u32Limit;
1905 pVmcs->u32GuestLdtrAttr = pSelReg->Attr.u & fValidAttrMask;
1906 }
1907
1908 /* TR. */
1909 {
1910 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.tr;
1911 pVmcs->GuestTr = pSelReg->Sel;
1912 pVmcs->u64GuestTrBase.u = pSelReg->u64Base;
1913 pVmcs->u32GuestTrLimit = pSelReg->u32Limit;
1914 pVmcs->u32GuestTrAttr = pSelReg->Attr.u & fValidAttrMask;
1915 }
1916
1917 /* GDTR. */
1918 pVmcs->u64GuestGdtrBase.u = pVCpu->cpum.GstCtx.gdtr.pGdt;
1919 pVmcs->u32GuestGdtrLimit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
1920
1921 /* IDTR. */
1922 pVmcs->u64GuestIdtrBase.u = pVCpu->cpum.GstCtx.idtr.pIdt;
1923 pVmcs->u32GuestIdtrLimit = pVCpu->cpum.GstCtx.idtr.cbIdt;
1924}
1925
1926
1927/**
1928 * Saves guest non-register state as part of VM-exit.
1929 *
1930 * @param pVCpu The cross context virtual CPU structure.
1931 * @param uExitReason The VM-exit reason.
1932 */
1933IEM_STATIC void iemVmxVmexitSaveGuestNonRegState(PVMCPU pVCpu, uint32_t uExitReason)
1934{
1935 /*
1936 * Save guest non-register state.
1937 * See Intel spec. 27.3.4 "Saving Non-Register State".
1938 */
1939 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1940
1941 /* Activity-state: VM-exits occur before changing the activity state, nothing further to do */
1942
1943 /* Interruptibility-state. */
1944 pVmcs->u32GuestIntrState = 0;
1945 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
1946 { /** @todo NSTVMX: Virtual-NMI blocking. */ }
1947 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
1948 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
1949
1950 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1951 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
1952 {
1953 /** @todo NSTVMX: We can't distinguish between blocking-by-MovSS and blocking-by-STI
1954 * currently. */
1955 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
1956 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1957 }
1958 /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */
1959
1960 /* Pending debug exceptions. */
1961 if ( uExitReason != VMX_EXIT_INIT_SIGNAL
1962 && uExitReason != VMX_EXIT_SMI
1963 && uExitReason != VMX_EXIT_ERR_MACHINE_CHECK
1964 && !HMVmxIsTrapLikeVmexit(uExitReason))
1965 {
1966 /** @todo NSTVMX: also must exclude VM-exits caused by debug exceptions when
1967 * block-by-MovSS is in effect. */
1968 pVmcs->u64GuestPendingDbgXcpt.u = 0;
1969 }
1970
1971 /** @todo NSTVMX: Save VMX preemption timer value. */
1972
1973 /* PDPTEs. */
1974 /* We don't support EPT yet. */
1975 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
1976 pVmcs->u64GuestPdpte0.u = 0;
1977 pVmcs->u64GuestPdpte1.u = 0;
1978 pVmcs->u64GuestPdpte2.u = 0;
1979 pVmcs->u64GuestPdpte3.u = 0;
1980}
1981
1982
1983/**
1984 * Saves the guest-state as part of VM-exit.
1985 *
1987 * @param pVCpu The cross context virtual CPU structure.
1988 * @param uExitReason The VM-exit reason.
1989 */
1990IEM_STATIC void iemVmxVmexitSaveGuestState(PVMCPU pVCpu, uint32_t uExitReason)
1991{
1992 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1993 Assert(pVmcs);
1994
1995 iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu);
1996 iemVmxVmexitSaveGuestSegRegs(pVCpu);
1997
1998 /*
1999 * Save guest RIP, RSP and RFLAGS.
2000 * See Intel spec. 27.3.3 "Saving RIP, RSP and RFLAGS".
2001 */
2002 /* We don't support enclave mode yet. */
2003 pVmcs->u64GuestRip.u = pVCpu->cpum.GstCtx.rip;
2004 pVmcs->u64GuestRsp.u = pVCpu->cpum.GstCtx.rsp;
2005 pVmcs->u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u; /** @todo NSTVMX: Check RFLAGS.RF handling. */
2006
2007 iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason);
2008}
2009
2010
2011/**
2012 * Saves the guest MSRs into the VM-exit auto-store MSRs area as part of VM-exit.
2013 *
2014 * @returns VBox status code.
2015 * @param pVCpu The cross context virtual CPU structure.
2016 * @param uExitReason The VM-exit reason (for diagnostic purposes).
2017 */
2018IEM_STATIC int iemVmxVmexitSaveGuestAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
2019{
2020 /*
2021 * Save guest MSRs.
2022 * See Intel spec. 27.4 "Saving MSRs".
2023 */
2024 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2025 const char *const pszFailure = "VMX-abort";
2026
2027 /*
2028 * The VM-exit MSR-store area address need not be a valid guest-physical address if the
2029 * VM-exit MSR-store count is 0. If this is the case, bail early without reading it.
2030 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
2031 */
2032 uint32_t const cMsrs = pVmcs->u32ExitMsrStoreCount;
2033 if (!cMsrs)
2034 return VINF_SUCCESS;
2035
2036 /*
2037 * Verify the MSR auto-store count. Physical CPUs can behave unpredictably if the supported
2038 * count is exceeded, including possibly raising #MC exceptions during the VMX transition. Our
2039 * implementation causes a VMX-abort followed by a triple-fault.
2040 */
2041 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
2042 if (fIsMsrCountValid)
2043 { /* likely */ }
2044 else
2045 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount);
2046
2047 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
2048 Assert(pMsr);
2049 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
2050 {
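        /* IA32_SMBASE and MSRs in the x2APIC range cannot be stored via the MSR-store area;
           such entries (and entries with reserved bits set) fail the VM-exit below. */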
2051 if ( !pMsr->u32Reserved
2052 && pMsr->u32Msr != MSR_IA32_SMBASE
2053 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
2054 {
2055 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pMsr->u32Msr, &pMsr->u64Value);
2056 if (rcStrict == VINF_SUCCESS)
2057 continue;
2058
2059 /*
2060 * If we're in ring-0, we cannot handle returns to ring-3 at this point and still continue
2061 * the VM-exit. If the guest hypervisor uses MSRs that require ring-3 handling, we cause a
2062 * VMX-abort, recording the MSR index in the auxiliary info. field and indicating it further
2063 * with our own, specific diagnostic code. Later, we can try to implement handling of the
2064 * MSR in ring-0 if possible, or come up with a better, generic solution.
2065 */
2066 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2067 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_READ
2068 ? kVmxVDiag_Vmexit_MsrStoreRing3
2069 : kVmxVDiag_Vmexit_MsrStore;
2070 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2071 }
2072 else
2073 {
2074 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2075 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreRsvd);
2076 }
2077 }
2078
2079 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrStore.u;
2080 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysAutoMsrArea,
2081 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea), VMX_V_AUTOMSR_AREA_SIZE);
2082 if (RT_SUCCESS(rc))
2083 { /* likely */ }
2084 else
2085 {
2086 AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
2087 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrWritePhys);
2088 }
2089
2090 NOREF(uExitReason);
2091 NOREF(pszFailure);
2092 return VINF_SUCCESS;
2093}
2094
2095
2096/**
2097 * Performs a VMX abort (due to a fatal error during VM-exit).
2098 *
2099 * @returns Strict VBox status code.
2100 * @param pVCpu The cross context virtual CPU structure.
2101 * @param enmAbort The VMX abort reason.
2102 */
2103IEM_STATIC VBOXSTRICTRC iemVmxAbort(PVMCPU pVCpu, VMXABORT enmAbort)
2104{
2105 /*
2106 * Perform the VMX abort.
2107 * See Intel spec. 27.7 "VMX Aborts".
2108 */
2109 LogFunc(("enmAbort=%u (%s) -> RESET\n", enmAbort, HMVmxGetAbortDesc(enmAbort)));
2110
2111 /* We don't support SMX yet. */
2112 pVCpu->cpum.GstCtx.hwvirt.vmx.enmAbort = enmAbort;
2113 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
2114 {
2115 RTGCPHYS const GCPhysVmcs = IEM_VMX_GET_CURRENT_VMCS(pVCpu);
2116 uint32_t const offVmxAbort = RT_OFFSETOF(VMXVVMCS, u32VmxAbortId);
2117 PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + offVmxAbort, &enmAbort, sizeof(enmAbort));
2118 }
2119
2120 return VINF_EM_TRIPLE_FAULT;
2121}
2122
2123
2124/**
2125 * Loads host control registers, debug registers and MSRs as part of VM-exit.
2126 *
2127 * @param pVCpu The cross context virtual CPU structure.
2128 */
2129IEM_STATIC void iemVmxVmexitLoadHostControlRegsMsrs(PVMCPU pVCpu)
2130{
2131 /*
2132 * Load host control registers, debug registers and MSRs.
2133 * See Intel spec. 27.5.1 "Loading Host Control Registers, Debug Registers, MSRs".
2134 */
2135 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
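    /* The "host address-space size" VM-exit control determines whether the host runs in long mode after this VM-exit. */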
2136 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2137
2138 /* CR0. */
2139 {
2140 /* Bits 63:32, 28:19, 17, 15:6, ET, CD, NW and CR0 MB1 bits are not modified. */
2141 uint64_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
2142 uint64_t const fCr0IgnMask = UINT64_C(0xffffffff1ff8ffc0) | X86_CR0_ET | X86_CR0_CD | X86_CR0_NW | uCr0Fixed0;
2143 uint64_t const uHostCr0 = pVmcs->u64HostCr0.u;
2144 uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
2145 uint64_t const uValidCr0 = (uHostCr0 & ~fCr0IgnMask) | (uGuestCr0 & fCr0IgnMask);
2146 CPUMSetGuestCR0(pVCpu, uValidCr0);
2147 }
2148
2149 /* CR4. */
2150 {
2151 /* CR4 MB1 bits are not modified. */
2152 uint64_t const fCr4IgnMask = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
2153 uint64_t const uHostCr4 = pVmcs->u64HostCr4.u;
2154 uint64_t const uGuestCr4 = pVCpu->cpum.GstCtx.cr4;
2155 uint64_t uValidCr4 = (uHostCr4 & ~fCr4IgnMask) | (uGuestCr4 & fCr4IgnMask);
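        /* Returning to a long-mode host requires CR4.PAE; a legacy-mode host cannot have CR4.PCIDE set. */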
2156 if (fHostInLongMode)
2157 uValidCr4 |= X86_CR4_PAE;
2158 else
2159 uValidCr4 &= ~X86_CR4_PCIDE;
2160 CPUMSetGuestCR4(pVCpu, uValidCr4);
2161 }
2162
2163 /* CR3 (host value validated while checking host-state during VM-entry). */
2164 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64HostCr3.u;
2165
2166 /* DR7. */
2167 pVCpu->cpum.GstCtx.dr[7] = X86_DR7_INIT_VAL;
2168
2169 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
2170
2171 /* Load SYSENTER CS, ESP, EIP (host values validated while checking host-state during VM-entry). */
2172 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64HostSysenterEip.u;
2173 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64HostSysenterEsp.u;
2174 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32HostSysenterCs;
2175
2176 /* FS, GS bases are loaded later while we load host segment registers. */
2177
2178 /* EFER MSR (host value validated while checking host-state during VM-entry). */
2179 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
2180 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64HostEferMsr.u;
2181 else if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2182 {
2183 if (fHostInLongMode)
2184 pVCpu->cpum.GstCtx.msrEFER |= (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
2185 else
2186 pVCpu->cpum.GstCtx.msrEFER &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
2187 }
2188
2189 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
2190
2191 /* PAT MSR (host value is validated while checking host-state during VM-entry). */
2192 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
2193 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64HostPatMsr.u;
2194
2195 /* We don't support IA32_BNDCFGS MSR yet. */
2196}
2197
2198
2199/**
2200 * Loads host segment registers, GDTR, IDTR, LDTR and TR as part of VM-exit.
2201 *
2202 * @param pVCpu The cross context virtual CPU structure.
2203 */
2204IEM_STATIC void iemVmxVmexitLoadHostSegRegs(PVMCPU pVCpu)
2205{
2206 /*
2207 * Load host segment registers, GDTR, IDTR, LDTR and TR.
2208 * See Intel spec. 27.5.2 "Loading Host Segment and Descriptor-Table Registers".
2209 *
2210 * Warning! Be careful to not touch fields that are reserved by VT-x,
2211 * e.g. segment limit high bits stored in segment attributes (in bits 11:8).
2212 */
2213 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2214 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2215
2216 /* CS, SS, ES, DS, FS, GS. */
2217 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
2218 {
2219 RTSEL const HostSel = iemVmxVmcsGetHostSelReg(pVmcs, iSegReg);
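        /* A null host selector marks the segment as unusable after the VM-exit; a null CS is not valid (asserted in the CS case below). */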
2220 bool const fUnusable = RT_BOOL(HostSel == 0);
2221
2222 /* Selector. */
2223 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel = HostSel;
2224 pVCpu->cpum.GstCtx.aSRegs[iSegReg].ValidSel = HostSel;
2225 pVCpu->cpum.GstCtx.aSRegs[iSegReg].fFlags = CPUMSELREG_FLAGS_VALID;
2226
2227 /* Limit. */
2228 pVCpu->cpum.GstCtx.aSRegs[iSegReg].u32Limit = 0xffffffff;
2229
2230 /* Base and Attributes. */
2231 switch (iSegReg)
2232 {
2233 case X86_SREG_CS:
2234 {
2235 pVCpu->cpum.GstCtx.cs.u64Base = 0;
2236 pVCpu->cpum.GstCtx.cs.Attr.n.u4Type = X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED;
2237 pVCpu->cpum.GstCtx.cs.Attr.n.u1DescType = 1;
2238 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = 0;
2239 pVCpu->cpum.GstCtx.cs.Attr.n.u1Present = 1;
2240 pVCpu->cpum.GstCtx.cs.Attr.n.u1Long = fHostInLongMode;
2241 pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig = !fHostInLongMode;
2242 pVCpu->cpum.GstCtx.cs.Attr.n.u1Granularity = 1;
2243 Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Unusable);
2244 Assert(!fUnusable);
2245 break;
2246 }
2247
2248 case X86_SREG_SS:
2249 case X86_SREG_ES:
2250 case X86_SREG_DS:
2251 {
2252 pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base = 0;
2253 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2254 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DescType = 1;
2255 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u2Dpl = 0;
2256 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Present = 1;
2257 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DefBig = 1;
2258 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Granularity = 1;
2259 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable = fUnusable;
2260 break;
2261 }
2262
2263 case X86_SREG_FS:
2264 {
2265 Assert(X86_IS_CANONICAL(pVmcs->u64HostFsBase.u));
2266 pVCpu->cpum.GstCtx.fs.u64Base = !fUnusable ? pVmcs->u64HostFsBase.u : 0;
2267 pVCpu->cpum.GstCtx.fs.Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2268 pVCpu->cpum.GstCtx.fs.Attr.n.u1DescType = 1;
2269 pVCpu->cpum.GstCtx.fs.Attr.n.u2Dpl = 0;
2270 pVCpu->cpum.GstCtx.fs.Attr.n.u1Present = 1;
2271 pVCpu->cpum.GstCtx.fs.Attr.n.u1DefBig = 1;
2272 pVCpu->cpum.GstCtx.fs.Attr.n.u1Granularity = 1;
2273 pVCpu->cpum.GstCtx.fs.Attr.n.u1Unusable = fUnusable;
2274 break;
2275 }
2276
2277 case X86_SREG_GS:
2278 {
2279 Assert(X86_IS_CANONICAL(pVmcs->u64HostGsBase.u));
2280 pVCpu->cpum.GstCtx.gs.u64Base = !fUnusable ? pVmcs->u64HostGsBase.u : 0;
2281 pVCpu->cpum.GstCtx.gs.Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2282 pVCpu->cpum.GstCtx.gs.Attr.n.u1DescType = 1;
2283 pVCpu->cpum.GstCtx.gs.Attr.n.u2Dpl = 0;
2284 pVCpu->cpum.GstCtx.gs.Attr.n.u1Present = 1;
2285 pVCpu->cpum.GstCtx.gs.Attr.n.u1DefBig = 1;
2286 pVCpu->cpum.GstCtx.gs.Attr.n.u1Granularity = 1;
2287 pVCpu->cpum.GstCtx.gs.Attr.n.u1Unusable = fUnusable;
2288 break;
2289 }
2290 }
2291 }
2292
2293 /* TR. */
2294 Assert(X86_IS_CANONICAL(pVmcs->u64HostTrBase.u));
2295 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1Unusable);
2296 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->HostTr;
2297 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->HostTr;
2298 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2299 pVCpu->cpum.GstCtx.tr.u32Limit = X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN;
2300 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64HostTrBase.u;
2301 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2302 pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType = 0;
2303 pVCpu->cpum.GstCtx.tr.Attr.n.u2Dpl = 0;
2304 pVCpu->cpum.GstCtx.tr.Attr.n.u1Present = 1;
2305 pVCpu->cpum.GstCtx.tr.Attr.n.u1DefBig = 0;
2306 pVCpu->cpum.GstCtx.tr.Attr.n.u1Granularity = 0;
2307
2308 /* LDTR. */
2309 pVCpu->cpum.GstCtx.ldtr.Sel = 0;
2310 pVCpu->cpum.GstCtx.ldtr.ValidSel = 0;
2311 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2312 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
2313 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
2314 pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Unusable = 1;
2315
2316 /* GDTR. */
2317 Assert(X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u));
2318 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64HostGdtrBase.u;
2319 pVCpu->cpum.GstCtx.gdtr.cbGdt = 0xffff;
2320
2321 /* IDTR.*/
2322 Assert(X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u));
2323 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64HostIdtrBase.u;
2324 pVCpu->cpum.GstCtx.idtr.cbIdt = 0xffff;
2325}
2326
2327
2328/**
2329 * Checks host PDPTes as part of VM-exit.
2330 *
2331 * @param pVCpu The cross context virtual CPU structure.
2332 * @param uExitReason The VM-exit reason (for logging purposes).
2333 */
2334IEM_STATIC int iemVmxVmexitCheckHostPdptes(PVMCPU pVCpu, uint32_t uExitReason)
2335{
2336 /*
2337 * Check host PDPTEs.
2338 * See Intel spec. 27.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
2339 */
2340 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2341 const char *const pszFailure = "VMX-abort";
2342 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2343
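    /* PDPTEs are only relevant when the host will use PAE paging, i.e. CR4.PAE set while not in long mode. */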
2344 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
2345 && !fHostInLongMode)
2346 {
2347 uint64_t const uHostCr3 = pVCpu->cpum.GstCtx.cr3 & X86_CR3_PAE_PAGE_MASK;
2348 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
2349 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uHostCr3, sizeof(aPdptes));
2350 if (RT_SUCCESS(rc))
2351 {
2352 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
2353 {
2354 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
2355 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
2356 { /* likely */ }
2357 else
2358 {
2359 VMXVDIAG const enmDiag = iemVmxGetDiagVmexitPdpteRsvd(iPdpte);
2360 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2361 }
2362 }
2363 }
2364 else
2365 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_HostPdpteCr3ReadPhys);
2366 }
2367
2368 NOREF(pszFailure);
2369 NOREF(uExitReason);
2370 return VINF_SUCCESS;
2371}
2372
2373
2374/**
2375 * Loads the host MSRs from the VM-exit auto-load MSRs area as part of VM-exit.
2376 *
2377 * @returns VBox status code.
2378 * @param pVCpu The cross context virtual CPU structure.
2379 * @param uExitReason The VM-exit reason (for logging purposes).
2380 */
2381IEM_STATIC int iemVmxVmexitLoadHostAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
2382{
2383 /*
2384 * Load host MSRs.
2385 * See Intel spec. 27.6 "Loading MSRs".
2386 */
2387 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2388 const char *const pszFailure = "VMX-abort";
2389
2390 /*
2391 * The VM-exit MSR-load area address need not be a valid guest-physical address if the
2392 * VM-exit MSR load count is 0. If this is the case, bail early without reading it.
2393 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
2394 */
2395 uint32_t const cMsrs = pVmcs->u32ExitMsrLoadCount;
2396 if (!cMsrs)
2397 return VINF_SUCCESS;
2398
2399 /*
2400 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the supported
2401 * count is exceeded, including possibly raising #MC exceptions during the VMX transition. Our
2402 * implementation causes a VMX-abort followed by a triple-fault.
2403 */
2404 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
2405 if (fIsMsrCountValid)
2406 { /* likely */ }
2407 else
2408 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadCount);
2409
2410 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrLoad.u;
2411 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
2412 GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
2413 if (RT_SUCCESS(rc))
2414 {
2415 PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
2416 Assert(pMsr);
2417 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
2418 {
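            /* FS/GS base, EFER, SMM-monitor control and x2APIC-range MSRs cannot be loaded via the
               MSR-load area; such entries (and entries with reserved bits set) fail the VM-exit below. */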
2419 if ( !pMsr->u32Reserved
2420 && pMsr->u32Msr != MSR_K8_FS_BASE
2421 && pMsr->u32Msr != MSR_K8_GS_BASE
2422 && pMsr->u32Msr != MSR_K6_EFER
2423 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
2424 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
2425 {
2426 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
2427 if (rcStrict == VINF_SUCCESS)
2428 continue;
2429
2430 /*
2431 * If we're in ring-0, we cannot handle returns to ring-3 at this point and still continue
2432 * the VM-exit. If the guest hypervisor uses MSRs that require ring-3 handling, we cause a
2433 * VMX-abort, recording the MSR index in the auxiliary info. field and indicating it further
2434 * with our own, specific diagnostic code. Later, we can try to implement handling of the
2435 * MSR in ring-0 if possible, or come up with a better, generic solution.
2436 */
2437 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2438 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
2439 ? kVmxVDiag_Vmexit_MsrLoadRing3
2440 : kVmxVDiag_Vmexit_MsrLoad;
2441 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2442 }
2443 else
2444 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadRsvd);
2445 }
2446 }
2447 else
2448 {
2449 AssertMsgFailed(("VM-exit: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
2450 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadPtrReadPhys);
2451 }
2452
2453 NOREF(uExitReason);
2454 NOREF(pszFailure);
2455 return VINF_SUCCESS;
2456}
2457
2458
2459/**
2460 * Loads the host state as part of VM-exit.
2461 *
2462 * @returns Strict VBox status code.
2463 * @param pVCpu The cross context virtual CPU structure.
2464 * @param uExitReason The VM-exit reason (for logging purposes).
2465 */
2466IEM_STATIC VBOXSTRICTRC iemVmxVmexitLoadHostState(PVMCPU pVCpu, uint32_t uExitReason)
2467{
2468 /*
2469 * Load host state.
2470 * See Intel spec. 27.5 "Loading Host State".
2471 */
2472 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2473 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2474
2475 /* We cannot return from a long-mode guest to a host that is not in long mode. */
2476 if ( CPUMIsGuestInLongMode(pVCpu)
2477 && !fHostInLongMode)
2478 {
2479 Log(("VM-exit from long-mode guest to host not in long-mode -> VMX-Abort\n"));
2480 return iemVmxAbort(pVCpu, VMXABORT_HOST_NOT_IN_LONG_MODE);
2481 }
2482
2483 iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);
2484 iemVmxVmexitLoadHostSegRegs(pVCpu);
2485
2486 /*
2487 * Load host RIP, RSP and RFLAGS.
2488 * See Intel spec. 27.5.3 "Loading Host RIP, RSP and RFLAGS"
2489 */
2490 pVCpu->cpum.GstCtx.rip = pVmcs->u64HostRip.u;
2491 pVCpu->cpum.GstCtx.rsp = pVmcs->u64HostRsp.u;
2492 pVCpu->cpum.GstCtx.rflags.u = X86_EFL_1;
2493
2494 /* Update non-register state. */
2495 iemVmxVmexitRestoreForceFlags(pVCpu);
2496
2497 /* Clear address range monitoring. */
2498 EMMonitorWaitClear(pVCpu);
2499
2500 /* Perform the VMX transition (PGM updates). */
2501 VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
2502 if (rcStrict == VINF_SUCCESS)
2503 {
2504 /* Check host PDPTEs (only when we've fully switched page tables). */
2505 /** @todo r=ramshankar: I don't know if PGM does this for us already or not... */
2506 int rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);
2507 if (RT_FAILURE(rc))
2508 {
2509 Log(("VM-exit failed while restoring host PDPTEs -> VMX-Abort\n"));
2510 return iemVmxAbort(pVCpu, VMXBOART_HOST_PDPTE);
2511 }
2512 }
2513 else if (RT_SUCCESS(rcStrict))
2514 {
2515 Log3(("VM-exit: iemVmxWorldSwitch returns %Rrc (uExitReason=%u) -> Setting passup status\n", VBOXSTRICTRC_VAL(rcStrict),
2516 uExitReason));
2517 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2518 }
2519 else
2520 {
2521 Log3(("VM-exit: iemVmxWorldSwitch failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
2522 return VBOXSTRICTRC_VAL(rcStrict);
2523 }
2524
2525 Assert(rcStrict == VINF_SUCCESS);
2526
2527 /* Load MSRs from the VM-exit auto-load MSR area. */
2528 int rc = iemVmxVmexitLoadHostAutoMsrs(pVCpu, uExitReason);
2529 if (RT_FAILURE(rc))
2530 {
2531 Log(("VM-exit failed while loading host MSRs -> VMX-Abort\n"));
2532 return iemVmxAbort(pVCpu, VMXABORT_LOAD_HOST_MSR);
2533 }
2534
2535 return rcStrict;
2536}
2537
2538
2539/**
2540 * VMX VM-exit handler.
2541 *
2542 * @returns Strict VBox status code.
2543 * @param pVCpu The cross context virtual CPU structure.
2544 * @param uExitReason The VM-exit reason.
2545 */
2546IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason)
2547{
2548 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2549 Assert(pVmcs);
2550
2551 pVmcs->u32RoExitReason = uExitReason;
2552
2553 /** @todo NSTVMX: IEMGetCurrentXcpt will be VM-exit interruption info. */
2554 /** @todo NSTVMX: The source event should be recorded in IDT-vectoring info
2555 * during injection. */
2556
2557 /*
2558 * Save the guest state back into the VMCS.
2559 * We only need to save the state when the VM-entry was successful.
2560 */
2561 bool const fVmentryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
2562 if (!fVmentryFailed)
2563 {
2564 iemVmxVmexitSaveGuestState(pVCpu, uExitReason);
2565 int rc = iemVmxVmexitSaveGuestAutoMsrs(pVCpu, uExitReason);
2566 if (RT_SUCCESS(rc))
2567 { /* likely */ }
2568 else
2569 return iemVmxAbort(pVCpu, VMXABORT_SAVE_GUEST_MSRS);
2570 }
2571
2572 /*
2573 * The high bits of the VM-exit reason are only relevant when the VM-exit occurs in
2574 * enclave mode/SMM which we don't support yet. If we ever add support for it, we can
2575 * pass just the lower bits, till then an assert should suffice.
2576 */
2577 Assert(!RT_HI_U16(uExitReason));
2578
2579 VBOXSTRICTRC rcStrict = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
2580 if (RT_FAILURE(rcStrict))
2581 LogFunc(("Loading host-state failed. uExitReason=%u rc=%Rrc\n", uExitReason, VBOXSTRICTRC_VAL(rcStrict)));
2582
2583 /* We're no longer in nested-guest execution mode. */
2584 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
2585
2586 return rcStrict;
2587}
2588
2589
2590/**
2591 * VMX VM-exit handler for VM-exits due to instruction execution.
2592 *
2593 * This is intended for instructions where the caller provides all the relevant
2594 * VM-exit information.
2595 *
2596 * @param pVCpu The cross context virtual CPU structure.
2597 * @param pExitInfo Pointer to the VM-exit instruction information struct.
2598 */
2599DECLINLINE(VBOXSTRICTRC) iemVmxVmexitInstrWithInfo(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
2600{
2601 /*
2602 * For instructions where any of the following fields are not applicable:
2603 * - VM-exit instruction info. is undefined.
2604 * - VM-exit qualification must be cleared.
2605 * - VM-exit guest-linear address is undefined.
2606 * - VM-exit guest-physical address is undefined.
2607 *
2608 * The VM-exit instruction length is mandatory for all VM-exits that are caused by
2609 * instruction execution.
2610 *
2611 * In our implementation, all undefined fields are generally cleared (caller's
2612 * responsibility).
2613 *
2614 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2615 * See Intel spec. 27.2.4 "Information for VM Exits Due to Instruction Execution".
2616 */
2617 Assert(pExitInfo);
2618 AssertMsg(pExitInfo->uReason <= VMX_EXIT_MAX, ("uReason=%u\n", pExitInfo->uReason));
2619 AssertMsg(pExitInfo->cbInstr >= 1 && pExitInfo->cbInstr <= 15,
2620 ("uReason=%u cbInstr=%u\n", pExitInfo->uReason, pExitInfo->cbInstr));
2621
2622 /* Update all the relevant fields from the VM-exit instruction information struct. */
2623 iemVmxVmcsSetExitInstrInfo(pVCpu, pExitInfo->InstrInfo.u);
2624 iemVmxVmcsSetExitQual(pVCpu, pExitInfo->u64Qual);
2625 iemVmxVmcsSetExitGuestLinearAddr(pVCpu, pExitInfo->u64GuestLinearAddr);
2626 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, pExitInfo->u64GuestPhysAddr);
2627 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
2628
2629 /* Perform the VM-exit. */
2630 return iemVmxVmexit(pVCpu, pExitInfo->uReason);
2631}
2632
2633
2634/**
2635 * VMX VM-exit handler for VM-exits due to instruction execution.
2636 *
2637 * This is intended for instructions that only provide the VM-exit instruction
2638 * length.
2639 *
2640 * @param pVCpu The cross context virtual CPU structure.
2641 * @param uExitReason The VM-exit reason.
2642 * @param cbInstr The instruction length (in bytes).
2643 */
2644IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstr(PVMCPU pVCpu, uint32_t uExitReason, uint8_t cbInstr)
2645{
2646 VMXVEXITINFO ExitInfo;
2647 RT_ZERO(ExitInfo);
2648 ExitInfo.uReason = uExitReason;
2649 ExitInfo.cbInstr = cbInstr;
2650
2651#ifdef VBOX_STRICT
2652 /* To prevent us from shooting ourselves in the foot. Maybe remove later. */
2653 switch (uExitReason)
2654 {
2655 case VMX_EXIT_INVEPT:
2656 case VMX_EXIT_INVPCID:
2657 case VMX_EXIT_LDTR_TR_ACCESS:
2658 case VMX_EXIT_GDTR_IDTR_ACCESS:
2659 case VMX_EXIT_VMCLEAR:
2660 case VMX_EXIT_VMPTRLD:
2661 case VMX_EXIT_VMPTRST:
2662 case VMX_EXIT_VMREAD:
2663 case VMX_EXIT_VMWRITE:
2664 case VMX_EXIT_VMXON:
2665 case VMX_EXIT_XRSTORS:
2666 case VMX_EXIT_XSAVES:
2667 case VMX_EXIT_RDRAND:
2668 case VMX_EXIT_RDSEED:
2669 case VMX_EXIT_IO_INSTR:
2670 AssertMsgFailedReturn(("Use iemVmxVmexitInstrNeedsInfo for uExitReason=%u\n", uExitReason), VERR_IEM_IPE_5);
2671 break;
2672 }
2673#endif
2674
2675 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2676}
2677
2678
2679/**
2680 * VMX VM-exit handler for VM-exits due to instruction execution.
2681 *
2682 * This is intended for instructions that have a ModR/M byte and update the VM-exit
2683 * instruction information and VM-exit qualification fields.
2684 *
2685 * @param pVCpu The cross context virtual CPU structure.
2686 * @param uExitReason The VM-exit reason.
2687 * @param uInstrId The instruction identity (VMXINSTRID_XXX).
2688 * @param cbInstr The instruction length (in bytes).
2689 *
2690 * @remarks Do not use this for INS/OUTS instructions.
2691 */
2692IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrNeedsInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr)
2693{
2694 VMXVEXITINFO ExitInfo;
2695 RT_ZERO(ExitInfo);
2696 ExitInfo.uReason = uExitReason;
2697 ExitInfo.cbInstr = cbInstr;
2698
2699 /*
2700 * Update the VM-exit qualification field with displacement bytes.
2701 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2702 */
2703 switch (uExitReason)
2704 {
2705 case VMX_EXIT_INVEPT:
2706 case VMX_EXIT_INVPCID:
2707 case VMX_EXIT_LDTR_TR_ACCESS:
2708 case VMX_EXIT_GDTR_IDTR_ACCESS:
2709 case VMX_EXIT_VMCLEAR:
2710 case VMX_EXIT_VMPTRLD:
2711 case VMX_EXIT_VMPTRST:
2712 case VMX_EXIT_VMREAD:
2713 case VMX_EXIT_VMWRITE:
2714 case VMX_EXIT_VMXON:
2715 case VMX_EXIT_XRSTORS:
2716 case VMX_EXIT_XSAVES:
2717 case VMX_EXIT_RDRAND:
2718 case VMX_EXIT_RDSEED:
2719 {
2720 /* Construct the VM-exit instruction information. */
2721 RTGCPTR GCPtrDisp;
2722 uint32_t const uInstrInfo = iemVmxGetExitInstrInfo(pVCpu, uExitReason, uInstrId, &GCPtrDisp);
2723
2724 /* Update the VM-exit instruction information. */
2725 ExitInfo.InstrInfo.u = uInstrInfo;
2726
2727 /* Update the VM-exit qualification. */
2728 ExitInfo.u64Qual = GCPtrDisp;
2729 break;
2730 }
2731
2732 default:
2733 AssertMsgFailedReturn(("Use instruction-specific handler\n"), VERR_IEM_IPE_5);
2734 break;
2735 }
2736
2737 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2738}
2739
2740
2741/**
2742 * VMX VM-exit handler for VM-exits due to INVLPG.
2743 *
2744 * @param pVCpu The cross context virtual CPU structure.
2745 * @param GCPtrPage The guest-linear address of the page being invalidated.
2746 * @param cbInstr The instruction length (in bytes).
2747 */
2748IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrInvlpg(PVMCPU pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr)
2749{
2750 VMXVEXITINFO ExitInfo;
2751 RT_ZERO(ExitInfo);
2752 ExitInfo.uReason = VMX_EXIT_INVLPG;
2753 ExitInfo.cbInstr = cbInstr;
2754 ExitInfo.u64Qual = GCPtrPage;
2755 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(ExitInfo.u64Qual));
2756
2757 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2758}
2759
2760
2761/**
2762 * Checks guest control registers, debug registers and MSRs as part of VM-entry.
2763 *
2764 * @param pVCpu The cross context virtual CPU structure.
2765 * @param pszInstr The VMX instruction name (for logging purposes).
2766 */
2767IEM_STATIC int iemVmxVmentryCheckGuestControlRegsMsrs(PVMCPU pVCpu, const char *pszInstr)
2768{
2769 /*
2770 * Guest Control Registers, Debug Registers, and MSRs.
2771 * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs".
2772 */
2773 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2774 const char *const pszFailure = "VM-exit";
2775 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
2776
2777 /* CR0 reserved bits. */
2778 {
2779 /* CR0 MB1 bits. */
2780 uint64_t u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
2781 Assert(!(u64Cr0Fixed0 & (X86_CR0_NW | X86_CR0_CD)));
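        /* With unrestricted guest execution, CR0.PE and CR0.PG are not required to be 1. */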
2782 if (fUnrestrictedGuest)
2783 u64Cr0Fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
2784 if ((pVmcs->u64GuestCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
2785 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed0);
2786
2787 /* CR0 MBZ bits. */
2788 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
2789 if (pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1)
2790 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed1);
2791
2792 /* Without unrestricted guest support, VT-x does not support unpaged protected mode. */
2793 if ( !fUnrestrictedGuest
2794 && (pVmcs->u64GuestCr0.u & X86_CR0_PG)
2795 && !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
2796 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0PgPe);
2797 }
2798
2799 /* CR4 reserved bits. */
2800 {
2801 /* CR4 MB1 bits. */
2802 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
2803 if ((pVmcs->u64GuestCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
2804 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed0);
2805
2806 /* CR4 MBZ bits. */
2807 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
2808 if (pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1)
2809 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed1);
2810 }
2811
2812 /* DEBUGCTL MSR. */
2813 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
2814 && (pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
2815 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDebugCtl);
2816
2817 /* 64-bit CPU checks. */
2818 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
2819 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2820 {
2821 if (fGstInLongMode)
2822 {
2823 /* CR0.PG and CR4.PAE must both be set. */
2824 if ( (pVmcs->u64GuestCr0.u & X86_CR0_PG)
2825 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE))
2826 { /* likely */ }
2827 else
2828 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPae);
2829 }
2830 else
2831 {
2832 /* PCIDE should not be set. */
2833 if (!(pVmcs->u64GuestCr4.u & X86_CR4_PCIDE))
2834 { /* likely */ }
2835 else
2836 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPcide);
2837 }
2838
2839 /* CR3. */
2840 if (!(pVmcs->u64GuestCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
2841 { /* likely */ }
2842 else
2843 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr3);
2844
2845 /* DR7. */
2846 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
2847 && (pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
2848 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDr7);
2849
2850 /* SYSENTER ESP and SYSENTER EIP. */
2851 if ( X86_IS_CANONICAL(pVmcs->u64GuestSysenterEsp.u)
2852 && X86_IS_CANONICAL(pVmcs->u64GuestSysenterEip.u))
2853 { /* likely */ }
2854 else
2855 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSysenterEspEip);
2856 }
2857
2858 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
2859 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
2860
2861 /* PAT MSR. */
2862 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
2863 && !CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
2864 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPatMsr);
2865
2866 /* EFER MSR. */
2867 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
2868 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
2869 && (pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
2870 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsrRsvd);
2871
2872 bool const fGstLma = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LMA);
2873 bool const fGstLme = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LME);
2874 if ( fGstInLongMode == fGstLma
2875 && ( !(pVmcs->u64GuestCr0.u & X86_CR0_PG)
2876 || fGstLma == fGstLme))
2877 { /* likely */ }
2878 else
2879 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsr);
2880
2881 /* We don't support IA32_BNDCFGS MSR yet. */
2882 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
2883
2884 NOREF(pszInstr);
2885 NOREF(pszFailure);
2886 return VINF_SUCCESS;
2887}
2888
2889
2890/**
2891 * Checks guest segment registers, LDTR and TR as part of VM-entry.
2892 *
2893 * @param pVCpu The cross context virtual CPU structure.
2894 * @param pszInstr The VMX instruction name (for logging purposes).
2895 */
2896IEM_STATIC int iemVmxVmentryCheckGuestSegRegs(PVMCPU pVCpu, const char *pszInstr)
2897{
2898 /*
2899 * Segment registers.
2900 * See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
2901 */
2902 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2903 const char *const pszFailure = "VM-exit";
2904 bool const fGstInV86Mode = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);
2905 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
2906 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
2907
2908 /* Selectors. */
2909 if ( !fGstInV86Mode
2910 && !fUnrestrictedGuest
2911 && (pVmcs->GuestSs & X86_SEL_RPL) != (pVmcs->GuestCs & X86_SEL_RPL))
2912 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelCsSsRpl);
2913
2914 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
2915 {
2916 CPUMSELREG SelReg;
2917 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &SelReg);
2918 if (RT_LIKELY(rc == VINF_SUCCESS))
2919 { /* likely */ }
2920 else
2921 return rc;
2922
2923 /*
2924 * Virtual-8086 mode checks.
2925 */
2926 if (fGstInV86Mode)
2927 {
2928 /* Base address. */
2929 if (SelReg.u64Base == (uint64_t)SelReg.Sel << 4)
2930 { /* likely */ }
2931 else
2932 {
2933 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBaseV86(iSegReg);
2934 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2935 }
2936
2937 /* Limit. */
2938 if (SelReg.u32Limit == 0xffff)
2939 { /* likely */ }
2940 else
2941 {
2942 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegLimitV86(iSegReg);
2943 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2944 }
2945
2946 /* Attribute. */
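            /* 0xf3 = present, DPL 3, read/write accessed data segment - the only attributes valid in virtual-8086 mode. */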
2947 if (SelReg.Attr.u == 0xf3)
2948 { /* likely */ }
2949 else
2950 {
2951 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrV86(iSegReg);
2952 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2953 }
2954
2955 /* We're done; move to checking the next segment. */
2956 continue;
2957 }
2958
2959 /* Checks done by 64-bit CPUs. */
2960 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2961 {
2962 /* Base address. */
2963 if ( iSegReg == X86_SREG_FS
2964 || iSegReg == X86_SREG_GS)
2965 {
2966 if (X86_IS_CANONICAL(SelReg.u64Base))
2967 { /* likely */ }
2968 else
2969 {
2970 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
2971 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2972 }
2973 }
2974 else if (iSegReg == X86_SREG_CS)
2975 {
2976 if (!RT_HI_U32(SelReg.u64Base))
2977 { /* likely */ }
2978 else
2979 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseCs);
2980 }
2981 else
2982 {
2983 if ( SelReg.Attr.n.u1Unusable
2984 || !RT_HI_U32(SelReg.u64Base))
2985 { /* likely */ }
2986 else
2987 {
2988 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
2989 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
2990 }
2991 }
2992 }
2993
2994 /*
2995 * Checks outside Virtual-8086 mode.
2996 */
2997 uint8_t const uSegType = SelReg.Attr.n.u4Type;
2998 uint8_t const fCodeDataSeg = SelReg.Attr.n.u1DescType;
2999 uint8_t const fUsable = !SelReg.Attr.n.u1Unusable;
3000 uint8_t const uDpl = SelReg.Attr.n.u2Dpl;
3001 uint8_t const fPresent = SelReg.Attr.n.u1Present;
3002 uint8_t const uGranularity = SelReg.Attr.n.u1Granularity;
3003 uint8_t const uDefBig = SelReg.Attr.n.u1DefBig;
3004 uint8_t const fSegLong = SelReg.Attr.n.u1Long;
3005
3006 /* Code or usable segment. */
3007 if ( iSegReg == X86_SREG_CS
3008 || fUsable)
3009 {
3010 /* Reserved bits (bits 31:17 and bits 11:8). */
3011 if (!(SelReg.Attr.u & 0xfffe0f00))
3012 { /* likely */ }
3013 else
3014 {
3015 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrRsvd(iSegReg);
3016 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3017 }
3018
3019 /* Descriptor type. */
3020 if (fCodeDataSeg)
3021 { /* likely */ }
3022 else
3023 {
3024 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDescType(iSegReg);
3025 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3026 }
3027
3028 /* Present. */
3029 if (fPresent)
3030 { /* likely */ }
3031 else
3032 {
3033 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrPresent(iSegReg);
3034 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3035 }
3036
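            /*
             * Granularity/limit consistency: if any of limit bits 11:0 is clear, the
             * granularity bit must be 0 (byte granularity); if any of limit bits 31:20
             * is set, the granularity bit must be 1 (4K granularity).
             */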
3037 /* Granularity. */
3038 if ( ((SelReg.u32Limit & 0x00000fff) == 0x00000fff || !uGranularity)
3039 && ((SelReg.u32Limit & 0xfff00000) == 0x00000000 || uGranularity))
3040 { /* likely */ }
3041 else
3042 {
3043 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrGran(iSegReg);
3044 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3045 }
3046 }
3047
3048 if (iSegReg == X86_SREG_CS)
3049 {
3050 /* Segment Type and DPL. */
3051 if ( uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
3052 && fUnrestrictedGuest)
3053 {
3054 if (uDpl == 0)
3055 { /* likely */ }
3056 else
3057 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplZero);
3058 }
3059 else if ( uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_ACCESSED)
3060 || uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
3061 {
3062 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
3063 if (uDpl == AttrSs.n.u2Dpl)
3064 { /* likely */ }
3065 else
3066 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplEqSs);
3067 }
3068 else if ((uSegType & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
3069 == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
3070 {
3071 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
3072 if (uDpl <= AttrSs.n.u2Dpl)
3073 { /* likely */ }
3074 else
3075 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplLtSs);
3076 }
3077 else
3078 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsType);
3079
3080 /* Def/Big. */
3081 if ( fGstInLongMode
3082 && fSegLong)
3083 {
3084 if (uDefBig == 0)
3085 { /* likely */ }
3086 else
3087 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDefBig);
3088 }
3089 }
3090 else if (iSegReg == X86_SREG_SS)
3091 {
3092 /* Segment Type. */
3093 if ( !fUsable
3094 || uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
3095 || uSegType == (X86_SEL_TYPE_DOWN | X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED))
3096 { /* likely */ }
3097 else
3098 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsType);
3099
3100 /* DPL. */
3101 if (!fUnrestrictedGuest)
3102 {
3103 if (uDpl == (SelReg.Sel & X86_SEL_RPL))
3104 { /* likely */ }
3105 else
3106 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplEqRpl);
3107 }
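            /*
             * SS.DPL must be 0 when CS has the special type 3 (read/write accessed data,
             * only usable with unrestricted guests) or when CR0.PE is clear.
             * See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
             */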
3108 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
3109 if (   AttrCs.n.u4Type == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
3110     || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
3111 {
3112 if (uDpl == 0)
3113 { /* likely */ }
3114 else
3115 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplZero);
3116 }
3117 }
3118 else
3119 {
3120 /* DS, ES, FS, GS. */
3121 if (fUsable)
3122 {
3123 /* Segment type. */
3124 if (uSegType & X86_SEL_TYPE_ACCESSED)
3125 { /* likely */ }
3126 else
3127 {
3128 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrTypeAcc(iSegReg);
3129 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3130 }
3131
3132 if ( !(uSegType & X86_SEL_TYPE_CODE)
3133 || (uSegType & X86_SEL_TYPE_READ))
3134 { /* likely */ }
3135 else
3136 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsTypeRead);
3137
3138 /* DPL. */
3139 if ( !fUnrestrictedGuest
3140 && uSegType <= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
3141 {
3142 if (uDpl >= (SelReg.Sel & X86_SEL_RPL))
3143 { /* likely */ }
3144 else
3145 {
3146 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDplRpl(iSegReg);
3147 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3148 }
3149 }
3150 }
3151 }
3152 }
3153
3154 /*
3155 * LDTR.
3156 */
3157 {
3158 CPUMSELREG Ldtr;
3159 Ldtr.Sel = pVmcs->GuestLdtr;
3160 Ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
3161 Ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
3162 Ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
3163
3164 if (!Ldtr.Attr.n.u1Unusable)
3165 {
3166 /* Selector. */
3167 if (!(Ldtr.Sel & X86_SEL_LDT))
3168 { /* likely */ }
3169 else
3170 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelLdtr);
3171
3172 /* Base. */
3173 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3174 {
3175 if (X86_IS_CANONICAL(Ldtr.u64Base))
3176 { /* likely */ }
3177 else
3178 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseLdtr);
3179 }
3180
3181 /* Attributes. */
3182 /* Reserved bits (bits 31:17 and bits 11:8). */
3183 if (!(Ldtr.Attr.u & 0xfffe0f00))
3184 { /* likely */ }
3185 else
3186 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrRsvd);
3187
3188 if (Ldtr.Attr.n.u4Type == X86_SEL_TYPE_SYS_LDT)
3189 { /* likely */ }
3190 else
3191 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrType);
3192
3193 if (!Ldtr.Attr.n.u1DescType)
3194 { /* likely */ }
3195 else
3196 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrDescType);
3197
3198 if (Ldtr.Attr.n.u1Present)
3199 { /* likely */ }
3200 else
3201 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrPresent);
3202
3203 if ( ((Ldtr.u32Limit & 0x00000fff) == 0x00000fff || !Ldtr.Attr.n.u1Granularity)
3204 && ((Ldtr.u32Limit & 0xfff00000) == 0x00000000 || Ldtr.Attr.n.u1Granularity))
3205 { /* likely */ }
3206 else
3207 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrGran);
3208 }
3209 }
3210
3211 /*
3212 * TR.
3213 */
3214 {
3215 CPUMSELREG Tr;
3216 Tr.Sel = pVmcs->GuestTr;
3217 Tr.u32Limit = pVmcs->u32GuestTrLimit;
3218 Tr.u64Base = pVmcs->u64GuestTrBase.u;
3219 Tr.Attr.u = pVmcs->u32GuestTrAttr;
3220
3221 /* Selector. */
3222 if (!(Tr.Sel & X86_SEL_LDT))
3223 { /* likely */ }
3224 else
3225 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelTr);
3226
3227 /* Base. */
3228 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3229 {
3230 if (X86_IS_CANONICAL(Tr.u64Base))
3231 { /* likely */ }
3232 else
3233 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseTr);
3234 }
3235
3236 /* Attributes. */
3237 /* Reserved bits (bits 31:17 and bits 11:8). */
3238 if (!(Tr.Attr.u & 0xfffe0f00))
3239 { /* likely */ }
3240 else
3241 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrRsvd);
3242
3243 if (!Tr.Attr.n.u1Unusable)
3244 { /* likely */ }
3245 else
3246 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrUnusable);
3247
3248 if ( Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY
3249 || ( !fGstInLongMode
3250 && Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY))
3251 { /* likely */ }
3252 else
3253 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrType);
3254
3255 if (!Tr.Attr.n.u1DescType)
3256 { /* likely */ }
3257 else
3258 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrDescType);
3259
3260 if (Tr.Attr.n.u1Present)
3261 { /* likely */ }
3262 else
3263 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrPresent);
3264
3265 if ( ((Tr.u32Limit & 0x00000fff) == 0x00000fff || !Tr.Attr.n.u1Granularity)
3266 && ((Tr.u32Limit & 0xfff00000) == 0x00000000 || Tr.Attr.n.u1Granularity))
3267 { /* likely */ }
3268 else
3269 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrGran);
3270 }
3271
3272 NOREF(pszInstr);
3273 NOREF(pszFailure);
3274 return VINF_SUCCESS;
3275}
3276
3277
3278/**
3279 * Checks guest GDTR and IDTR as part of VM-entry.
3280 *
3281 * @param pVCpu The cross context virtual CPU structure.
3282 * @param pszInstr The VMX instruction name (for logging purposes).
3283 */
3284IEM_STATIC int iemVmxVmentryCheckGuestGdtrIdtr(PVMCPU pVCpu, const char *pszInstr)
3285{
3286 /*
3287 * GDTR and IDTR.
3288 * See Intel spec. 26.3.1.3 "Checks on Guest Descriptor-Table Registers".
3289 */
3290 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3291 const char *const pszFailure = "VM-exit";
3292
3293 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3294 {
3295 /* Base. */
3296 if (X86_IS_CANONICAL(pVmcs->u64GuestGdtrBase.u))
3297 { /* likely */ }
3298 else
3299 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrBase);
3300
3301 if (X86_IS_CANONICAL(pVmcs->u64GuestIdtrBase.u))
3302 { /* likely */ }
3303 else
3304 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrBase);
3305 }
3306
3307 /* Limit. */
3308 if (!RT_HI_U16(pVmcs->u32GuestGdtrLimit))
3309 { /* likely */ }
3310 else
3311 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrLimit);
3312
3313 if (!RT_HI_U16(pVmcs->u32GuestIdtrLimit))
3314 { /* likely */ }
3315 else
3316 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrLimit);
3317
3318 NOREF(pszInstr);
3319 NOREF(pszFailure);
3320 return VINF_SUCCESS;
3321}
3322
3323
3324/**
3325 * Checks guest RIP and RFLAGS as part of VM-entry.
3326 *
3327 * @param pVCpu The cross context virtual CPU structure.
3328 * @param pszInstr The VMX instruction name (for logging purposes).
3329 */
3330IEM_STATIC int iemVmxVmentryCheckGuestRipRFlags(PVMCPU pVCpu, const char *pszInstr)
3331{
3332 /*
3333 * RIP and RFLAGS.
3334 * See Intel spec. 26.3.1.4 "Checks on Guest RIP and RFLAGS".
3335 */
3336 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3337 const char *const pszFailure = "VM-exit";
3338 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
3339
3340 /* RIP. */
3341 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3342 {
3343 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
3344 if ( !fGstInLongMode
3345 || !AttrCs.n.u1Long)
3346 {
3347 if (!RT_HI_U32(pVmcs->u64GuestRip.u))
3348 { /* likely */ }
3349 else
3350 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRipRsvd);
3351 }
3352
3353 if ( fGstInLongMode
3354 && AttrCs.n.u1Long)
3355 {
3356 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth == 48); /* Canonical. */
3357 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth < 64
3358 && X86_IS_CANONICAL(pVmcs->u64GuestRip.u))
3359 { /* likely */ }
3360 else
3361 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRip);
3362 }
3363 }
3364
3365 /* RFLAGS (bits 63:22 (or 31:22), bits 15, 5, 3 are reserved, bit 1 MB1). */
3366 uint64_t const uGuestRFlags = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode ? pVmcs->u64GuestRFlags.u
3367 : pVmcs->u64GuestRFlags.s.Lo;
3368 if ( !(uGuestRFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK))
3369 && (uGuestRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK)
3370 { /* likely */ }
3371 else
3372 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsRsvd);
3373
3374 if ( fGstInLongMode
3375 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
3376 {
3377 if (!(uGuestRFlags & X86_EFL_VM))
3378 { /* likely */ }
3379 else
3380 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsVm);
3381 }
3382
3383 if ( VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo)
3384 && VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo) == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3385 {
3386 if (uGuestRFlags & X86_EFL_IF)
3387 { /* likely */ }
3388 else
3389 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsIf);
3390 }
3391
3392 NOREF(pszInstr);
3393 NOREF(pszFailure);
3394 return VINF_SUCCESS;
3395}
3396
3397
3398/**
3399 * Checks guest non-register state as part of VM-entry.
3400 *
3401 * @param pVCpu The cross context virtual CPU structure.
3402 * @param pszInstr The VMX instruction name (for logging purposes).
3403 */
3404IEM_STATIC int iemVmxVmentryCheckGuestNonRegState(PVMCPU pVCpu, const char *pszInstr)
3405{
3406 /*
3407 * Guest non-register state.
3408 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
3409 */
3410 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3411 const char *const pszFailure = "VM-exit";
3412
3413 /*
3414 * Activity state.
3415 */
3416 uint64_t const u64GuestVmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
3417 uint32_t const fActivityStateMask = RT_BF_GET(u64GuestVmxMiscMsr, VMX_BF_MISC_ACTIVITY_STATES);
3418 if (!(pVmcs->u32GuestActivityState & fActivityStateMask))
3419 { /* likely */ }
3420 else
3421 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateRsvd);
3422
3423 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
3424 if ( !AttrSs.n.u2Dpl
3425 || pVmcs->u32GuestActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT)
3426 { /* likely */ }
3427 else
3428 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateSsDpl);
3429
3430 if (pVmcs->u32GuestIntrState & ( VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
3431                                 | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
3432 {
3433 if (pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE)
3434 { /* likely */ }
3435 else
3436 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateStiMovSs);
3437 }
3438
3439 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
3440 {
3441 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
3442 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(pVmcs->u32EntryIntInfo);
3443 AssertCompile(VMX_V_GUEST_ACTIVITY_STATE_MASK == (VMX_VMCS_GUEST_ACTIVITY_HLT | VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN));
3444 switch (pVmcs->u32GuestActivityState)
3445 {
3446 case VMX_VMCS_GUEST_ACTIVITY_HLT:
3447 {
3448 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT
3449 || uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
3450 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
3451 && ( uVector == X86_XCPT_DB
3452 || uVector == X86_XCPT_MC))
3453 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
3454 && uVector == VMX_ENTRY_INT_INFO_VECTOR_MTF))
3455 { /* likely */ }
3456 else
3457 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateHlt);
3458 break;
3459 }
3460
3461 case VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN:
3462 {
3463 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
3464 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
3465 && uVector == X86_XCPT_MC))
3466 { /* likely */ }
3467 else
3468 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateShutdown);
3469 break;
3470 }
3471
3472 case VMX_VMCS_GUEST_ACTIVITY_ACTIVE:
3473 default:
3474 break;
3475 }
3476 }
3477
3478 /*
3479 * Interruptibility state.
3480 */
3481 if (!(pVmcs->u32GuestIntrState & ~VMX_VMCS_GUEST_INT_STATE_MASK))
3482 { /* likely */ }
3483 else
3484 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRsvd);
3485
3486 if ((pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3487 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3488 { /* likely */ }
3489 else
3490 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateStiMovSs);
3491
3492 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_IF)
3493 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3494 { /* likely */ }
3495 else
3496 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRFlagsSti);
3497
3498 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
3499 {
3500 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
3501 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3502 {
3503 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
3504 { /* likely */ }
3505 else
3506 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateExtInt);
3507 }
3508 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
3509 {
3510 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
3511 { /* likely */ }
3512 else
3513 {
3514 /*
3515 * We don't support injecting NMIs when blocking-by-STI would be in effect.
3516 * We update the VM-exit qualification only when blocking-by-STI is set
3517 * without blocking-by-MovSS being set. Although in practice it does not
3518 * make much difference since the order of checks is implementation defined.
3519 */
3520 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
3521 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_NMI_INJECT);
3522 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateNmi);
3523 }
3524
3525 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
3526 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI))
3527 { /* likely */ }
3528 else
3529 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateVirtNmi);
3530 }
3531 }
3532
3533 /* We don't support SMM yet. So blocking-by-SMIs must not be set. */
3534 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI))
3535 { /* likely */ }
3536 else
3537 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateSmi);
3538
3539 /* We don't support SGX yet. So enclave-interruption must not be set. */
3540 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_ENCLAVE))
3541 { /* likely */ }
3542 else
3543 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateEnclave);
3544
3545 /*
3546 * Pending debug exceptions.
3547 */
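    /*
     * When blocking-by-STI/MOV-SS is in effect or the activity state is HLT, the BS bit
     * of the pending debug exceptions must be consistent with single-stepping: it must be
     * set when RFLAGS.TF=1 and DEBUGCTL.BTF=0, and clear otherwise. The two checks further
     * below enforce this after validating the reserved bits.
     */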
3548 uint64_t const uPendingDbgXcpt = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode
3549 ? pVmcs->u64GuestPendingDbgXcpt.u
3550 : pVmcs->u64GuestPendingDbgXcpt.s.Lo;
3551 if (!(uPendingDbgXcpt & ~VMX_VMCS_GUEST_PENDING_DEBUG_VALID_MASK))
3552 { /* likely */ }
3553 else
3554 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRsvd);
3555
3556 if ( (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3557 || pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
3558 {
3559 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_TF)
3560 && !(pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF)
3561 && !(uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
3562 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsTf);
3563
3564 if ( ( !(pVmcs->u64GuestRFlags.u & X86_EFL_TF)
3565 || (pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF))
3566 && (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
3567 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsNoTf);
3568 }
3569
3570 /* We don't support RTM (Real-time Transactional Memory) yet. */
3571 if (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_RTM)
3572 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRtm);
3573
3574 /*
3575 * VMCS link pointer.
3576 */
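    /*
     * A VMCS link pointer of all ones (~0) indicates the field is not in use, in which
     * case none of the checks below apply. See Intel spec. 26.3.1.5 "Checks on Guest
     * Non-Register State".
     */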
3577 if (pVmcs->u64VmcsLinkPtr.u != UINT64_C(0xffffffffffffffff))
3578 {
3579 RTGCPHYS const GCPhysShadowVmcs = pVmcs->u64VmcsLinkPtr.u;
3580 /* We don't support SMM yet (so VMCS link pointer cannot be the current VMCS). */
3581 if (GCPhysShadowVmcs != IEM_VMX_GET_CURRENT_VMCS(pVCpu))
3582 { /* likely */ }
3583 else
3584 {
3585 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
3586 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrCurVmcs);
3587 }
3588
3589 /* Validate the address. */
3590 if ( (GCPhysShadowVmcs & X86_PAGE_4K_OFFSET_MASK)
3591 || (GCPhysShadowVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
3592 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysShadowVmcs))
3593 {
3594 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
3595 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmcsLinkPtr);
3596 }
3597
3598 /* Read the VMCS-link pointer from guest memory. */
3599 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs));
3600 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs),
3601 GCPhysShadowVmcs, VMX_V_VMCS_SIZE);
3602 if (RT_FAILURE(rc))
3603 {
3604 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
3605 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys);
3606 }
3607
3608 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
3609 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID)
3610 { /* likely */ }
3611 else
3612 {
3613 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
3614 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrRevId);
3615 }
3616
3617 /* Verify the shadow bit is set if VMCS shadowing is enabled. */
3618 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
3619 || pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.fIsShadowVmcs)
3620 { /* likely */ }
3621 else
3622 {
3623 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
3624 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrShadow);
3625 }
3626
3627 /* Finally update our cache of the guest physical address of the shadow VMCS. */
3628 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs = GCPhysShadowVmcs;
3629 }
3630
3631 NOREF(pszInstr);
3632 NOREF(pszFailure);
3633 return VINF_SUCCESS;
3634}
3635
3636
3637/**
3638 * Checks if the PDPTEs referenced by the nested-guest CR3 are valid as part of
3639 * VM-entry.
3640 *
3641 * @returns VBox status code.
3642 * @param pVCpu The cross context virtual CPU structure.
3643 * @param pszInstr The VMX instruction name (for logging purposes).
3644 * @param pVmcs Pointer to the virtual VMCS.
3645 */
3646IEM_STATIC int iemVmxVmentryCheckGuestPdptesForCr3(PVMCPU pVCpu, const char *pszInstr, PVMXVVMCS pVmcs)
3647{
3648 /*
3649 * Check PDPTEs.
3650 * See Intel spec. 4.4.1 "PDPTE Registers".
3651 */
3652 uint64_t const uGuestCr3 = pVmcs->u64GuestCr3.u & X86_CR3_PAE_PAGE_MASK;
3653 const char *const pszFailure = "VM-exit";
3654
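    /*
     * With PAE paging, CR3 points to a 32-byte aligned table of four PDPTEs. Every entry
     * that is marked present must have its reserved (MBZ) bits clear, which is what the
     * loop below verifies after reading the table from guest physical memory.
     */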
3655 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
3656 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uGuestCr3, sizeof(aPdptes));
3657 if (RT_SUCCESS(rc))
3658 {
3659 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
3660 {
3661 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
3662 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
3663 { /* likely */ }
3664 else
3665 {
3666 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
3667 VMXVDIAG const enmDiag = iemVmxGetDiagVmentryPdpteRsvd(iPdpte);
3668 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3669 }
3670 }
3671 }
3672 else
3673 {
3674 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
3675 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys);
3676 }
3677
3678 NOREF(pszFailure);
3679 return rc;
3680}
3681
3682
3683/**
3684 * Checks guest PDPTEs as part of VM-entry.
3685 *
3686 * @param pVCpu The cross context virtual CPU structure.
3687 * @param pszInstr The VMX instruction name (for logging purposes).
3688 */
3689IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPU pVCpu, const char *pszInstr)
3690{
3691 /*
3692 * Guest PDPTEs.
3693 * See Intel spec. 26.3.1.5 "Checks on Guest Page-Directory-Pointer-Table Entries".
3694 */
3695 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3696 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
3697
3698 /* Check PDPTEs if the VM-entry is to a guest using PAE paging. */
3699 int rc;
3700 if ( !fGstInLongMode
3701 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE)
3702 && (pVmcs->u64GuestCr0.u & X86_CR0_PG))
3703 {
3704 /*
3705 * We don't support nested-paging for nested-guests yet.
3706 *
3707 * Without nested-paging for nested-guests, PDPTEs in the VMCS are not used;
3708 * instead we need to check the PDPTEs referenced by the guest CR3.
3709 */
3710 rc = iemVmxVmentryCheckGuestPdptesForCr3(pVCpu, pszInstr, pVmcs);
3711 }
3712 else
3713 rc = VINF_SUCCESS;
3714 return rc;
3715}
3716
3717
3718/**
3719 * Checks guest-state as part of VM-entry.
3720 *
3721 * @returns VBox status code.
3722 * @param pVCpu The cross context virtual CPU structure.
3723 * @param pszInstr The VMX instruction name (for logging purposes).
3724 */
3725IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPU pVCpu, const char *pszInstr)
3726{
3727 int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
3728 if (RT_SUCCESS(rc))
3729 {
3730 rc = iemVmxVmentryCheckGuestSegRegs(pVCpu, pszInstr);
3731 if (RT_SUCCESS(rc))
3732 {
3733 rc = iemVmxVmentryCheckGuestGdtrIdtr(pVCpu, pszInstr);
3734 if (RT_SUCCESS(rc))
3735 {
3736 rc = iemVmxVmentryCheckGuestRipRFlags(pVCpu, pszInstr);
3737 if (RT_SUCCESS(rc))
3738 {
3739 rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
3740 if (RT_SUCCESS(rc))
3741 return iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
3742 }
3743 }
3744 }
3745 }
3746 return rc;
3747}
3748
3749
3750/**
3751 * Checks host-state as part of VM-entry.
3752 *
3753 * @returns VBox status code.
3754 * @param pVCpu The cross context virtual CPU structure.
3755 * @param pszInstr The VMX instruction name (for logging purposes).
3756 */
3757IEM_STATIC int iemVmxVmentryCheckHostState(PVMCPU pVCpu, const char *pszInstr)
3758{
3759 /*
3760 * Host Control Registers and MSRs.
3761 * See Intel spec. 26.2.2 "Checks on Host Control Registers and MSRs".
3762 */
3763 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3764 const char * const pszFailure = "VMFail";
3765
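    /*
     * The VMX fixed CR0/CR4 MSRs describe which control-register bits may be used: a bit
     * that is 1 in the FIXED0 MSR must be 1 in the corresponding register, and a bit that
     * is 0 in the FIXED1 MSR must be 0. The host CR0 and CR4 fields are validated against
     * these masks below.
     */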
3766 /* CR0 reserved bits. */
3767 {
3768 /* CR0 MB1 bits. */
3769 uint64_t const u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
3770 if ((pVmcs->u64HostCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
3771 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed0);
3772
3773 /* CR0 MBZ bits. */
3774 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
3775 if (pVmcs->u64HostCr0.u & ~u64Cr0Fixed1)
3776 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed1);
3777 }
3778
3779 /* CR4 reserved bits. */
3780 {
3781 /* CR4 MB1 bits. */
3782 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
3783 if ((pVmcs->u64HostCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
3784 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed0);
3785
3786 /* CR4 MBZ bits. */
3787 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
3788 if (pVmcs->u64HostCr4.u & ~u64Cr4Fixed1)
3789 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed1);
3790 }
3791
3792 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3793 {
3794 /* CR3 reserved bits. */
3795 if (!(pVmcs->u64HostCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
3796 { /* likely */ }
3797 else
3798 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr3);
3799
3800 /* SYSENTER ESP and SYSENTER EIP. */
3801 if ( X86_IS_CANONICAL(pVmcs->u64HostSysenterEsp.u)
3802 && X86_IS_CANONICAL(pVmcs->u64HostSysenterEip.u))
3803 { /* likely */ }
3804 else
3805 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSysenterEspEip);
3806 }
3807
3808 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
3809 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PERF_MSR));
3810
3811 /* PAT MSR. */
3812 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
3813 || CPUMIsPatMsrValid(pVmcs->u64HostPatMsr.u))
3814 { /* likely */ }
3815 else
3816 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostPatMsr);
3817
3818 /* EFER MSR. */
3819 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
3820 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
3821 || !(pVmcs->u64HostEferMsr.u & ~uValidEferMask))
3822 { /* likely */ }
3823 else
3824 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsrRsvd);
3825
3826 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
3827 bool const fHostLma = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LMA);
3828 bool const fHostLme = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LME);
3829 if ( fHostInLongMode == fHostLma
3830 && fHostInLongMode == fHostLme)
3831 { /* likely */ }
3832 else
3833 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsr);
3834
3835 /*
3836 * Host Segment and Descriptor-Table Registers.
3837 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
3838 */
3839 /* Selector RPL and TI. */
3840 if ( !(pVmcs->HostCs & (X86_SEL_RPL | X86_SEL_LDT))
3841 && !(pVmcs->HostSs & (X86_SEL_RPL | X86_SEL_LDT))
3842 && !(pVmcs->HostDs & (X86_SEL_RPL | X86_SEL_LDT))
3843 && !(pVmcs->HostEs & (X86_SEL_RPL | X86_SEL_LDT))
3844 && !(pVmcs->HostFs & (X86_SEL_RPL | X86_SEL_LDT))
3845 && !(pVmcs->HostGs & (X86_SEL_RPL | X86_SEL_LDT))
3846 && !(pVmcs->HostTr & (X86_SEL_RPL | X86_SEL_LDT)))
3847 { /* likely */ }
3848 else
3849 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSel);
3850
3851 /* CS and TR selectors cannot be 0. */
3852 if ( pVmcs->HostCs
3853 && pVmcs->HostTr)
3854 { /* likely */ }
3855 else
3856 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCsTr);
3857
3858 /* SS cannot be 0 if 32-bit host. */
3859 if ( fHostInLongMode
3860 || pVmcs->HostSs)
3861 { /* likely */ }
3862 else
3863 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSs);
3864
3865 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3866 {
3867 /* FS, GS, GDTR, IDTR, TR base address. */
3868 if ( X86_IS_CANONICAL(pVmcs->u64HostFsBase.u)
3869     && X86_IS_CANONICAL(pVmcs->u64HostGsBase.u)
3870 && X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u)
3871 && X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u)
3872 && X86_IS_CANONICAL(pVmcs->u64HostTrBase.u))
3873 { /* likely */ }
3874 else
3875 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSegBase);
3876 }
3877
3878 /*
3879 * Host address-space size for 64-bit CPUs.
3880 * See Intel spec. 26.2.4 "Checks Related to Address-Space Size".
3881 */
3882 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
3883 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3884 {
3885 bool const fCpuInLongMode = CPUMIsGuestInLongMode(pVCpu);
3886
3887 /* Logical processor in IA-32e mode. */
3888 if (fCpuInLongMode)
3889 {
3890 if (fHostInLongMode)
3891 {
3892 /* PAE must be set. */
3893 if (pVmcs->u64HostCr4.u & X86_CR4_PAE)
3894 { /* likely */ }
3895 else
3896 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pae);
3897
3898 /* RIP must be canonical. */
3899 if (X86_IS_CANONICAL(pVmcs->u64HostRip.u))
3900 { /* likely */ }
3901 else
3902 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRip);
3903 }
3904 else
3905 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostLongMode);
3906 }
3907 else
3908 {
3909 /* Logical processor is outside IA-32e mode. */
3910 if ( !fGstInLongMode
3911 && !fHostInLongMode)
3912 {
3913 /* PCIDE should not be set. */
3914 if (!(pVmcs->u64HostCr4.u & X86_CR4_PCIDE))
3915 { /* likely */ }
3916 else
3917 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pcide);
3918
3919 /* The high 32-bits of RIP MBZ. */
3920 if (!pVmcs->u64HostRip.s.Hi)
3921 { /* likely */ }
3922 else
3923 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRipRsvd);
3924 }
3925 else
3926 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongMode);
3927 }
3928 }
3929 else
3930 {
3931 /* Host address-space size for 32-bit CPUs. */
3932 if ( !fGstInLongMode
3933 && !fHostInLongMode)
3934 { /* likely */ }
3935 else
3936 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongModeNoCpu);
3937 }
3938
3939 NOREF(pszInstr);
3940 NOREF(pszFailure);
3941 return VINF_SUCCESS;
3942}
3943
3944
3945/**
3946 * Checks VM-entry controls fields as part of VM-entry.
3947 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
3948 *
3949 * @returns VBox status code.
3950 * @param pVCpu The cross context virtual CPU structure.
3951 * @param pszInstr The VMX instruction name (for logging purposes).
3952 */
3953IEM_STATIC int iemVmxVmentryCheckEntryCtls(PVMCPU pVCpu, const char *pszInstr)
3954{
3955 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3956 const char * const pszFailure = "VMFail";
3957
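    /*
     * A VMX control-capability MSR yields two masks: bits that must be 1 (the
     * disallowed-zero settings) and bits that may be 1 (the allowed-one settings).
     * The two checks below fail VM-entry if a mandatory bit is clear or a reserved
     * bit is set in the VM-entry controls.
     */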
3958 /* VM-entry controls. */
3959 VMXCTLSMSR EntryCtls;
3960 EntryCtls.u = CPUMGetGuestIa32VmxEntryCtls(pVCpu);
3961 if (~pVmcs->u32EntryCtls & EntryCtls.n.disallowed0)
3962 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);
3963
3964 if (pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1)
3965 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsAllowed1);
3966
3967 /* Event injection. */
3968 uint32_t const uIntInfo = pVmcs->u32EntryIntInfo;
3969 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VALID))
3970 {
3971 /* Type and vector. */
3972 uint8_t const uType = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_TYPE);
3973 uint8_t const uVector = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VECTOR);
3974 uint8_t const uRsvd = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_RSVD_12_30);
3975 if ( !uRsvd
3976 && HMVmxIsEntryIntInfoTypeValid(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxMonitorTrapFlag, uType)
3977 && HMVmxIsEntryIntInfoVectorValid(uVector, uType))
3978 { /* likely */ }
3979 else
3980 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoTypeVecRsvd);
3981
3982 /* Exception error code. */
3983 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID))
3984 {
3985 /* Delivery possible only in Unrestricted-guest mode when CR0.PE is set. */
3986 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
3987 || (pVmcs->u64GuestCr0.s.Lo & X86_CR0_PE))
3988 { /* likely */ }
3989 else
3990 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodePe);
3991
3992 /* Exceptions that provide an error code. */
3993 if ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
3994 && ( uVector == X86_XCPT_DF
3995 || uVector == X86_XCPT_TS
3996 || uVector == X86_XCPT_NP
3997 || uVector == X86_XCPT_SS
3998 || uVector == X86_XCPT_GP
3999 || uVector == X86_XCPT_PF
4000 || uVector == X86_XCPT_AC))
4001 { /* likely */ }
4002 else
4003 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodeVec);
4004
4005 /* Exception error-code reserved bits. */
4006 if (!(pVmcs->u32EntryXcptErrCode & ~VMX_ENTRY_INT_XCPT_ERR_CODE_VALID_MASK))
4007 { /* likely */ }
4008 else
4009 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryXcptErrCodeRsvd);
4010
4011 /* Injecting a software interrupt, software exception or privileged software exception. */
4012 if ( uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
4013 || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
4014 || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
4015 {
4016 /* Instruction length must be in the range 0-15. */
4017 if (pVmcs->u32EntryInstrLen <= VMX_ENTRY_INSTR_LEN_MAX)
4018 { /* likely */ }
4019 else
4020 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLen);
4021
4022 /* Instruction length of 0 is allowed only when its CPU feature is present. */
4023 if ( pVmcs->u32EntryInstrLen == 0
4024 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEntryInjectSoftInt)
4025 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLenZero);
4026 }
4027 }
4028 }
4029
4030 /* VM-entry MSR-load count and VM-entry MSR-load area address. */
4031 if (pVmcs->u32EntryMsrLoadCount)
4032 {
4033 if ( (pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
4034 || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4035 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
4036 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrEntryMsrLoad);
4037 }
4038
4039 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)); /* We don't support SMM yet. */
4040 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON)); /* We don't support dual-monitor treatment yet. */
4041
4042 NOREF(pszInstr);
4043 NOREF(pszFailure);
4044 return VINF_SUCCESS;
4045}
4046
4047
4048/**
4049 * Checks VM-exit controls fields as part of VM-entry.
4050 * See Intel spec. 26.2.1.2 "VM-Exit Control Fields".
4051 *
4052 * @returns VBox status code.
4053 * @param pVCpu The cross context virtual CPU structure.
4054 * @param pszInstr The VMX instruction name (for logging purposes).
4055 */
4056IEM_STATIC int iemVmxVmentryCheckExitCtls(PVMCPU pVCpu, const char *pszInstr)
4057{
4058 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4059 const char * const pszFailure = "VMFail";
4060
4061 /* VM-exit controls. */
4062 VMXCTLSMSR ExitCtls;
4063 ExitCtls.u = CPUMGetGuestIa32VmxExitCtls(pVCpu);
4064 if (~pVmcs->u32ExitCtls & ExitCtls.n.disallowed0)
4065 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);
4066
4067 if (pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1)
4068 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsAllowed1);
4069
4070 /* Save preemption timer without activating it. */
4071 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
4072     && (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
4073 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_SavePreemptTimer);
4074
4075 /* VM-exit MSR-store count and VM-exit MSR-store area address. */
4076 if (pVmcs->u32ExitMsrStoreCount)
4077 {
4078 if ( (pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
4079 || (pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4080 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
4081 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrStore);
4082 }
4083
4084 /* VM-exit MSR-load count and VM-exit MSR-load area address. */
4085 if (pVmcs->u32ExitMsrLoadCount)
4086 {
4087 if ( (pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
4088 || (pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4089 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
4090 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrLoad);
4091 }
4092
4093 NOREF(pszInstr);
4094 NOREF(pszFailure);
4095 return VINF_SUCCESS;
4096}
4097
4098
4099/**
4100 * Checks VM-execution controls fields as part of VM-entry.
4101 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
4102 *
4103 * @returns VBox status code.
4104 * @param pVCpu The cross context virtual CPU structure.
4105 * @param pszInstr The VMX instruction name (for logging purposes).
4106 *
4107 * @remarks This may update secondary processor-based VM-execution control fields
4108 * in the current VMCS if necessary.
4109 */
4110IEM_STATIC int iemVmxVmentryCheckExecCtls(PVMCPU pVCpu, const char *pszInstr)
4111{
4112 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4113 const char * const pszFailure = "VMFail";
4114
4115 /* Pin-based VM-execution controls. */
4116 {
4117 VMXCTLSMSR PinCtls;
4118 PinCtls.u = CPUMGetGuestIa32VmxPinbasedCtls(pVCpu);
4119 if (~pVmcs->u32PinCtls & PinCtls.n.disallowed0)
4120 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);
4121
4122 if (pVmcs->u32PinCtls & ~PinCtls.n.allowed1)
4123 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsAllowed1);
4124 }
4125
4126 /* Processor-based VM-execution controls. */
4127 {
4128 VMXCTLSMSR ProcCtls;
4129 ProcCtls.u = CPUMGetGuestIa32VmxProcbasedCtls(pVCpu);
4130 if (~pVmcs->u32ProcCtls & ProcCtls.n.disallowed0)
4131 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);
4132
4133 if (pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1)
4134 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsAllowed1);
4135 }
4136
4137 /* Secondary processor-based VM-execution controls. */
4138 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
4139 {
4140 VMXCTLSMSR ProcCtls2;
4141 ProcCtls2.u = CPUMGetGuestIa32VmxProcbasedCtls2(pVCpu);
4142 if (~pVmcs->u32ProcCtls2 & ProcCtls2.n.disallowed0)
4143 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);
4144
4145 if (pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1)
4146 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Allowed1);
4147 }
4148 else
4149 Assert(!pVmcs->u32ProcCtls2);
4150
4151 /* CR3-target count. */
4152 if (pVmcs->u32Cr3TargetCount <= VMX_V_CR3_TARGET_COUNT)
4153 { /* likely */ }
4154 else
4155 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Cr3TargetCount);
4156
4157 /* IO bitmaps physical addresses. */
4158 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
4159 {
4160 if ( (pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
4161 || (pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4162 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
4163 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapA);
4164
4165 if ( (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
4166 || (pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4167 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
4168 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
4169 }
4170
4171 /* MSR bitmap physical address. */
4172 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
4173 {
4174 RTGCPHYS const GCPhysMsrBitmap = pVmcs->u64AddrMsrBitmap.u;
4175 if ( (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
4176 || (GCPhysMsrBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4177 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysMsrBitmap))
4178 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);
4179
4180 /* Read the MSR bitmap. */
4181 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
4182 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap),
4183 GCPhysMsrBitmap, VMX_V_MSR_BITMAP_SIZE);
4184 if (RT_FAILURE(rc))
4185 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrBitmapPtrReadPhys);
4186 }
4187
4188 /* TPR shadow related controls. */
4189 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4190 {
4191 /* Virtual-APIC page physical address. */
4192 RTGCPHYS GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
4193 if ( (GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
4194 || (GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4195 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
4196 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
4197
4198 /* Read the Virtual-APIC page. */
4199 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4200 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage),
4201 GCPhysVirtApic, VMX_V_VIRT_APIC_PAGES);
4202 if (RT_FAILURE(rc))
4203 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
4204
4205 /* TPR threshold without virtual-interrupt delivery. */
4206 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4207 && (pVmcs->u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK))
4208 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdRsvd);
4209
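        /*
         * Without virtualized APIC accesses and virtual-interrupt delivery, the TPR
         * threshold (bits 3:0) must not exceed the priority class of the VTPR, i.e.
         * bits 7:4 of the TPR byte in the virtual-APIC page.
         */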
4210 /* TPR threshold and VTPR. */
4211 uint8_t const *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4212 uint8_t const u8VTpr = *(pbVirtApic + XAPIC_OFF_TPR);
4213 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
4214 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4215 && RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) > ((u8VTpr >> 4) & UINT32_C(0xf)) /* Bits 4:7 */)
4216 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
4217 }
4218 else
4219 {
4220 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
4221 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4222 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
4223 { /* likely */ }
4224 else
4225 {
4226 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
4227 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicTprShadow);
4228 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4229 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ApicRegVirt);
4230 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4231 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtIntDelivery);
4232 }
4233 }
4234
4235 /* NMI exiting and virtual-NMIs. */
4236 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
4237 && (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
4238 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtNmi);
4239
4240 /* Virtual-NMIs and NMI-window exiting. */
4241 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
4242 && (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4243 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_NmiWindowExit);
4244
4245 /* Virtualize APIC accesses. */
4246 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
4247 {
4248 /* APIC-access physical address. */
4249 RTGCPHYS GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
4250 if ( (GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
4251 || (GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4252 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
4253 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);
4254 }
4255
4256 /* Virtualize-x2APIC mode is mutually exclusive with virtualize-APIC accesses. */
4257 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
4258 && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
4259 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
4260
4261 /* Virtual-interrupt delivery requires external interrupt exiting. */
4262 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4263 && !(pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
4264 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
4265
4266 /* VPID. */
4267 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VPID)
4268 || pVmcs->u16Vpid != 0)
4269 { /* likely */ }
4270 else
4271 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Vpid);
4272
4273 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_POSTED_INT)); /* We don't support posted interrupts yet. */
4274 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)); /* We don't support EPT yet. */
4275 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PML)); /* We don't support PML yet. */
4276 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)); /* We don't support Unrestricted-guests yet. */
4277 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMFUNC)); /* We don't support VM functions yet. */
4278 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT_VE)); /* We don't support EPT-violation #VE yet. */
4279 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)); /* We don't support Pause-loop exiting yet. */
4280
4281 /* VMCS shadowing. */
4282 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4283 {
4284 /* VMREAD-bitmap physical address. */
4285 RTGCPHYS GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
4286 if ( ( GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
4287 || ( GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4288 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
4289 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmreadBitmap);
4290
4291 /* VMWRITE-bitmap physical address. */
4292 RTGCPHYS GCPhysVmwriteBitmap = pVmcs->u64AddrVmwriteBitmap.u;
4293 if ( ( GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
4294 || ( GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4295 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
4296 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);
4297
4298 /* Read the VMREAD-bitmap. */
4299 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
4300 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap),
4301 GCPhysVmreadBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
4302 if (RT_FAILURE(rc))
4303 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys);
4304
4305 /* Read the VMWRITE-bitmap. */
4306 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap));
4307 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap),
4308 GCPhysVmwriteBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
4309 if (RT_FAILURE(rc))
4310 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys);
4311 }
4312
4313 NOREF(pszInstr);
4314 NOREF(pszFailure);
4315 return VINF_SUCCESS;
4316}
4317
4318
4319/**
4320 * Loads the guest control registers, debug register and some MSRs as part of
4321 * VM-entry.
4322 *
4323 * @param pVCpu The cross context virtual CPU structure.
4324 */
4325IEM_STATIC void iemVmxVmentryLoadGuestControlRegsMsrs(PVMCPU pVCpu)
4326{
4327 /*
4328 * Load guest control registers, debug registers and MSRs.
4329 * See Intel spec. 26.3.2.1 "Loading Guest Control Registers, Debug Registers and MSRs".
4330 */
4331 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4332 uint64_t const uGstCr0 = (pVmcs->u64GuestCr0.u & ~VMX_ENTRY_CR0_IGNORE_MASK)
4333 | (pVCpu->cpum.GstCtx.cr0 & VMX_ENTRY_CR0_IGNORE_MASK);
4334 CPUMSetGuestCR0(pVCpu, uGstCr0);
4335 CPUMSetGuestCR4(pVCpu, pVmcs->u64GuestCr4.u);
4336 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64GuestCr3.u;
4337
4338 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4339 pVCpu->cpum.GstCtx.dr[7] = (pVmcs->u64GuestDr7.u & ~VMX_ENTRY_DR7_MBZ_MASK) | VMX_ENTRY_DR7_MB1_MASK;
4340
4341 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64GuestSysenterEip.s.Lo;
4342 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64GuestSysenterEsp.s.Lo;
4343 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32GuestSysenterCS;
4344
4345 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4346 {
4347 /* FS base and GS base are loaded while loading the rest of the guest segment registers. */
4348
4349 /* EFER MSR. */
4350 if (!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR))
4351 {
4352 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4353 bool const fGstPaging = RT_BOOL(uGstCr0 & X86_CR0_PG);
4354 uint64_t const uHostEfer = pVCpu->cpum.GstCtx.msrEFER;
4355 if (fGstInLongMode)
4356 {
4357 /* If the nested-guest is in long mode, LMA and LME are both set. */
4358 Assert(fGstPaging);
4359 pVCpu->cpum.GstCtx.msrEFER = uHostEfer | (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
4360 }
4361 else
4362 {
4363 /*
4364 * If the nested-guest is outside long mode:
4365 * - With paging: LMA is cleared, LME is cleared.
4366 * - Without paging: LMA is cleared, LME is left unmodified.
4367 */
4368 uint64_t const fLmaLmeMask = MSR_K6_EFER_LMA | (fGstPaging ? MSR_K6_EFER_LME : 0);
4369 pVCpu->cpum.GstCtx.msrEFER = uHostEfer & ~fLmaLmeMask;
4370 }
4371 }
4372 /* else: see below. */
4373 }
4374
4375 /* PAT MSR. */
4376 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4377 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64GuestPatMsr.u;
4378
4379 /* EFER MSR. */
4380 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4381 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64GuestEferMsr.u;
4382
4383 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
4384 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
4385
4386 /* We don't support IA32_BNDCFGS MSR yet. */
4387 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
4388
4389 /* Nothing to do for SMBASE register - We don't support SMM yet. */
4390}
4391
4392
4393/**
4394 * Loads the guest segment registers, GDTR, IDTR, LDTR and TR as part of VM-entry.
4395 *
4396 * @param pVCpu The cross context virtual CPU structure.
4397 */
4398IEM_STATIC void iemVmxVmentryLoadGuestSegRegs(PVMCPU pVCpu)
4399{
4400 /*
4401 * Load guest segment registers, GDTR, IDTR, LDTR and TR.
4402 * See Intel spec. 26.3.2.2 "Loading Guest Segment Registers and Descriptor-Table Registers".
4403 */
4404 /* CS, SS, ES, DS, FS, GS. */
4405 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4406 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
4407 {
4408 PCPUMSELREG pGstSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
4409 CPUMSELREG VmcsSelReg;
4410 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &VmcsSelReg);
4411 AssertRC(rc); NOREF(rc);
4412 if (!(VmcsSelReg.Attr.u & X86DESCATTR_UNUSABLE))
4413 {
4414 pGstSelReg->Sel = VmcsSelReg.Sel;
4415 pGstSelReg->ValidSel = VmcsSelReg.Sel;
4416 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
4417 pGstSelReg->u64Base = VmcsSelReg.u64Base;
4418 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
4419 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
4420 }
4421 else
4422 {
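 /* The segment is marked unusable: the selector is still loaded, while base, limit and
    attributes get register-specific treatment below (see Intel spec. 26.3.2.2). */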
4423 pGstSelReg->Sel = VmcsSelReg.Sel;
4424 pGstSelReg->ValidSel = VmcsSelReg.Sel;
4425 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
4426 switch (iSegReg)
4427 {
4428 case X86_SREG_CS:
4429 pGstSelReg->u64Base = VmcsSelReg.u64Base;
4430 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
4431 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
4432 break;
4433
4434 case X86_SREG_SS:
4435 pGstSelReg->u64Base = VmcsSelReg.u64Base & UINT32_C(0xfffffff0);
4436 pGstSelReg->u32Limit = 0;
4437 pGstSelReg->Attr.u = (VmcsSelReg.Attr.u & X86DESCATTR_DPL) | X86DESCATTR_D | X86DESCATTR_UNUSABLE;
4438 break;
4439
4440 case X86_SREG_ES:
4441 case X86_SREG_DS:
4442 pGstSelReg->u64Base = 0;
4443 pGstSelReg->u32Limit = 0;
4444 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
4445 break;
4446
4447 case X86_SREG_FS:
4448 case X86_SREG_GS:
4449 pGstSelReg->u64Base = VmcsSelReg.u64Base;
4450 pGstSelReg->u32Limit = 0;
4451 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
4452 break;
4453 }
4454 Assert(pGstSelReg->Attr.n.u1Unusable);
4455 }
4456 }
4457
4458 /* LDTR. */
4459 pVCpu->cpum.GstCtx.ldtr.Sel = pVmcs->GuestLdtr;
4460 pVCpu->cpum.GstCtx.ldtr.ValidSel = pVmcs->GuestLdtr;
4461 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4462 if (!(pVmcs->u32GuestLdtrAttr & X86DESCATTR_UNUSABLE))
4463 {
4464 pVCpu->cpum.GstCtx.ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
4465 pVCpu->cpum.GstCtx.ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
4466 pVCpu->cpum.GstCtx.ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
4467 }
4468 else
4469 {
4470 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
4471 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
4472 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4473 }
4474
4475 /* TR. */
4476 Assert(!(pVmcs->u32GuestTrAttr & X86DESCATTR_UNUSABLE));
4477 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->GuestTr;
4478 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->GuestTr;
4479 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4480 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64GuestTrBase.u;
4481 pVCpu->cpum.GstCtx.tr.u32Limit = pVmcs->u32GuestTrLimit;
4482 pVCpu->cpum.GstCtx.tr.Attr.u = pVmcs->u32GuestTrAttr;
4483
4484 /* GDTR. */
4485 pVCpu->cpum.GstCtx.gdtr.cbGdt = pVmcs->u32GuestGdtrLimit;
4486 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64GuestGdtrBase.u;
4487
4488 /* IDTR. */
4489 pVCpu->cpum.GstCtx.idtr.cbIdt = pVmcs->u32GuestIdtrLimit;
4490 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64GuestIdtrBase.u;
4491}
4492
4493
4494/**
4495 * Loads the guest MSRs from the VM-entry auto-load MSRs as part of VM-entry.
4496 *
4497 * @returns VBox status code.
4498 * @param pVCpu The cross context virtual CPU structure.
4499 * @param pszInstr The VMX instruction name (for logging purposes).
4500 */
4501IEM_STATIC int iemVmxVmentryLoadGuestAutoMsrs(PVMCPU pVCpu, const char *pszInstr)
4502{
4503 /*
4504 * Load guest MSRs.
4505 * See Intel spec. 26.4 "Loading MSRs".
4506 */
4507 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4508 const char *const pszFailure = "VM-exit";
4509
4510 /*
4511 * The VM-entry MSR-load area address need not be a valid guest-physical address if the
4512 * VM-entry MSR load count is 0. If this is the case, bail early without reading it.
4513 * See Intel spec. 24.8.2 "VM-Entry Controls for MSRs".
4514 */
4515 uint32_t const cMsrs = pVmcs->u32EntryMsrLoadCount;
4516 if (!cMsrs)
4517 return VINF_SUCCESS;
4518
4519 /*
4520 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count is
 4521 * exceeded, including possibly raising #MC exceptions during the VMX transition. Our
 4522 * implementation shall fail VM-entry with a VMX_EXIT_ERR_MSR_LOAD VM-exit.
4523 */
4524 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
4525 if (fIsMsrCountValid)
4526 { /* likely */ }
4527 else
4528 {
4529 iemVmxVmcsSetExitQual(pVCpu, VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
4530 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadCount);
4531 }
4532
4533 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrEntryMsrLoad.u;
 4534 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
4535 GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
4536 if (RT_SUCCESS(rc))
4537 {
4538 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
4539 Assert(pMsr);
4540 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
4541 {
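 /* MSRs that are disallowed in the auto-load area (FS/GS base, EFER, SMM monitor control,
    the x2APIC range) or that have reserved bits set fail VM-entry further below. */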
4542 if ( !pMsr->u32Reserved
4543 && pMsr->u32Msr != MSR_K8_FS_BASE
4544 && pMsr->u32Msr != MSR_K8_GS_BASE
4545 && pMsr->u32Msr != MSR_K6_EFER
4546 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
4547 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
4548 {
4549 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
4550 if (rcStrict == VINF_SUCCESS)
4551 continue;
4552
4553 /*
 4554 * If we're in ring-0, we cannot handle returns to ring-3 at this point and still continue VM-entry.
 4555 * If a guest hypervisor loads MSRs that require ring-3 handling, we cause a VM-entry failure,
 4556 * recording the MSR index in the VM-exit qualification (as per the Intel spec.) and indicating
 4557 * the cause further with our own, specific diagnostic code. Later, we can try to implement handling
 4558 * of the MSR in ring-0 if possible, or come up with a better, generic solution.
4559 */
4560 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
4561 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
4562 ? kVmxVDiag_Vmentry_MsrLoadRing3
4563 : kVmxVDiag_Vmentry_MsrLoad;
4564 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4565 }
4566 else
4567 {
4568 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
4569 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadRsvd);
4570 }
4571 }
4572 }
4573 else
4574 {
4575 AssertMsgFailed(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysAutoMsrArea, rc));
4576 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadPtrReadPhys);
4577 }
4578
4579 NOREF(pszInstr);
4580 NOREF(pszFailure);
4581 return VINF_SUCCESS;
4582}
4583
4584
4585/**
4586 * Loads the guest-state non-register state as part of VM-entry.
4587 *
4588 * @returns VBox status code.
4589 * @param pVCpu The cross context virtual CPU structure.
4590 *
4591 * @remarks This must be called only after loading the nested-guest register state
4592 * (especially nested-guest RIP).
4593 */
4594IEM_STATIC void iemVmxVmentryLoadGuestNonRegState(PVMCPU pVCpu)
4595{
4596 /*
4597 * Load guest non-register state.
4598 * See Intel spec. 26.6 "Special Features of VM Entry"
4599 */
4600 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4601 uint32_t const uEntryIntInfo = pVmcs->u32EntryIntInfo;
4602 if (VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
4603 {
4604 /** @todo NSTVMX: Pending debug exceptions. */
4605 Assert(!(pVmcs->u64GuestPendingDbgXcpt.u));
4606
4607 if (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
4608 {
 4609 /** @todo NSTVMX: Virtual-NMIs don't affect NMI blocking in the normal sense.
4610 * We probably need a different force flag for virtual-NMI
4611 * pending/blocking. */
4612 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI));
4613 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
4614 }
4615 else
4616 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));
4617
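 /* Blocking by STI or MOV SS is modelled with the interrupt-inhibition force flag,
    keyed on the guest RIP that was loaded earlier. */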
4618 if (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
4619 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
4620 else
4621 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
4622
4623 /* SMI blocking is irrelevant. We don't support SMIs yet. */
4624 }
4625
 4626 /* Loading PDPTEs will be taken care of when we switch modes. We don't support EPT yet. */
4627 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
4628
4629 /* VPID is irrelevant. We don't support VPID yet. */
4630
4631 /* Clear address-range monitoring. */
4632 EMMonitorWaitClear(pVCpu);
4633}
4634
4635
4636/**
4637 * Loads the guest-state as part of VM-entry.
4638 *
4639 * @returns VBox status code.
4640 * @param pVCpu The cross context virtual CPU structure.
4641 * @param pszInstr The VMX instruction name (for logging purposes).
4642 *
4643 * @remarks This must be done after all the necessary steps prior to loading of
4644 * guest-state (e.g. checking various VMCS state).
4645 */
4646IEM_STATIC int iemVmxVmentryLoadGuestState(PVMCPU pVCpu, const char *pszInstr)
4647{
4648 iemVmxVmentryLoadGuestControlRegsMsrs(pVCpu);
4649 iemVmxVmentryLoadGuestSegRegs(pVCpu);
4650
4651 /*
4652 * Load guest RIP, RSP and RFLAGS.
4653 * See Intel spec. 26.3.2.3 "Loading Guest RIP, RSP and RFLAGS".
4654 */
4655 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4656 pVCpu->cpum.GstCtx.rsp = pVmcs->u64GuestRsp.u;
4657 pVCpu->cpum.GstCtx.rip = pVmcs->u64GuestRip.u;
4658 pVCpu->cpum.GstCtx.rflags.u = pVmcs->u64GuestRFlags.u;
4659
4660 iemVmxVmentryLoadGuestNonRegState(pVCpu);
4661
4662 NOREF(pszInstr);
4663 return VINF_SUCCESS;
4664}
4665
4666
4667/**
4668 * Performs event injection (if any) as part of VM-entry.
4669 *
 * @returns VBox status code.
 4670 * @param pVCpu The cross context virtual CPU structure.
4671 * @param pszInstr The VMX instruction name (for logging purposes).
4672 */
4673IEM_STATIC int iemVmxVmentryInjectEvent(PVMCPU pVCpu, const char *pszInstr)
4674{
4675 /*
4676 * Inject events.
4677 * See Intel spec. 26.5 "Event Injection".
4678 */
4679 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
 4680 uint32_t const uEntryIntInfo = pVmcs->u32EntryIntInfo;
4681 if (VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
4682 {
4683 /*
4684 * The event that is going to be made pending for injection is not subject to VMX intercepts,
 4685 * thus we flag ignoring of intercepts. However, recursive exceptions, if any, during delivery
 4686 * of the current event -are- subject to intercepts, hence this flag will be flipped during
 4687 * the actual delivery of this event.
4688 */
4689 pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents = false;
4690
4691 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
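 /* An 'other event' type with the MTF vector requests a pending monitor-trap-flag event
    rather than an exception or external-interrupt injection. */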
4692 if (uType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT)
4693 {
4694 Assert(VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo) == VMX_ENTRY_INT_INFO_VECTOR_MTF);
4695 VMCPU_FF_SET(pVCpu, VMCPU_FF_MTF);
4696 return VINF_SUCCESS;
4697 }
4698
4699 int rc = HMVmxEntryIntInfoInjectTrpmEvent(pVCpu, uEntryIntInfo, pVmcs->u32EntryXcptErrCode, pVmcs->u32EntryInstrLen,
4700 pVCpu->cpum.GstCtx.cr2);
4701 AssertRCReturn(rc, rc);
4702 }
4703
4704 NOREF(pszInstr);
4705 return VINF_SUCCESS;
4706}
4707
4708
4709/**
4710 * VMLAUNCH/VMRESUME instruction execution worker.
4711 *
4712 * @returns Strict VBox status code.
4713 * @param pVCpu The cross context virtual CPU structure.
4714 * @param cbInstr The instruction length.
4715 * @param uInstrId The instruction identity (VMXINSTRID_VMLAUNCH or
4716 * VMXINSTRID_VMRESUME).
4717 * @param pExitInfo Pointer to the VM-exit instruction information struct.
4718 * Optional, can be NULL.
4719 *
 4720 * @remarks Common VMX instruction checks are already expected to be done by the caller,
4721 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
4722 */
4723IEM_STATIC VBOXSTRICTRC iemVmxVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId, PCVMXVEXITINFO pExitInfo)
4724{
4725 Assert( uInstrId == VMXINSTRID_VMLAUNCH
4726 || uInstrId == VMXINSTRID_VMRESUME);
4727 const char *pszInstr = uInstrId == VMXINSTRID_VMRESUME ? "vmresume" : "vmlaunch";
4728
4729 /* Nested-guest intercept. */
4730 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4731 {
4732 if (pExitInfo)
4733 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
4734 uint32_t const uExitReason = uInstrId == VMXINSTRID_VMRESUME ? VMX_EXIT_VMRESUME : VMX_EXIT_VMLAUNCH;
4735 return iemVmxVmexitInstrNeedsInfo(pVCpu, uExitReason, uInstrId, cbInstr);
4736 }
4737
4738 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
4739
4740 /* CPL. */
4741 if (pVCpu->iem.s.uCpl > 0)
4742 {
4743 Log(("%s: CPL %u -> #GP(0)\n", pszInstr, pVCpu->iem.s.uCpl));
4744 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_Cpl;
4745 return iemRaiseGeneralProtectionFault0(pVCpu);
4746 }
4747
4748 /* Current VMCS valid. */
4749 if (!IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
4750 {
4751 Log(("%s: VMCS pointer %#RGp invalid -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
4752 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrInvalid;
4753 iemVmxVmFailInvalid(pVCpu);
4754 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4755 return VINF_SUCCESS;
4756 }
4757
4758 /** @todo Distinguish block-by-MOV-SS from block-by-STI. Currently we
4759 * use block-by-STI here which is not quite correct. */
4760 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
4761 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
4762 {
4763 Log(("%s: VM entry with events blocked by MOV SS -> VMFail\n", pszInstr));
4764 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_BlocKMovSS;
4765 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_BLOCK_MOVSS);
4766 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4767 return VINF_SUCCESS;
4768 }
4769
4770 if (uInstrId == VMXINSTRID_VMLAUNCH)
4771 {
4772 /* VMLAUNCH with non-clear VMCS. */
4773 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_STATE_CLEAR)
4774 { /* likely */ }
4775 else
4776 {
4777 Log(("vmlaunch: VMLAUNCH with non-clear VMCS -> VMFail\n"));
4778 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsClear;
4779 iemVmxVmFail(pVCpu, VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS);
4780 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4781 return VINF_SUCCESS;
4782 }
4783 }
4784 else
4785 {
4786 /* VMRESUME with non-launched VMCS. */
4787 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_STATE_LAUNCHED)
4788 { /* likely */ }
4789 else
4790 {
4791 Log(("vmresume: VMRESUME with non-launched VMCS -> VMFail\n"));
4792 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsLaunch;
4793 iemVmxVmFail(pVCpu, VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS);
4794 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4795 return VINF_SUCCESS;
4796 }
4797 }
4798
4799 /*
4800 * Load the current VMCS.
4801 */
4802 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
4803 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs),
4804 IEM_VMX_GET_CURRENT_VMCS(pVCpu), VMX_V_VMCS_SIZE);
4805 if (RT_FAILURE(rc))
4806 {
4807 Log(("%s: Failed to read VMCS at %#RGp, rc=%Rrc\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu), rc));
4808 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrReadPhys;
4809 return rc;
4810 }
4811
4812 /*
4813 * We are allowed to cache VMCS related data structures (such as I/O bitmaps, MSR bitmaps)
4814 * while entering VMX non-root mode. We do some of this while checking VM-execution
 4815 * controls. The guest hypervisor should not make assumptions and cannot expect
 4816 * predictable behavior if changes to these structures are made in guest memory after
 4817 * entering VMX non-root mode. As far as VirtualBox is concerned, the guest cannot modify
 4818 * them anyway since we cache them in host memory. We trade memory for speed here.
4819 *
4820 * See Intel spec. 24.11.4 "Software Access to Related Structures".
4821 */
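 /* The checks below follow the ordering of Intel spec. 26.2 and 26.3: VM-execution,
    VM-exit and VM-entry controls, host state, and finally guest state. */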
4822 rc = iemVmxVmentryCheckExecCtls(pVCpu, pszInstr);
4823 if (RT_SUCCESS(rc))
4824 {
4825 rc = iemVmxVmentryCheckExitCtls(pVCpu, pszInstr);
4826 if (RT_SUCCESS(rc))
4827 {
4828 rc = iemVmxVmentryCheckEntryCtls(pVCpu, pszInstr);
4829 if (RT_SUCCESS(rc))
4830 {
4831 rc = iemVmxVmentryCheckHostState(pVCpu, pszInstr);
4832 if (RT_SUCCESS(rc))
4833 {
4834 /* Save the guest force-flags as VM-exits can occur from this point on. */
4835 iemVmxVmentrySaveForceFlags(pVCpu);
4836
4837 rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
4838 if (RT_SUCCESS(rc))
4839 {
4840 rc = iemVmxVmentryLoadGuestState(pVCpu, pszInstr);
4841 if (RT_SUCCESS(rc))
4842 {
4843 rc = iemVmxVmentryLoadGuestAutoMsrs(pVCpu, pszInstr);
4844 if (RT_SUCCESS(rc))
4845 {
4846 Assert(rc != VINF_CPUM_R3_MSR_WRITE);
4847
4848 /* VMLAUNCH instruction must update the VMCS launch state. */
4849 if (uInstrId == VMXINSTRID_VMLAUNCH)
4850 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = VMX_V_VMCS_STATE_LAUNCHED;
4851
4852 /* Perform the VMX transition (PGM updates). */
4853 VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
4854 if (rcStrict == VINF_SUCCESS)
4855 { /* likely */ }
4856 else if (RT_SUCCESS(rcStrict))
4857 {
4858 Log3(("%s: iemVmxWorldSwitch returns %Rrc -> Setting passup status\n", pszInstr,
4859 VBOXSTRICTRC_VAL(rcStrict)));
4860 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
4861 }
4862 else
4863 {
4864 Log3(("%s: iemVmxWorldSwitch failed! rc=%Rrc\n", pszInstr, VBOXSTRICTRC_VAL(rcStrict)));
4865 return rcStrict;
4866 }
4867
4868 /* We've now entered nested-guest execution. */
4869 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
4870
4871 /* Now that we've switched page tables, we can inject events if any. */
4872 iemVmxVmentryInjectEvent(pVCpu, pszInstr);
4873
4874 /** @todo NSTVMX: Setup VMX preemption timer */
4875 /** @todo NSTVMX: TPR thresholding. */
4876
4877 return VINF_SUCCESS;
4878 }
4879 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED);
4880 }
4881 }
4882 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED);
4883 }
4884
4885 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_HOST_STATE);
4886 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4887 return VINF_SUCCESS;
4888 }
4889 }
4890 }
4891
4892 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
4893 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
4894 return VINF_SUCCESS;
4895}
4896
4897
4898/**
4899 * Checks whether an RDMSR or WRMSR instruction for the given MSR is intercepted
4900 * (causes a VM-exit) or not.
4901 *
4902 * @returns @c true if the instruction is intercepted, @c false otherwise.
4903 * @param pVCpu The cross context virtual CPU structure.
4904 * @param uExitReason The VM-exit exit reason (VMX_EXIT_RDMSR or
4905 * VMX_EXIT_WRMSR).
4906 * @param idMsr The MSR.
4907 */
4908IEM_STATIC bool iemVmxIsRdmsrWrmsrInterceptSet(PVMCPU pVCpu, uint32_t uExitReason, uint32_t idMsr)
4909{
4910 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
4911 Assert( uExitReason == VMX_EXIT_RDMSR
4912 || uExitReason == VMX_EXIT_WRMSR);
4913
4914 /* Consult the MSR bitmap if the feature is supported. */
4915 if (IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_USE_MSR_BITMAPS))
4916 {
4917 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
4918 if (uExitReason == VMX_EXIT_RDMSR)
4919 {
4920 VMXMSREXITREAD enmRead;
4921 int rc = HMVmxGetMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, &enmRead,
4922 NULL /* penmWrite */);
4923 AssertRC(rc);
4924 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
4925 return true;
4926 }
4927 else
4928 {
4929 VMXMSREXITWRITE enmWrite;
4930 int rc = HMVmxGetMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, NULL /* penmRead */,
4931 &enmWrite);
4932 AssertRC(rc);
4933 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
4934 return true;
4935 }
4936 return false;
4937 }
4938
4939 /* Without MSR bitmaps, all MSR accesses are intercepted. */
4940 return true;
4941}
4942
4943
4944/**
4945 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field is
4946 * intercepted (causes a VM-exit) or not.
4947 *
4948 * @returns @c true if the instruction is intercepted, @c false otherwise.
4949 * @param pVCpu The cross context virtual CPU structure.
4950 * @param u64FieldEnc The VMCS field encoding.
4951 * @param uExitReason The VM-exit exit reason (VMX_EXIT_VMREAD or
 4952 * VMX_EXIT_VMWRITE).
4953 */
4954IEM_STATIC bool iemVmxIsVmreadVmwriteInterceptSet(PVMCPU pVCpu, uint32_t uExitReason, uint64_t u64FieldEnc)
4955{
4956 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
4957 Assert( uExitReason == VMX_EXIT_VMREAD
4958 || uExitReason == VMX_EXIT_VMWRITE);
4959
4960 /* Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted. */
4961 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing)
4962 return true;
4963
4964 /*
4965 * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE is intercepted.
4966 * This excludes any reserved bits in the valid parts of the field encoding (i.e. bit 12).
4967 */
4968 if (u64FieldEnc & VMX_VMCS_ENC_RSVD_MASK)
4969 return true;
4970
4971 /* Finally, consult the VMREAD/VMWRITE bitmap whether to intercept the instruction or not. */
4972 uint32_t u32FieldEnc = RT_LO_U32(u64FieldEnc);
4973 Assert(u32FieldEnc >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
4974 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
4975 uint8_t const *pbBitmap = uExitReason == VMX_EXIT_VMREAD
4976 ? (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap)
4977 : (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap);
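 /* One bit per field encoding: byte index = encoding / 8, bit = encoding % 8; a set bit means intercept. */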
4978 pbBitmap += (u32FieldEnc >> 3);
4979 if (*pbBitmap & RT_BIT(u32FieldEnc & 7))
4980 return true;
4981
4982 return false;
4983}
4984
4985
4986/**
 4987 * VMREAD common (memory/register) instruction execution worker.
4988 *
4989 * @returns Strict VBox status code.
4990 * @param pVCpu The cross context virtual CPU structure.
4991 * @param cbInstr The instruction length.
4992 * @param pu64Dst Where to write the VMCS value (only updated when
4993 * VINF_SUCCESS is returned).
4994 * @param u64FieldEnc The VMCS field encoding.
4995 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
4996 * be NULL.
4997 */
4998IEM_STATIC VBOXSTRICTRC iemVmxVmreadCommon(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
4999 PCVMXVEXITINFO pExitInfo)
5000{
5001 /* Nested-guest intercept. */
5002 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5003 && iemVmxIsVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMREAD, u64FieldEnc))
5004 {
5005 if (pExitInfo)
5006 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
5007 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMREAD, VMXINSTRID_VMREAD, cbInstr);
5008 }
5009
5010 /* CPL. */
5011 if (pVCpu->iem.s.uCpl > 0)
5012 {
5013 Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5014 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_Cpl;
5015 return iemRaiseGeneralProtectionFault0(pVCpu);
5016 }
5017
5018 /* VMCS pointer in root mode. */
5019 if ( IEM_VMX_IS_ROOT_MODE(pVCpu)
5020 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
5021 {
5022 Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
5023 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrInvalid;
5024 iemVmxVmFailInvalid(pVCpu);
5025 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5026 return VINF_SUCCESS;
5027 }
5028
5029 /* VMCS-link pointer in non-root mode. */
5030 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5031 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
5032 {
5033 Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
5034 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_LinkPtrInvalid;
5035 iemVmxVmFailInvalid(pVCpu);
5036 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5037 return VINF_SUCCESS;
5038 }
5039
5040 /* Supported VMCS field. */
 5041 if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
5042 {
5043 Log(("vmread: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
5044 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_FieldInvalid;
5045 iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
5046 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5047 return VINF_SUCCESS;
5048 }
5049
5050 /*
5051 * Setup reading from the current or shadow VMCS.
5052 */
5053 uint8_t *pbVmcs;
5054 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5055 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
5056 else
5057 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5058 Assert(pbVmcs);
5059
5060 VMXVMCSFIELDENC FieldEnc;
5061 FieldEnc.u = RT_LO_U32(u64FieldEnc);
5062 uint8_t const uWidth = FieldEnc.n.u2Width;
5063 uint8_t const uType = FieldEnc.n.u2Type;
5064 uint8_t const uWidthType = (uWidth << 2) | uType;
5065 uint8_t const uIndex = FieldEnc.n.u8Index;
5066 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
5067 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
5068
5069 /*
5070 * Read the VMCS component based on the field's effective width.
5071 *
 5072 * The effective width of a 64-bit field is adjusted to 32 bits if the access type
 5073 * indicates the high part of the field (little endian).
 5074 *
 5075 * Note! The caller is responsible for trimming the result and updating registers
 5076 * or memory locations as required. Here we just zero-extend to the largest
 5077 * type (i.e. 64 bits).
5078 */
5079 uint8_t *pbField = pbVmcs + offField;
5080 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
5081 switch (uEffWidth)
5082 {
5083 case VMX_VMCS_ENC_WIDTH_64BIT:
5084 case VMX_VMCS_ENC_WIDTH_NATURAL: *pu64Dst = *(uint64_t *)pbField; break;
5085 case VMX_VMCS_ENC_WIDTH_32BIT: *pu64Dst = *(uint32_t *)pbField; break;
5086 case VMX_VMCS_ENC_WIDTH_16BIT: *pu64Dst = *(uint16_t *)pbField; break;
5087 }
5088 return VINF_SUCCESS;
5089}
5090
5091
5092/**
5093 * VMREAD (64-bit register) instruction execution worker.
5094 *
5095 * @returns Strict VBox status code.
5096 * @param pVCpu The cross context virtual CPU structure.
5097 * @param cbInstr The instruction length.
5098 * @param pu64Dst Where to store the VMCS field's value.
5099 * @param u64FieldEnc The VMCS field encoding.
5100 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
5101 * be NULL.
5102 */
5103IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg64(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
5104 PCVMXVEXITINFO pExitInfo)
5105{
5106 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
5107 if (rcStrict == VINF_SUCCESS)
5108 {
5109 iemVmxVmreadSuccess(pVCpu, cbInstr);
5110 return VINF_SUCCESS;
5111 }
5112
5113 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
5114 return rcStrict;
5115}
5116
5117
5118/**
5119 * VMREAD (32-bit register) instruction execution worker.
5120 *
5121 * @returns Strict VBox status code.
5122 * @param pVCpu The cross context virtual CPU structure.
5123 * @param cbInstr The instruction length.
5124 * @param pu32Dst Where to store the VMCS field's value.
5125 * @param u32FieldEnc The VMCS field encoding.
5126 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
5127 * be NULL.
5128 */
5129IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg32(PVMCPU pVCpu, uint8_t cbInstr, uint32_t *pu32Dst, uint64_t u32FieldEnc,
5130 PCVMXVEXITINFO pExitInfo)
5131{
5132 uint64_t u64Dst;
5133 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u32FieldEnc, pExitInfo);
5134 if (rcStrict == VINF_SUCCESS)
5135 {
5136 *pu32Dst = u64Dst;
5137 iemVmxVmreadSuccess(pVCpu, cbInstr);
5138 return VINF_SUCCESS;
5139 }
5140
5141 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
5142 return rcStrict;
5143}
5144
5145
5146/**
5147 * VMREAD (memory) instruction execution worker.
5148 *
5149 * @returns Strict VBox status code.
5150 * @param pVCpu The cross context virtual CPU structure.
5151 * @param cbInstr The instruction length.
 5152 * @param iEffSeg The effective segment register to use with @a GCPtrDst.
5154 * @param enmEffAddrMode The effective addressing mode (only used with memory
5155 * operand).
5156 * @param GCPtrDst The guest linear address to store the VMCS field's
5157 * value.
5158 * @param u64FieldEnc The VMCS field encoding.
5159 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
5160 * be NULL.
5161 */
5162IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode,
5163 RTGCPTR GCPtrDst, uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
5164{
5165 uint64_t u64Dst;
5166 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u64FieldEnc, pExitInfo);
5167 if (rcStrict == VINF_SUCCESS)
5168 {
5169 /*
5170 * Write the VMCS field's value to the location specified in guest-memory.
5171 *
5172 * The pointer size depends on the address size (address-size prefix allowed).
5173 * The operand size depends on IA-32e mode (operand-size prefix not allowed).
5174 */
5175 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
5176 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
5177 GCPtrDst &= s_auAddrSizeMasks[enmEffAddrMode];
5178
5179 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5180 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
5181 else
5182 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
5183 if (rcStrict == VINF_SUCCESS)
5184 {
5185 iemVmxVmreadSuccess(pVCpu, cbInstr);
5186 return VINF_SUCCESS;
5187 }
5188
5189 Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
5190 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrMap;
5191 return rcStrict;
5192 }
5193
5194 Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
5195 return rcStrict;
5196}
5197
5198
5199/**
5200 * VMWRITE instruction execution worker.
5201 *
5202 * @returns Strict VBox status code.
5203 * @param pVCpu The cross context virtual CPU structure.
5204 * @param cbInstr The instruction length.
5205 * @param iEffSeg The effective segment register to use with @a u64Val.
5206 * Pass UINT8_MAX if it is a register access.
5207 * @param enmEffAddrMode The effective addressing mode (only used with memory
5208 * operand).
5209 * @param u64Val The value to write (or guest linear address to the
5210 * value), @a iEffSeg will indicate if it's a memory
5211 * operand.
5212 * @param u64FieldEnc The VMCS field encoding.
5213 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
5214 * be NULL.
5215 */
5216IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode, uint64_t u64Val,
5217 uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
5218{
5219 /* Nested-guest intercept. */
5220 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5221 && iemVmxIsVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMWRITE, u64FieldEnc))
5222 {
5223 if (pExitInfo)
5224 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
5225 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMWRITE, VMXINSTRID_VMWRITE, cbInstr);
5226 }
5227
5228 /* CPL. */
5229 if (pVCpu->iem.s.uCpl > 0)
5230 {
5231 Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5232 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_Cpl;
5233 return iemRaiseGeneralProtectionFault0(pVCpu);
5234 }
5235
5236 /* VMCS pointer in root mode. */
5237 if ( IEM_VMX_IS_ROOT_MODE(pVCpu)
5238 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
5239 {
5240 Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
5241 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrInvalid;
5242 iemVmxVmFailInvalid(pVCpu);
5243 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5244 return VINF_SUCCESS;
5245 }
5246
5247 /* VMCS-link pointer in non-root mode. */
5248 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5249 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
5250 {
5251 Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
5252 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_LinkPtrInvalid;
5253 iemVmxVmFailInvalid(pVCpu);
5254 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5255 return VINF_SUCCESS;
5256 }
5257
5258 /* If the VMWRITE instruction references memory, access the specified memory operand. */
5259 bool const fIsRegOperand = iEffSeg == UINT8_MAX;
5260 if (!fIsRegOperand)
5261 {
5262 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
5263 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
5264 RTGCPTR const GCPtrVal = u64Val & s_auAddrSizeMasks[enmEffAddrMode];
5265
5266 /* Read the value from the specified guest memory location. */
5267 VBOXSTRICTRC rcStrict;
5268 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5269 rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
5270 else
5271 {
5272 uint32_t u32Val;
5273 rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iEffSeg, GCPtrVal);
5274 u64Val = u32Val;
5275 }
5276 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5277 {
5278 Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
5279 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrMap;
5280 return rcStrict;
5281 }
5282 }
5283 else
5284 Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
5285
5286 /* Supported VMCS field. */
5287 if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
5288 {
5289 Log(("vmwrite: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
5290 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldInvalid;
5291 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
5292 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5293 return VINF_SUCCESS;
5294 }
5295
5296 /* Read-only VMCS field. */
5297 bool const fIsFieldReadOnly = HMVmxIsVmcsFieldReadOnly(u64FieldEnc);
5298 if ( fIsFieldReadOnly
5299 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
5300 {
5301 Log(("vmwrite: Write to read-only VMCS component %#RX64 -> VMFail\n", u64FieldEnc));
5302 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldRo;
5303 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
5304 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5305 return VINF_SUCCESS;
5306 }
5307
5308 /*
5309 * Setup writing to the current or shadow VMCS.
5310 */
5311 uint8_t *pbVmcs;
5312 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5313 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
5314 else
5315 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5316 Assert(pbVmcs);
5317
5318 VMXVMCSFIELDENC FieldEnc;
5319 FieldEnc.u = RT_LO_U32(u64FieldEnc);
5320 uint8_t const uWidth = FieldEnc.n.u2Width;
5321 uint8_t const uType = FieldEnc.n.u2Type;
5322 uint8_t const uWidthType = (uWidth << 2) | uType;
5323 uint8_t const uIndex = FieldEnc.n.u8Index;
5324 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
5325 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
5326
5327 /*
5328 * Write the VMCS component based on the field's effective width.
5329 *
 5330 * The effective width of a 64-bit field is adjusted to 32 bits if the access type
 5331 * indicates the high part of the field (little endian).
5332 */
5333 uint8_t *pbField = pbVmcs + offField;
5334 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
5335 switch (uEffWidth)
5336 {
5337 case VMX_VMCS_ENC_WIDTH_64BIT:
5338 case VMX_VMCS_ENC_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
5339 case VMX_VMCS_ENC_WIDTH_32BIT: *(uint32_t *)pbField = u64Val; break;
5340 case VMX_VMCS_ENC_WIDTH_16BIT: *(uint16_t *)pbField = u64Val; break;
5341 }
5342
5343 iemVmxVmSucceed(pVCpu);
5344 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5345 return VINF_SUCCESS;
5346}
5347
5348
5349/**
5350 * VMCLEAR instruction execution worker.
5351 *
5352 * @returns Strict VBox status code.
5353 * @param pVCpu The cross context virtual CPU structure.
5354 * @param cbInstr The instruction length.
5355 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
5356 * @param GCPtrVmcs The linear address of the VMCS pointer.
5357 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
5358 * be NULL.
5359 *
 5360 * @remarks Common VMX instruction checks are already expected to be done by the caller,
5361 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
5362 */
5363IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
5364 PCVMXVEXITINFO pExitInfo)
5365{
5366 /* Nested-guest intercept. */
5367 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5368 {
5369 if (pExitInfo)
5370 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
5371 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMCLEAR, VMXINSTRID_NONE, cbInstr);
5372 }
5373
5374 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
5375
5376 /* CPL. */
5377 if (pVCpu->iem.s.uCpl > 0)
5378 {
5379 Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5380 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_Cpl;
5381 return iemRaiseGeneralProtectionFault0(pVCpu);
5382 }
5383
5384 /* Get the VMCS pointer from the location specified by the source memory operand. */
5385 RTGCPHYS GCPhysVmcs;
5386 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
5387 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5388 {
5389 Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
5390 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrMap;
5391 return rcStrict;
5392 }
5393
5394 /* VMCS pointer alignment. */
5395 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
5396 {
5397 Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
5398 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAlign;
5399 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
5400 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5401 return VINF_SUCCESS;
5402 }
5403
5404 /* VMCS physical-address width limits. */
5405 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5406 {
5407 Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
5408 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrWidth;
5409 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
5410 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5411 return VINF_SUCCESS;
5412 }
5413
5414 /* VMCS is not the VMXON region. */
5415 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
5416 {
5417 Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
5418 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrVmxon;
5419 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
5420 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5421 return VINF_SUCCESS;
5422 }
5423
5424 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
5425 restriction imposed by our implementation. */
5426 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
5427 {
5428 Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
5429 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAbnormal;
5430 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
5431 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5432 return VINF_SUCCESS;
5433 }
5434
5435 /*
5436 * VMCLEAR allows committing and clearing any valid VMCS pointer.
5437 *
5438 * If the current VMCS is the one being cleared, set its state to 'clear' and commit
5439 * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
5440 * to 'clear'.
5441 */
5442 uint8_t const fVmcsStateClear = VMX_V_VMCS_STATE_CLEAR;
5443 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
5444 {
5445 Assert(GCPhysVmcs != NIL_RTGCPHYS); /* Paranoia. */
5446 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
5447 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsStateClear;
5448 iemVmxCommitCurrentVmcsToMemory(pVCpu);
5449 Assert(!IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
5450 }
5451 else
5452 {
 5453 rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_OFFSETOF(VMXVVMCS, fVmcsState),
5454 (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
5455 }
5456
5457 iemVmxVmSucceed(pVCpu);
5458 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5459 return rcStrict;
5460}
5461
5462
5463/**
5464 * VMPTRST instruction execution worker.
5465 *
5466 * @returns Strict VBox status code.
5467 * @param pVCpu The cross context virtual CPU structure.
5468 * @param cbInstr The instruction length.
5469 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
5470 * @param GCPtrVmcs The linear address of where to store the current VMCS
5471 * pointer.
5472 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
5473 * be NULL.
5474 *
 5475 * @remarks Common VMX instruction checks are already expected to be done by the caller,
5476 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
5477 */
5478IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
5479 PCVMXVEXITINFO pExitInfo)
5480{
5481 /* Nested-guest intercept. */
5482 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5483 {
5484 if (pExitInfo)
5485 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
5486 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRST, VMXINSTRID_NONE, cbInstr);
5487 }
5488
5489 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
5490
5491 /* CPL. */
5492 if (pVCpu->iem.s.uCpl > 0)
5493 {
5494 Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5495 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_Cpl;
5496 return iemRaiseGeneralProtectionFault0(pVCpu);
5497 }
5498
 5499 /* Store the current VMCS pointer to the location specified by the destination memory operand. */
5500 AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
5501 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
5502 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5503 {
5504 iemVmxVmSucceed(pVCpu);
5505 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5506 return rcStrict;
5507 }
5508
5509 Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand %#Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
5510 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_PtrMap;
5511 return rcStrict;
5512}
5513
5514
5515/**
5516 * VMPTRLD instruction execution worker.
5517 *
5518 * @returns Strict VBox status code.
5519 * @param pVCpu The cross context virtual CPU structure.
5520 * @param cbInstr The instruction length.
 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
 5521 * @param GCPtrVmcs The linear address of the VMCS pointer.
5522 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
5523 * be NULL.
5524 *
 5525 * @remarks Common VMX instruction checks are already expected to be done by the caller,
5526 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
5527 */
5528IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
5529 PCVMXVEXITINFO pExitInfo)
5530{
5531 /* Nested-guest intercept. */
5532 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5533 {
5534 if (pExitInfo)
5535 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
5536 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRLD, VMXINSTRID_NONE, cbInstr);
5537 }
5538
5539 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
5540
5541 /* CPL. */
5542 if (pVCpu->iem.s.uCpl > 0)
5543 {
5544 Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5545 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_Cpl;
5546 return iemRaiseGeneralProtectionFault0(pVCpu);
5547 }
5548
5549 /* Get the VMCS pointer from the location specified by the source memory operand. */
5550 RTGCPHYS GCPhysVmcs;
5551 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
5552 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5553 {
5554 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
5555 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrMap;
5556 return rcStrict;
5557 }
5558
5559 /* VMCS pointer alignment. */
5560 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
5561 {
5562 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
5563 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAlign;
5564 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
5565 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5566 return VINF_SUCCESS;
5567 }
5568
5569 /* VMCS physical-address width limits. */
5570 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5571 {
5572 Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
5573 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrWidth;
5574 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
5575 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5576 return VINF_SUCCESS;
5577 }
5578
5579 /* VMCS is not the VMXON region. */
5580 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
5581 {
5582 Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
5583 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrVmxon;
5584 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
5585 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5586 return VINF_SUCCESS;
5587 }
5588
5589 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
5590 restriction imposed by our implementation. */
5591 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
5592 {
5593 Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
5594 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAbnormal;
5595 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
5596 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5597 return VINF_SUCCESS;
5598 }
5599
5600 /* Read the VMCS revision ID from the VMCS. */
5601 VMXVMCSREVID VmcsRevId;
5602 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
5603 if (RT_FAILURE(rc))
5604 {
5605 Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
5606 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrReadPhys;
5607 return rc;
5608 }
5609
 5610 /* Verify that the VMCS revision specified by the guest matches what we reported to the guest,
 5611 and also check the VMCS shadowing feature. */
5612 if ( VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
5613 || ( VmcsRevId.n.fIsShadowVmcs
5614 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
5615 {
5616 if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
5617 {
5618 Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFail()\n", VMX_V_VMCS_REVISION_ID,
5619 VmcsRevId.n.u31RevisionId));
5620 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_VmcsRevId;
5621 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
5622 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5623 return VINF_SUCCESS;
5624 }
5625
5626 Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
5627 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_ShadowVmcs;
5628 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
5629 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5630 return VINF_SUCCESS;
5631 }
5632
5633 /*
 5634 * We only maintain the current VMCS in our virtual CPU context (CPUMCTX). Therefore,
5635 * VMPTRLD shall always flush any existing current VMCS back to guest memory before loading
5636 * a new VMCS as current.
5637 */
5638 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
5639 {
5640 iemVmxCommitCurrentVmcsToMemory(pVCpu);
5641 IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
5642 }
5643
5644 iemVmxVmSucceed(pVCpu);
5645 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5646 return VINF_SUCCESS;
5647}
5648
5649
5650/**
5651 * VMXON instruction execution worker.
5652 *
5653 * @returns Strict VBox status code.
5654 * @param pVCpu The cross context virtual CPU structure.
5655 * @param cbInstr The instruction length.
5656 * @param iEffSeg The effective segment register to use with @a
5657 * GCPtrVmxon.
5658 * @param GCPtrVmxon The linear address of the VMXON pointer.
5659 * @param pExitInfo Pointer to the VM-exit instruction information struct.
5660 * Optional, can be NULL.
5661 *
 5662 * @remarks Common VMX instruction checks are already expected to be done by the caller,
5663 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
5664 */
5665IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmxon,
5666 PCVMXVEXITINFO pExitInfo)
5667{
5668#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
5669 RT_NOREF5(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
5670 return VINF_EM_RAW_EMULATE_INSTR;
5671#else
5672 if (!IEM_VMX_IS_ROOT_MODE(pVCpu))
5673 {
5674 /* CPL. */
5675 if (pVCpu->iem.s.uCpl > 0)
5676 {
5677 Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5678 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cpl;
5679 return iemRaiseGeneralProtectionFault0(pVCpu);
5680 }
5681
5682 /* A20M (A20 Masked) mode. */
5683 if (!PGMPhysIsA20Enabled(pVCpu))
5684 {
5685 Log(("vmxon: A20M mode -> #GP(0)\n"));
5686 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_A20M;
5687 return iemRaiseGeneralProtectionFault0(pVCpu);
5688 }
5689
5690 /* CR0. */
5691 {
5692 /* CR0 MB1 bits. */
5693 uint64_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
5694 if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
5695 {
5696 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
5697 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed0;
5698 return iemRaiseGeneralProtectionFault0(pVCpu);
5699 }
5700
5701 /* CR0 MBZ bits. */
5702 uint64_t const uCr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
5703 if (pVCpu->cpum.GstCtx.cr0 & ~uCr0Fixed1)
5704 {
5705 Log(("vmxon: CR0 fixed1 bits set -> #GP(0)\n"));
5706 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed1;
5707 return iemRaiseGeneralProtectionFault0(pVCpu);
5708 }
5709 }
5710
5711 /* CR4. */
5712 {
5713 /* CR4 MB1 bits. */
5714 uint64_t const uCr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
5715 if ((pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0) != uCr4Fixed0)
5716 {
5717 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
5718 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed0;
5719 return iemRaiseGeneralProtectionFault0(pVCpu);
5720 }
5721
5722 /* CR4 MBZ bits. */
5723 uint64_t const uCr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
5724 if (pVCpu->cpum.GstCtx.cr4 & ~uCr4Fixed1)
5725 {
5726 Log(("vmxon: CR4 fixed1 bits set -> #GP(0)\n"));
5727 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed1;
5728 return iemRaiseGeneralProtectionFault0(pVCpu);
5729 }
5730 }
5731
5732 /* Feature control MSR's LOCK and VMXON bits. */
5733 uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
 5734 if ( (uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
 != (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
5735 {
5736 Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
5737 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_MsrFeatCtl;
5738 return iemRaiseGeneralProtectionFault0(pVCpu);
5739 }
5740
5741 /* Get the VMXON pointer from the location specified by the source memory operand. */
5742 RTGCPHYS GCPhysVmxon;
5743 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
5744 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5745 {
5746 Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
5747 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrMap;
5748 return rcStrict;
5749 }
5750
5751 /* VMXON region pointer alignment. */
5752 if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
5753 {
5754 Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
5755 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAlign;
5756 iemVmxVmFailInvalid(pVCpu);
5757 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5758 return VINF_SUCCESS;
5759 }
5760
5761 /* VMXON physical-address width limits. */
5762 if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5763 {
5764 Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
5765 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrWidth;
5766 iemVmxVmFailInvalid(pVCpu);
5767 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5768 return VINF_SUCCESS;
5769 }
5770
5771 /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
5772 restriction imposed by our implementation. */
5773 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
5774 {
5775 Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
5776 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAbnormal;
5777 iemVmxVmFailInvalid(pVCpu);
5778 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5779 return VINF_SUCCESS;
5780 }
5781
5782 /* Read the VMCS revision ID from the VMXON region. */
5783 VMXVMCSREVID VmcsRevId;
5784 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
5785 if (RT_FAILURE(rc))
5786 {
5787 Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
5788 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrReadPhys;
5789 return rc;
5790 }
5791
5792 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
5793 if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
5794 {
5795 /* Revision ID mismatch. */
5796 if (!VmcsRevId.n.fIsShadowVmcs)
5797 {
5798 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
5799 VmcsRevId.n.u31RevisionId));
5800 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmcsRevId;
5801 iemVmxVmFailInvalid(pVCpu);
5802 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5803 return VINF_SUCCESS;
5804 }
5805
5806 /* Shadow VMCS disallowed. */
5807 Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
5808 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_ShadowVmcs;
5809 iemVmxVmFailInvalid(pVCpu);
5810 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5811 return VINF_SUCCESS;
5812 }
5813
5814 /*
5815 * Record that we're in VMX operation, block INIT, block and disable A20M.
5816 */
5817 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
5818 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
5819 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
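 /* From this point on the CPU is in VMX root operation; no current VMCS is loaded yet,
    a VMPTRLD must follow before VMLAUNCH/VMRESUME can succeed. */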
5820
5821 /* Clear address-range monitoring. */
5822 EMMonitorWaitClear(pVCpu);
5823 /** @todo NSTVMX: Intel PT. */
5824
5825 iemVmxVmSucceed(pVCpu);
5826 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5827# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
5828 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
5829# else
5830 return VINF_SUCCESS;
5831# endif
5832 }
5833 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5834 {
5835 /* Nested-guest intercept. */
5836 if (pExitInfo)
5837 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
5838 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMXON, VMXINSTRID_NONE, cbInstr);
5839 }
5840
5841 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
5842
5843 /* CPL. */
5844 if (pVCpu->iem.s.uCpl > 0)
5845 {
5846 Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5847 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxRootCpl;
5848 return iemRaiseGeneralProtectionFault0(pVCpu);
5849 }
5850
5851 /* VMXON when already in VMX root mode. */
5852 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
5853 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxAlreadyRoot;
5854 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5855 return VINF_SUCCESS;
5856#endif
5857}
5858
5859
5860/**
5861 * Implements 'VMXOFF'.
5862 *
 5863 * @remarks Common VMX instruction checks are already expected to be done by the caller,
5864 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
5865 */
5866IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
5867{
5868# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
5869 RT_NOREF2(pVCpu, cbInstr);
5870 return VINF_EM_RAW_EMULATE_INSTR;
5871# else
5872 /* Nested-guest intercept. */
5873 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5874 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMXOFF, cbInstr);
5875
5876 /* CPL. */
5877 if (pVCpu->iem.s.uCpl > 0)
5878 {
5879 Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5880 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxoff_Cpl;
5881 return iemRaiseGeneralProtectionFault0(pVCpu);
5882 }
5883
5884 /* Dual monitor treatment of SMIs and SMM. */
5885 uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
5886 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
5887 {
5888 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
5889 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5890 return VINF_SUCCESS;
5891 }
5892
5893 /* Record that we're no longer in VMX root operation, block INIT, block and disable A20M. */
5894 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
5895 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
5896
5897 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
5898 { /** @todo NSTVMX: Unblock SMI. */ }
5899
5900 EMMonitorWaitClear(pVCpu);
5901 /** @todo NSTVMX: Unblock and enable A20M. */
5902
5903 iemVmxVmSucceed(pVCpu);
5904 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5905# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
5906 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
5907# else
5908 return VINF_SUCCESS;
5909# endif
5910# endif
5911}
5912
5913
5914/**
5915 * Implements 'VMXON'.
5916 */
5917IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
5918{
5919 return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
5920}
5921
5922
5923/**
5924 * Implements 'VMLAUNCH'.
5925 */
5926IEM_CIMPL_DEF_0(iemCImpl_vmlaunch)
5927{
5928 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMLAUNCH, NULL /* pExitInfo */);
5929}
5930
5931
5932/**
5933 * Implements 'VMRESUME'.
5934 */
5935IEM_CIMPL_DEF_0(iemCImpl_vmresume)
5936{
5937 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMRESUME, NULL /* pExitInfo */);
5938}
5939
5940
5941/**
5942 * Implements 'VMPTRLD'.
5943 */
5944IEM_CIMPL_DEF_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
5945{
5946 return iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
5947}
5948
5949
5950/**
5951 * Implements 'VMPTRST'.
5952 */
5953IEM_CIMPL_DEF_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
5954{
5955 return iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
5956}
5957
5958
5959/**
5960 * Implements 'VMCLEAR'.
5961 */
5962IEM_CIMPL_DEF_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
5963{
5964 return iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
5965}
5966
5967
5968/**
5969 * Implements 'VMWRITE' register.
5970 */
5971IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64FieldEnc)
5972{
5973 return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, IEMMODE_64BIT /* N/A */, u64Val, u64FieldEnc,
5974 NULL /* pExitInfo */);
5975}
5976
5977
5978/**
5979 * Implements 'VMWRITE' memory.
5980 */
5981IEM_CIMPL_DEF_4(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrVal, uint32_t, u64FieldEnc)
5982{
5983 return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrVal, u64FieldEnc, NULL /* pExitInfo */);
5984}
5985
5986
5987/**
5988 * Implements 'VMREAD' 64-bit register.
5989 */
5990IEM_CIMPL_DEF_2(iemCImpl_vmread64_reg, uint64_t *, pu64Dst, uint64_t, u64FieldEnc)
5991{
5992 return iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, NULL /* pExitInfo */);
5993}
5994
5995
5996/**
5997 * Implements 'VMREAD' 32-bit register.
5998 */
5999IEM_CIMPL_DEF_2(iemCImpl_vmread32_reg, uint32_t *, pu32Dst, uint32_t, u32FieldEnc)
6000{
6001 return iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u32FieldEnc, NULL /* pExitInfo */);
6002}
6003
6004
6005/**
6006 * Implements 'VMREAD' memory.
6007 */
6008IEM_CIMPL_DEF_4(iemCImpl_vmread_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrDst, uint32_t, u64FieldEnc)
6009{
6010 return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, u64FieldEnc, NULL /* pExitInfo */);
6011}
6012
6013#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6014
6015
6016/**
6017 * Implements 'VMCALL'.
6018 */
6019IEM_CIMPL_DEF_0(iemCImpl_vmcall)
6020{
6021#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6022 /* Nested-guest intercept. */
6023 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6024 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMCALL, cbInstr);
6025#endif
6026
6027 /* Join forces with vmmcall. */
6028 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
6029}
6030