VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h@ 74631

Last change on this file: r74630, checked in by vboxsync

VMM/IEM: Nested VMX: bugref:9180 VM-exit bits; Added Mov from CR8 intercept.

1/* $Id: IEMAllCImplVmxInstr.cpp.h 74630 2018-10-05 16:54:25Z vboxsync $ */
2/** @file
3 * IEM - VT-x instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
20/** @todo NSTVMX: The following VM-exit intercepts are pending:
21 * VMX_EXIT_XCPT_OR_NMI
22 * VMX_EXIT_EXT_INT
23 * VMX_EXIT_TRIPLE_FAULT
24 * VMX_EXIT_INIT_SIGNAL
25 * VMX_EXIT_SIPI
26 * VMX_EXIT_IO_SMI
27 * VMX_EXIT_SMI
28 * VMX_EXIT_INT_WINDOW
29 * VMX_EXIT_NMI_WINDOW
30 * VMX_EXIT_TASK_SWITCH
31 * VMX_EXIT_GETSEC
32 * VMX_EXIT_INVD
33 * VMX_EXIT_RSM
34 * VMX_EXIT_MOV_CRX
35 * VMX_EXIT_MOV_DRX
36 * VMX_EXIT_IO_INSTR
37 * VMX_EXIT_MWAIT
38 * VMX_EXIT_MTF
39 * VMX_EXIT_MONITOR (APIC access VM-exit caused by MONITOR pending)
40 * VMX_EXIT_PAUSE
41 * VMX_EXIT_ERR_MACHINE_CHECK
42 * VMX_EXIT_TPR_BELOW_THRESHOLD
43 * VMX_EXIT_APIC_ACCESS
44 * VMX_EXIT_VIRTUALIZED_EOI
45 * VMX_EXIT_EPT_VIOLATION
46 * VMX_EXIT_EPT_MISCONFIG
47 * VMX_EXIT_INVEPT
48 * VMX_EXIT_PREEMPT_TIMER
49 * VMX_EXIT_INVVPID
50 * VMX_EXIT_WBINVD
51 * VMX_EXIT_XSETBV
52 * VMX_EXIT_APIC_WRITE
53 * VMX_EXIT_RDRAND
54 * VMX_EXIT_VMFUNC
55 * VMX_EXIT_ENCLS
56 * VMX_EXIT_RDSEED
57 * VMX_EXIT_PML_FULL
58 * VMX_EXIT_XSAVES
59 * VMX_EXIT_XRSTORS
60 */
61
62/**
63 * Map of VMCS field encodings to their virtual-VMCS structure offsets.
64 *
65 * The first array dimension is the VMCS field encoding's Width shifted left by 2
66 * and OR'ed with its Type ((Width << 2) | Type); the second dimension is the Index, see VMXVMCSFIELDENC.
67 */
68uint16_t const g_aoffVmcsMap[16][VMX_V_VMCS_MAX_INDEX + 1] =
69{
70 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
71 {
72 /* 0 */ RT_OFFSETOF(VMXVVMCS, u16Vpid),
73 /* 1 */ RT_OFFSETOF(VMXVVMCS, u16PostIntNotifyVector),
74 /* 2 */ RT_OFFSETOF(VMXVVMCS, u16EptpIndex),
75 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
76 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
77 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
78 },
79 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
80 {
81 /* 0-7 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
82 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
83 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
84 /* 24-25 */ UINT16_MAX, UINT16_MAX
85 },
86 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
87 {
88 /* 0 */ RT_OFFSETOF(VMXVVMCS, GuestEs),
89 /* 1 */ RT_OFFSETOF(VMXVVMCS, GuestCs),
90 /* 2 */ RT_OFFSETOF(VMXVVMCS, GuestSs),
91 /* 3 */ RT_OFFSETOF(VMXVVMCS, GuestDs),
92 /* 4 */ RT_OFFSETOF(VMXVVMCS, GuestFs),
93 /* 5 */ RT_OFFSETOF(VMXVVMCS, GuestGs),
94 /* 6 */ RT_OFFSETOF(VMXVVMCS, GuestLdtr),
95 /* 7 */ RT_OFFSETOF(VMXVVMCS, GuestTr),
96 /* 8 */ RT_OFFSETOF(VMXVVMCS, u16GuestIntStatus),
97 /* 9 */ RT_OFFSETOF(VMXVVMCS, u16PmlIndex),
98 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
99 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
100 },
101 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
102 {
103 /* 0 */ RT_OFFSETOF(VMXVVMCS, HostEs),
104 /* 1 */ RT_OFFSETOF(VMXVVMCS, HostCs),
105 /* 2 */ RT_OFFSETOF(VMXVVMCS, HostSs),
106 /* 3 */ RT_OFFSETOF(VMXVVMCS, HostDs),
107 /* 4 */ RT_OFFSETOF(VMXVVMCS, HostFs),
108 /* 5 */ RT_OFFSETOF(VMXVVMCS, HostGs),
109 /* 6 */ RT_OFFSETOF(VMXVVMCS, HostTr),
110 /* 7-14 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
111 /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
112 /* 23-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
113 },
114 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
115 {
116 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapA),
117 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64AddrIoBitmapB),
118 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64AddrMsrBitmap),
119 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64AddrExitMsrStore),
120 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64AddrExitMsrLoad),
121 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64AddrEntryMsrLoad),
122 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64ExecVmcsPtr),
123 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64AddrPml),
124 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64TscOffset),
125 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64AddrVirtApic),
126 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64AddrApicAccess),
127 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64AddrPostedIntDesc),
128 /* 12 */ RT_OFFSETOF(VMXVVMCS, u64VmFuncCtls),
129 /* 13 */ RT_OFFSETOF(VMXVVMCS, u64EptpPtr),
130 /* 14 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap0),
131 /* 15 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap1),
132 /* 16 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap2),
133 /* 17 */ RT_OFFSETOF(VMXVVMCS, u64EoiExitBitmap3),
134 /* 18 */ RT_OFFSETOF(VMXVVMCS, u64AddrEptpList),
135 /* 19 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmreadBitmap),
136 /* 20 */ RT_OFFSETOF(VMXVVMCS, u64AddrVmwriteBitmap),
137 /* 21 */ RT_OFFSETOF(VMXVVMCS, u64AddrXcptVeInfo),
138 /* 22 */ RT_OFFSETOF(VMXVVMCS, u64AddrXssBitmap),
139 /* 23 */ RT_OFFSETOF(VMXVVMCS, u64AddrEnclsBitmap),
140 /* 24 */ UINT16_MAX,
141 /* 25 */ RT_OFFSETOF(VMXVVMCS, u64TscMultiplier)
142 },
143 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
144 {
145 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64RoGuestPhysAddr),
146 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
147 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
148 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
149 /* 25 */ UINT16_MAX
150 },
151 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
152 {
153 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64VmcsLinkPtr),
154 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64GuestDebugCtlMsr),
155 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64GuestPatMsr),
156 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEferMsr),
157 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
158 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte0),
159 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte1),
160 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte2),
161 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte3),
162 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64GuestBndcfgsMsr),
163 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
164 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
165 },
166 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
167 {
168 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64HostPatMsr),
169 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64HostEferMsr),
170 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64HostPerfGlobalCtlMsr),
171 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
172 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
173 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
174 },
175 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
176 {
177 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32PinCtls),
178 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls),
179 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32XcptBitmap),
180 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMask),
181 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMatch),
182 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32Cr3TargetCount),
183 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32ExitCtls),
184 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrStoreCount),
185 /* 8 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrLoadCount),
186 /* 9 */ RT_OFFSETOF(VMXVVMCS, u32EntryCtls),
187 /* 10 */ RT_OFFSETOF(VMXVVMCS, u32EntryMsrLoadCount),
188 /* 11 */ RT_OFFSETOF(VMXVVMCS, u32EntryIntInfo),
189 /* 12 */ RT_OFFSETOF(VMXVVMCS, u32EntryXcptErrCode),
190 /* 13 */ RT_OFFSETOF(VMXVVMCS, u32EntryInstrLen),
191 /* 14 */ RT_OFFSETOF(VMXVVMCS, u32TprThreshold),
192 /* 15 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls2),
193 /* 16 */ RT_OFFSETOF(VMXVVMCS, u32PleGap),
194 /* 17 */ RT_OFFSETOF(VMXVVMCS, u32PleWindow),
195 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
196 },
197 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
198 {
199 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32RoVmInstrError),
200 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32RoExitReason),
201 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32RoExitIntInfo),
202 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32RoExitErrCode),
203 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
204 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
205 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32RoExitInstrLen),
206 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32RoExitInstrInfo),
207 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
208 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
209 /* 24-25 */ UINT16_MAX, UINT16_MAX
210 },
211 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
212 {
213 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsLimit),
214 /* 1 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsLimit),
215 /* 2 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsLimit),
216 /* 3 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsLimit),
217 /* 4 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsLimit),
218 /* 5 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsLimit),
219 /* 6 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrLimit),
220 /* 7 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrLimit),
221 /* 8 */ RT_OFFSETOF(VMXVVMCS, u32GuestGdtrLimit),
222 /* 9 */ RT_OFFSETOF(VMXVVMCS, u32GuestIdtrLimit),
223 /* 10 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsAttr),
224 /* 11 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsAttr),
225 /* 12 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsAttr),
226 /* 13 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsAttr),
227 /* 14 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsAttr),
228 /* 15 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsAttr),
229 /* 16 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrAttr),
230 /* 17 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrAttr),
231 /* 18 */ RT_OFFSETOF(VMXVVMCS, u32GuestIntrState),
232 /* 19 */ RT_OFFSETOF(VMXVVMCS, u32GuestActivityState),
233 /* 20 */ RT_OFFSETOF(VMXVVMCS, u32GuestSmBase),
234 /* 21 */ RT_OFFSETOF(VMXVVMCS, u32GuestSysenterCS),
235 /* 22 */ UINT16_MAX,
236 /* 23 */ RT_OFFSETOF(VMXVVMCS, u32PreemptTimer),
237 /* 24-25 */ UINT16_MAX, UINT16_MAX
238 },
239 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
240 {
241 /* 0 */ RT_OFFSETOF(VMXVVMCS, u32HostSysenterCs),
242 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
243 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
244 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
245 /* 25 */ UINT16_MAX
246 },
247 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_CONTROL: */
248 {
249 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64Cr0Mask),
250 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64Cr4Mask),
251 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64Cr0ReadShadow),
252 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64Cr4ReadShadow),
253 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target0),
254 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target1),
255 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target2),
256 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target3),
257 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
258 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
259 /* 24-25 */ UINT16_MAX, UINT16_MAX
260 },
261 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
262 {
263 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64RoExitQual),
264 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64RoIoRcx),
265 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64RoIoRsi),
266 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64RoIoRdi),
267 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64RoIoRip),
268 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64RoGuestLinearAddr),
269 /* 6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
270 /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
271 /* 22-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
272 },
273 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
274 {
275 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr0),
276 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr3),
277 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr4),
278 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEsBase),
279 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64GuestCsBase),
280 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64GuestSsBase),
281 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64GuestDsBase),
282 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64GuestFsBase),
283 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64GuestGsBase),
284 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64GuestLdtrBase),
285 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64GuestTrBase),
286 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64GuestGdtrBase),
287 /* 12 */ RT_OFFSETOF(VMXVVMCS, u64GuestIdtrBase),
288 /* 13 */ RT_OFFSETOF(VMXVVMCS, u64GuestDr7),
289 /* 14 */ RT_OFFSETOF(VMXVVMCS, u64GuestRsp),
290 /* 15 */ RT_OFFSETOF(VMXVVMCS, u64GuestRip),
291 /* 16 */ RT_OFFSETOF(VMXVVMCS, u64GuestRFlags),
292 /* 17 */ RT_OFFSETOF(VMXVVMCS, u64GuestPendingDbgXcpt),
293 /* 18 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEsp),
294 /* 19 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEip),
295 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
296 },
297 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_HOST_STATE: */
298 {
299 /* 0 */ RT_OFFSETOF(VMXVVMCS, u64HostCr0),
300 /* 1 */ RT_OFFSETOF(VMXVVMCS, u64HostCr3),
301 /* 2 */ RT_OFFSETOF(VMXVVMCS, u64HostCr4),
302 /* 3 */ RT_OFFSETOF(VMXVVMCS, u64HostFsBase),
303 /* 4 */ RT_OFFSETOF(VMXVVMCS, u64HostGsBase),
304 /* 5 */ RT_OFFSETOF(VMXVVMCS, u64HostTrBase),
305 /* 6 */ RT_OFFSETOF(VMXVVMCS, u64HostGdtrBase),
306 /* 7 */ RT_OFFSETOF(VMXVVMCS, u64HostIdtrBase),
307 /* 8 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEsp),
308 /* 9 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEip),
309 /* 10 */ RT_OFFSETOF(VMXVVMCS, u64HostRsp),
310 /* 11 */ RT_OFFSETOF(VMXVVMCS, u64HostRip),
311 /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
312 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
313 }
314};
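/*
 * Illustrative sketch (not part of the original source): how a VMCS field encoding
 * is decomposed and used to index g_aoffVmcsMap.  The bit layout follows the Intel
 * SDM VMCS field encoding (bit 0: access type, bits 9:1: index, bits 11:10: type,
 * bits 14:13: width); the accessor functions further down do the same thing via the
 * VMX_BF_VMCS_ENC_* bitfield macros.
 *
 * @code
 *     uint32_t const uFieldEnc  = VMX_VMCS_GUEST_RIP;           // 0x681e
 *     uint8_t  const uIndex     = (uFieldEnc >>  1) & 0x1ff;    // 15
 *     uint8_t  const uType      = (uFieldEnc >> 10) & 0x3;      // 2 = guest-state
 *     uint8_t  const uWidth     = (uFieldEnc >> 13) & 0x3;      // 3 = natural-width
 *     uint8_t  const uWidthType = (uWidth << 2) | uType;        // 14
 *     uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
 *     Assert(offField == RT_OFFSETOF(VMXVVMCS, u64GuestRip));
 * @endcode
 */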
315
316
317/**
318 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
319 * relative offsets.
320 */
321# ifdef IEM_WITH_CODE_TLB
322# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) do { } while (0)
323# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) do { } while (0)
324# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
325# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
326# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
327# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
328# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
329# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
330# error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
331# else /* !IEM_WITH_CODE_TLB */
332# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
333 do \
334 { \
335 Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
336 (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
337 } while (0)
338
339# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
340
341# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
342 do \
343 { \
344 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
345 uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
346 uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
347 (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
348 } while (0)
349
350# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
351 do \
352 { \
353 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
354 (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
355 } while (0)
356
357# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
358 do \
359 { \
360 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
361 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
362 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
363 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
364 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
365 (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
366 } while (0)
367
368# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
369 do \
370 { \
371 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
372 (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
373 } while (0)
374
375# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
376 do \
377 { \
378 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
379 (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
380 } while (0)
381
382# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
383 do \
384 { \
385 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
386 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
387 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
388 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
389 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
390 (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
391 } while (0)
392# endif /* !IEM_WITH_CODE_TLB */
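/*
 * Worked example (illustrative only): for abOpcode[] = { 0x0f, 0xc7, 0x36, 0x34, 0x12 }
 * (VMPTRLD with a 16-bit displacement-only operand) the ModR/M byte sits at offset 2
 * and the displacement at offset 3, so IEM_DISP_GET_U16 yields RT_MAKE_U16(0x34, 0x12)
 * = 0x1234, i.e. displacement bytes are assembled little-endian.  The *_S8_SX_*
 * variants read a single byte and sign-extend it, so a displacement byte of 0xf0
 * becomes 0xfff0, 0xfffffff0 or 0xfffffffffffffff0 for the 16-, 32- and 64-bit
 * destinations respectively.
 */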
393
394/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
395#define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)
396
397/** Whether a shadow VMCS is present for the given VCPU. */
398#define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
399
400/** Gets the VMXON region pointer. */
401#define IEM_VMX_GET_VMXON_PTR(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
402
403/** Gets the guest-physical address of the current VMCS for the given VCPU. */
404#define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
405
406/** Whether a current VMCS is present for the given VCPU. */
407#define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
408
409/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
410#define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
411 do \
412 { \
413 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
414 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
415 } while (0)
416
417/** Clears any current VMCS for the given VCPU. */
418#define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
419 do \
420 { \
421 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
422 } while (0)
423
424/** Checks that the guest is in VMX operation, raising \#UD if it is not; used by VMX instructions that require VMX operation.
425 * @note Any changes here may require updating IEMOP_HLP_IN_VMX_OPERATION as well. */
426#define IEM_VMX_IN_VMX_OPERATION(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
427 do \
428 { \
429 if (IEM_VMX_IS_ROOT_MODE(a_pVCpu)) \
430 { /* likely */ } \
431 else \
432 { \
433 Log((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
434 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
435 return iemRaiseUndefinedOpcode(a_pVCpu); \
436 } \
437 } while (0)
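/*
 * Usage sketch (the instruction and diagnostic names below are illustrative, not
 * taken from this file): a VMX instruction implementation bails out with \#UD before
 * doing any work if the guest has not entered VMX operation.
 *
 * @code
 *     IEM_VMX_IN_VMX_OPERATION(pVCpu, "vmxoff", kVmxVDiag_Vmxoff);
 *     // On failure this sets enmDiag to kVmxVDiag_Vmxoff_VmxRoot (note the pasted
 *     // "_VmxRoot" suffix) and returns iemRaiseUndefinedOpcode(pVCpu).
 * @endcode
 */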
438
439/** Marks a VM-entry failure with a diagnostic reason, logs and returns. */
440#define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_VmxDiag) \
441 do \
442 { \
443 Log(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_VmxDiag), \
444 HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
445 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
446 return VERR_VMX_VMENTRY_FAILED; \
447 } while (0)
448
449/** Marks a VM-exit failure with a diagnostic reason, logs and returns. */
450#define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
451 do \
452 { \
453 Log(("VM-exit failed! uExitReason=%u enmDiag=%u (%s) -> %s\n", (a_uExitReason), (a_VmxDiag), \
454 HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
455 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
456 return VERR_VMX_VMEXIT_FAILED; \
457 } while (0)
458
459
460
461/**
462 * Returns whether the given VMCS field is valid and supported by our emulation.
463 *
464 * @param pVCpu The cross context virtual CPU structure.
465 * @param u64FieldEnc The VMCS field encoding.
466 *
467 * @remarks This takes into account the CPU features exposed to the guest.
468 */
469IEM_STATIC bool iemVmxIsVmcsFieldValid(PVMCPU pVCpu, uint64_t u64FieldEnc)
470{
471 uint32_t const uFieldEncHi = RT_HI_U32(u64FieldEnc);
472 uint32_t const uFieldEncLo = RT_LO_U32(u64FieldEnc);
473 if (!uFieldEncHi)
474 { /* likely */ }
475 else
476 return false;
477
478 PCCPUMFEATURES pFeat = IEM_GET_GUEST_CPU_FEATURES(pVCpu);
479 switch (uFieldEncLo)
480 {
481 /*
482 * 16-bit fields.
483 */
484 /* Control fields. */
485 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
486 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
487 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
488
489 /* Guest-state fields. */
490 case VMX_VMCS16_GUEST_ES_SEL:
491 case VMX_VMCS16_GUEST_CS_SEL:
492 case VMX_VMCS16_GUEST_SS_SEL:
493 case VMX_VMCS16_GUEST_DS_SEL:
494 case VMX_VMCS16_GUEST_FS_SEL:
495 case VMX_VMCS16_GUEST_GS_SEL:
496 case VMX_VMCS16_GUEST_LDTR_SEL:
497 case VMX_VMCS16_GUEST_TR_SEL:
498 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
499 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
500
501 /* Host-state fields. */
502 case VMX_VMCS16_HOST_ES_SEL:
503 case VMX_VMCS16_HOST_CS_SEL:
504 case VMX_VMCS16_HOST_SS_SEL:
505 case VMX_VMCS16_HOST_DS_SEL:
506 case VMX_VMCS16_HOST_FS_SEL:
507 case VMX_VMCS16_HOST_GS_SEL:
508 case VMX_VMCS16_HOST_TR_SEL: return true;
509
510 /*
511 * 64-bit fields.
512 */
513 /* Control fields. */
514 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
515 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
516 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
517 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
518 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
519 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
520 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
521 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
522 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
523 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
524 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
525 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
526 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
527 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
528 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
529 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
530 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
531 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
532 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
533 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
534 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
535 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
536 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
537 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
538 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
539 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
540 case VMX_VMCS64_CTRL_EPTP_FULL:
541 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
542 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
543 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
544 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
545 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
546 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
547 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
548 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
549 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
550 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
551 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
552 {
553 uint64_t const uVmFuncMsr = CPUMGetGuestIa32VmxVmFunc(pVCpu);
554 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
555 }
556 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
557 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
558 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
559 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
560 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
561 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
562 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
563 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
564 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
565 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
566 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
567 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
568
569 /* Read-only data fields. */
570 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
571 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
572
573 /* Guest-state fields. */
574 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
575 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
576 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
577 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
578 case VMX_VMCS64_GUEST_PAT_FULL:
579 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
580 case VMX_VMCS64_GUEST_EFER_FULL:
581 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
582 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
583 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
584 case VMX_VMCS64_GUEST_PDPTE0_FULL:
585 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
586 case VMX_VMCS64_GUEST_PDPTE1_FULL:
587 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
588 case VMX_VMCS64_GUEST_PDPTE2_FULL:
589 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
590 case VMX_VMCS64_GUEST_PDPTE3_FULL:
591 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
592 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
593 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
594
595 /* Host-state fields. */
596 case VMX_VMCS64_HOST_PAT_FULL:
597 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
598 case VMX_VMCS64_HOST_EFER_FULL:
599 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
600 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
601 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
602
603 /*
604 * 32-bit fields.
605 */
606 /* Control fields. */
607 case VMX_VMCS32_CTRL_PIN_EXEC:
608 case VMX_VMCS32_CTRL_PROC_EXEC:
609 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
610 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
611 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
612 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
613 case VMX_VMCS32_CTRL_EXIT:
614 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
615 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
616 case VMX_VMCS32_CTRL_ENTRY:
617 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
618 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
619 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
620 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
621 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
622 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
623 case VMX_VMCS32_CTRL_PLE_GAP:
624 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
625
626 /* Read-only data fields. */
627 case VMX_VMCS32_RO_VM_INSTR_ERROR:
628 case VMX_VMCS32_RO_EXIT_REASON:
629 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
630 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
631 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
632 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
633 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
634 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
635
636 /* Guest-state fields. */
637 case VMX_VMCS32_GUEST_ES_LIMIT:
638 case VMX_VMCS32_GUEST_CS_LIMIT:
639 case VMX_VMCS32_GUEST_SS_LIMIT:
640 case VMX_VMCS32_GUEST_DS_LIMIT:
641 case VMX_VMCS32_GUEST_FS_LIMIT:
642 case VMX_VMCS32_GUEST_GS_LIMIT:
643 case VMX_VMCS32_GUEST_LDTR_LIMIT:
644 case VMX_VMCS32_GUEST_TR_LIMIT:
645 case VMX_VMCS32_GUEST_GDTR_LIMIT:
646 case VMX_VMCS32_GUEST_IDTR_LIMIT:
647 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
648 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
649 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
650 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
651 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
652 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
653 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
654 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
655 case VMX_VMCS32_GUEST_INT_STATE:
656 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
657 case VMX_VMCS32_GUEST_SMBASE:
658 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
659 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
660
661 /* Host-state fields. */
662 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
663
664 /*
665 * Natural-width fields.
666 */
667 /* Control fields. */
668 case VMX_VMCS_CTRL_CR0_MASK:
669 case VMX_VMCS_CTRL_CR4_MASK:
670 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
671 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
672 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
673 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
674 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
675 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
676
677 /* Read-only data fields. */
678 case VMX_VMCS_RO_EXIT_QUALIFICATION:
679 case VMX_VMCS_RO_IO_RCX:
680 case VMX_VMCS_RO_IO_RSI:
681 case VMX_VMCS_RO_IO_RDI:
682 case VMX_VMCS_RO_IO_RIP:
683 case VMX_VMCS_RO_GUEST_LINEAR_ADDR: return true;
684
685 /* Guest-state fields. */
686 case VMX_VMCS_GUEST_CR0:
687 case VMX_VMCS_GUEST_CR3:
688 case VMX_VMCS_GUEST_CR4:
689 case VMX_VMCS_GUEST_ES_BASE:
690 case VMX_VMCS_GUEST_CS_BASE:
691 case VMX_VMCS_GUEST_SS_BASE:
692 case VMX_VMCS_GUEST_DS_BASE:
693 case VMX_VMCS_GUEST_FS_BASE:
694 case VMX_VMCS_GUEST_GS_BASE:
695 case VMX_VMCS_GUEST_LDTR_BASE:
696 case VMX_VMCS_GUEST_TR_BASE:
697 case VMX_VMCS_GUEST_GDTR_BASE:
698 case VMX_VMCS_GUEST_IDTR_BASE:
699 case VMX_VMCS_GUEST_DR7:
700 case VMX_VMCS_GUEST_RSP:
701 case VMX_VMCS_GUEST_RIP:
702 case VMX_VMCS_GUEST_RFLAGS:
703 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
704 case VMX_VMCS_GUEST_SYSENTER_ESP:
705 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
706
707 /* Host-state fields. */
708 case VMX_VMCS_HOST_CR0:
709 case VMX_VMCS_HOST_CR3:
710 case VMX_VMCS_HOST_CR4:
711 case VMX_VMCS_HOST_FS_BASE:
712 case VMX_VMCS_HOST_GS_BASE:
713 case VMX_VMCS_HOST_TR_BASE:
714 case VMX_VMCS_HOST_GDTR_BASE:
715 case VMX_VMCS_HOST_IDTR_BASE:
716 case VMX_VMCS_HOST_SYSENTER_ESP:
717 case VMX_VMCS_HOST_SYSENTER_EIP:
718 case VMX_VMCS_HOST_RSP:
719 case VMX_VMCS_HOST_RIP: return true;
720 }
721
722 return false;
723}
724
725
726/**
727 * Gets a host selector from the VMCS.
728 *
729 * @param pVmcs Pointer to the virtual VMCS.
730 * @param iSegReg The index of the segment register (X86_SREG_XXX).
731 */
732DECLINLINE(RTSEL) iemVmxVmcsGetHostSelReg(PCVMXVVMCS pVmcs, uint8_t iSegReg)
733{
734 Assert(iSegReg < X86_SREG_COUNT);
735 RTSEL HostSel;
736 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
737 uint8_t const uType = VMX_VMCS_ENC_TYPE_HOST_STATE;
738 uint8_t const uWidthType = (uWidth << 2) | uType;
739 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_HOST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
740 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
741 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
742 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
743 uint8_t const *pbField = pbVmcs + offField;
744 HostSel = *(uint16_t *)pbField;
745 return HostSel;
746}
747
748
749/**
750 * Sets a guest segment register in the VMCS.
751 *
752 * @param pVmcs Pointer to the virtual VMCS.
753 * @param iSegReg The index of the segment register (X86_SREG_XXX).
754 * @param pSelReg Pointer to the segment register.
755 */
756IEM_STATIC void iemVmxVmcsSetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCCPUMSELREG pSelReg)
757{
758 Assert(pSelReg);
759 Assert(iSegReg < X86_SREG_COUNT);
760
761 /* Selector. */
762 {
763 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
764 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
765 uint8_t const uWidthType = (uWidth << 2) | uType;
766 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
767 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
768 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
769 uint8_t *pbVmcs = (uint8_t *)pVmcs;
770 uint8_t *pbField = pbVmcs + offField;
771 *(uint16_t *)pbField = pSelReg->Sel;
772 }
773
774 /* Limit. */
775 {
776 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
777 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
778 uint8_t const uWidthType = (uWidth << 2) | uType;
779 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
780 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
781 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
782 uint8_t *pbVmcs = (uint8_t *)pVmcs;
783 uint8_t *pbField = pbVmcs + offField;
784 *(uint32_t *)pbField = pSelReg->u32Limit;
785 }
786
787 /* Base. */
788 {
789 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
790 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
791 uint8_t const uWidthType = (uWidth << 2) | uType;
792 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
793 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
794 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
795 uint8_t *pbVmcs = (uint8_t *)pVmcs;
796 uint8_t *pbField = pbVmcs + offField;
797 *(uint64_t *)pbField = pSelReg->u64Base;
798 }
799
800 /* Attributes. */
801 {
802 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
803 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
804 | X86DESCATTR_UNUSABLE;
805 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
806 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
807 uint8_t const uWidthType = (uWidth << 2) | uType;
808 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
809 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
810 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
811 uint8_t *pbVmcs = (uint8_t *)pVmcs;
812 uint8_t *pbField = pbVmcs + offField;
813 *(uint32_t *)pbField = pSelReg->Attr.u & fValidAttrMask;
814 }
815}
816
817
818/**
819 * Gets a guest segment register from the VMCS.
820 *
821 * @returns VBox status code.
822 * @param pVmcs Pointer to the virtual VMCS.
823 * @param iSegReg The index of the segment register (X86_SREG_XXX).
824 * @param pSelReg Where to store the segment register (only updated when
825 * VINF_SUCCESS is returned).
826 *
827 * @remarks Warning! This does not validate the contents of the retrieved segment
828 * register.
829 */
830IEM_STATIC int iemVmxVmcsGetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCPUMSELREG pSelReg)
831{
832 Assert(pSelReg);
833 Assert(iSegReg < X86_SREG_COUNT);
834
835 /* Selector. */
836 uint16_t u16Sel;
837 {
838 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
839 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
840 uint8_t const uWidthType = (uWidth << 2) | uType;
841 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
842 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
843 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
844 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
845 uint8_t const *pbField = pbVmcs + offField;
846 u16Sel = *(uint16_t *)pbField;
847 }
848
849 /* Limit. */
850 uint32_t u32Limit;
851 {
852 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
853 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
854 uint8_t const uWidthType = (uWidth << 2) | uType;
855 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
856 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
857 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
858 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
859 uint8_t const *pbField = pbVmcs + offField;
860 u32Limit = *(uint32_t *)pbField;
861 }
862
863 /* Base. */
864 uint64_t u64Base;
865 {
866 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
867 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
868 uint8_t const uWidthType = (uWidth << 2) | uType;
869 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
870 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
871 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
872 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
873 uint8_t const *pbField = pbVmcs + offField;
874 u64Base = *(uint64_t *)pbField;
875 /** @todo NSTVMX: Should we zero out high bits here for 32-bit virtual CPUs? */
876 }
877
878 /* Attributes. */
879 uint32_t u32Attr;
880 {
881 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
882 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
883 uint8_t const uWidthType = (uWidth << 2) | uType;
884 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
885 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
886 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
887 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
888 uint8_t const *pbField = pbVmcs + offField;
889 u32Attr = *(uint32_t *)pbField;
890 }
891
892 pSelReg->Sel = u16Sel;
893 pSelReg->ValidSel = u16Sel;
894 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
895 pSelReg->u32Limit = u32Limit;
896 pSelReg->u64Base = u64Base;
897 pSelReg->Attr.u = u32Attr;
898 return VINF_SUCCESS;
899}
900
901
902/**
903 * Gets a CR3 target value from the VMCS.
904 *
905 * @returns The CR3-target value.
906 * @param pVmcs Pointer to the virtual VMCS.
907 * @param idxCr3Target The index of the CR3-target value to retrieve.
909 */
910DECLINLINE(uint64_t) iemVmxVmcsGetCr3TargetValue(PCVMXVVMCS pVmcs, uint8_t idxCr3Target)
911{
912 Assert(idxCr3Target < VMX_V_CR3_TARGET_COUNT);
913
914 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
915 uint8_t const uType = VMX_VMCS_ENC_TYPE_CONTROL;
916 uint8_t const uWidthType = (uWidth << 2) | uType;
917 uint8_t const uIndex = idxCr3Target + RT_BF_GET(VMX_VMCS_CTRL_CR3_TARGET_VAL0, VMX_BF_VMCS_ENC_INDEX);
918 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
919 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
920 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
921 uint8_t const *pbField = pbVmcs + offField;
922 uint64_t const uCr3TargetValue = *(uint64_t *)pbField;
923
924 return uCr3TargetValue;
925}
926
927
928/**
929 * Reads a 32-bit register from the virtual-APIC page at the given offset.
930 *
931 * @returns The register from the virtual-APIC page.
932 * @param pVCpu The cross context virtual CPU structure.
933 * @param offReg The offset of the register being read.
934 */
935DECLINLINE(uint32_t) iemVmxVirtApicReadRaw32(PVMCPU pVCpu, uint8_t offReg)
936{
937 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
938
939 uint8_t const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
940 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
941 uint32_t const uValue = *(const uint32_t *)(pbVirtApic + offReg);
942 return uValue;
943}
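/*
 * Usage note (assumes the XAPIC_OFF_TPR constant from the APIC headers): reading the
 * TPR mirror, for instance, amounts to iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR),
 * i.e. a raw 32-bit load from offset 0x80 of the virtual-APIC page.
 */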
944
945
946/**
947 * Applies the guest/host mask and the read shadow to the given guest CR0/CR4
948 * value, yielding the value the nested-guest observes when reading CR0 or CR4.
949 *
950 * @returns The masked CR0/CR4.
951 * @param pVCpu The cross context virtual CPU structure.
952 * @param iCrReg The control register (either CR0 or CR4).
953 * @param uGuestCrX The current guest CR0 or guest CR4.
954 */
955IEM_STATIC uint64_t iemVmxMaskCr0CR4(PVMCPU pVCpu, uint8_t iCrReg, uint64_t uGuestCrX)
956{
957 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
958 Assert(iCrReg == 0 || iCrReg == 4);
959
960 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
961 Assert(pVmcs);
962
963 /*
964 * For each CR0 or CR4 bit owned by the host, the corresponding bit is loaded from the
965 * CR0 read shadow or CR4 read shadow. For each CR0 or CR4 bit that is not owned by the
966 * host, the corresponding bit from the guest CR0 or guest CR4 is loaded.
967 *
968 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
969 */
970 uint64_t fGstHostMask;
971 uint64_t fReadShadow;
972 if (iCrReg == 0)
973 {
974 fGstHostMask = pVmcs->u64Cr0Mask.u;
975 fReadShadow = pVmcs->u64Cr0ReadShadow.u;
976 }
977 else
978 {
979 fGstHostMask = pVmcs->u64Cr4Mask.u;
980 fReadShadow = pVmcs->u64Cr4ReadShadow.u;
981 }
982
983 uint64_t const fMaskedCrX = (fReadShadow & fGstHostMask) | (uGuestCrX & ~fGstHostMask);
984 return fMaskedCrX;
985}
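/*
 * Worked example (illustrative values): with a CR0 guest/host mask of 0x80000021
 * (PG, NE and PE owned by the host), a CR0 read shadow of 0x80000031 and a real
 * guest CR0 of 0x80050013, the value read back is
 *     (0x80000031 & 0x80000021) | (0x80050013 & ~0x80000021) = 0x80000021 | 0x00050012 = 0x80050033
 * i.e. host-owned bits (here NE) come from the read shadow while all other bits
 * come from the guest CR0.
 */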
986
987
988/**
989 * Gets VM-exit instruction information along with any displacement for an
990 * instruction VM-exit.
991 *
992 * @returns The VM-exit instruction information.
993 * @param pVCpu The cross context virtual CPU structure.
994 * @param uExitReason The VM-exit reason.
995 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_XXX).
996 * @param pGCPtrDisp Where to store the displacement field. Optional, can be
997 * NULL.
998 */
999IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, PRTGCPTR pGCPtrDisp)
1000{
1001 RTGCPTR GCPtrDisp;
1002 VMXEXITINSTRINFO ExitInstrInfo;
1003 ExitInstrInfo.u = 0;
1004
1005 /*
1006 * Get and parse the ModR/M byte from our decoded opcodes.
1007 */
1008 uint8_t bRm;
1009 uint8_t const offModRm = pVCpu->iem.s.offModRm;
1010 IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
1011 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
1012 {
1013 /*
1014 * ModR/M indicates register addressing.
1015 *
1016 * The primary/secondary register operands are reported in the iReg1 or iReg2
1017 * fields depending on whether it is a read/write form.
1018 */
1019 uint8_t idxReg1;
1020 uint8_t idxReg2;
1021 if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
1022 {
1023 idxReg1 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
1024 idxReg2 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
1025 }
1026 else
1027 {
1028 idxReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
1029 idxReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
1030 }
1031 ExitInstrInfo.All.u2Scaling = 0;
1032 ExitInstrInfo.All.iReg1 = idxReg1;
1033 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
1034 ExitInstrInfo.All.fIsRegOperand = 1;
1035 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
1036 ExitInstrInfo.All.iSegReg = 0;
1037 ExitInstrInfo.All.iIdxReg = 0;
1038 ExitInstrInfo.All.fIdxRegInvalid = 1;
1039 ExitInstrInfo.All.iBaseReg = 0;
1040 ExitInstrInfo.All.fBaseRegInvalid = 1;
1041 ExitInstrInfo.All.iReg2 = idxReg2;
1042
1043 /* Displacement not applicable for register addressing. */
1044 GCPtrDisp = 0;
1045 }
1046 else
1047 {
1048 /*
1049 * ModR/M indicates memory addressing.
1050 */
1051 uint8_t uScale = 0;
1052 bool fBaseRegValid = false;
1053 bool fIdxRegValid = false;
1054 uint8_t iBaseReg = 0;
1055 uint8_t iIdxReg = 0;
1056 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
1057 {
1058 /*
1059 * Parse the ModR/M, displacement for 16-bit addressing mode.
1060 * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
1061 */
1062 uint16_t u16Disp = 0;
1063 uint8_t const offDisp = offModRm + sizeof(bRm);
1064 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
1065 {
1066 /* Displacement without any registers. */
1067 IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
1068 }
1069 else
1070 {
1071 /* Register (index and base). */
1072 switch (bRm & X86_MODRM_RM_MASK)
1073 {
1074 case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
1075 case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
1076 case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
1077 case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
1078 case 4: fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
1079 case 5: fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
1080 case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
1081 case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
1082 }
1083
1084 /* Register + displacement. */
1085 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
1086 {
1087 case 0: break;
1088 case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
1089 case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp); break;
1090 default:
1091 {
1092 /* Register addressing, handled at the beginning. */
1093 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
1094 break;
1095 }
1096 }
1097 }
1098
1099 Assert(!uScale); /* There's no scaling/SIB byte for 16-bit addressing. */
1100 GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
1101 }
1102 else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
1103 {
1104 /*
1105 * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
1106 * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
1107 */
1108 uint32_t u32Disp = 0;
1109 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
1110 {
1111 /* Displacement without any registers. */
1112 uint8_t const offDisp = offModRm + sizeof(bRm);
1113 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
1114 }
1115 else
1116 {
1117 /* Register (and perhaps scale, index and base). */
1118 uint8_t offDisp = offModRm + sizeof(bRm);
1119 iBaseReg = (bRm & X86_MODRM_RM_MASK);
1120 if (iBaseReg == 4)
1121 {
1122 /* An SIB byte follows the ModR/M byte, parse it. */
1123 uint8_t bSib;
1124 uint8_t const offSib = offModRm + sizeof(bRm);
1125 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
1126
1127 /* A displacement may follow SIB, update its offset. */
1128 offDisp += sizeof(bSib);
1129
1130 /* Get the scale. */
1131 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
1132
1133 /* Get the index register. */
1134 iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
1135 fIdxRegValid = RT_BOOL(iIdxReg != 4);
1136
1137 /* Get the base register. */
1138 iBaseReg = bSib & X86_SIB_BASE_MASK;
1139 fBaseRegValid = true;
1140 if (iBaseReg == 5)
1141 {
1142 if ((bRm & X86_MODRM_MOD_MASK) == 0)
1143 {
1144 /* Mod is 0 implies a 32-bit displacement with no base. */
1145 fBaseRegValid = false;
1146 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
1147 }
1148 else
1149 {
1150 /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
1151 iBaseReg = X86_GREG_xBP;
1152 }
1153 }
1154 }
1155
1156 /* Register + displacement. */
1157 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
1158 {
1159 case 0: /* Handled above */ break;
1160 case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
1161 case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp); break;
1162 default:
1163 {
1164 /* Register addressing, handled at the beginning. */
1165 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
1166 break;
1167 }
1168 }
1169 }
1170
1171 GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
1172 }
1173 else
1174 {
1175 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
1176
1177 /*
1178 * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
1179 * See Intel instruction spec. 2.2 "IA-32e Mode".
1180 */
1181 uint64_t u64Disp = 0;
1182 bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
1183 if (fRipRelativeAddr)
1184 {
1185 /*
1186 * RIP-relative addressing mode.
1187 *
1188 * The displacement is 32-bit signed implying an offset range of +/-2G.
1189 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
1190 */
1191 uint8_t const offDisp = offModRm + sizeof(bRm);
1192 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
1193 }
1194 else
1195 {
1196 uint8_t offDisp = offModRm + sizeof(bRm);
1197
1198 /*
1199 * Register (and perhaps scale, index and base).
1200 *
1201 * REX.B extends the most-significant bit of the base register. However, REX.B
1202 * is ignored while determining whether an SIB follows the opcode. Hence, we
1203 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
1204 *
1205 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
1206 */
1207 iBaseReg = (bRm & X86_MODRM_RM_MASK);
1208 if (iBaseReg == 4)
1209 {
1210 /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
1211 uint8_t bSib;
1212 uint8_t const offSib = offModRm + sizeof(bRm);
1213 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
1214
1215 /* Displacement may follow SIB, update its offset. */
1216 offDisp += sizeof(bSib);
1217
1218 /* Get the scale. */
1219 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
1220
1221 /* Get the index. */
1222 iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
1223 fIdxRegValid = RT_BOOL(iIdxReg != 4); /* R12 -can- be used as an index register. */
1224
1225 /* Get the base. */
1226 iBaseReg = (bSib & X86_SIB_BASE_MASK);
1227 fBaseRegValid = true;
1228 if (iBaseReg == 5)
1229 {
1230 if ((bRm & X86_MODRM_MOD_MASK) == 0)
1231 {
1232 /* Mod is 0 implies a signed 32-bit displacement with no base. */
1233 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
1234 }
1235 else
1236 {
1237 /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
1238 iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
1239 }
1240 }
1241 }
1242 iBaseReg |= pVCpu->iem.s.uRexB;
1243
1244 /* Register + displacement. */
1245 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
1246 {
1247 case 0: /* Handled above */ break;
1248 case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp); break;
1249 case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
1250 default:
1251 {
1252 /* Register addressing, handled at the beginning. */
1253 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
1254 break;
1255 }
1256 }
1257 }
1258
1259 GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
1260 }
1261
1262 /*
1263 * The primary or secondary register operand is reported in iReg2 depending
1264 * on whether the primary operand is in read/write form.
1265 */
1266 uint8_t idxReg2;
1267 if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
1268 {
1269 idxReg2 = bRm & X86_MODRM_RM_MASK;
1270 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
1271 idxReg2 |= pVCpu->iem.s.uRexB;
1272 }
1273 else
1274 {
1275 idxReg2 = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
1276 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
1277 idxReg2 |= pVCpu->iem.s.uRexReg;
1278 }
1279 ExitInstrInfo.All.u2Scaling = uScale;
1280 ExitInstrInfo.All.iReg1 = 0; /* Not applicable for memory addressing. */
1281 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
1282 ExitInstrInfo.All.fIsRegOperand = 0;
1283 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
1284 ExitInstrInfo.All.iSegReg = pVCpu->iem.s.iEffSeg;
1285 ExitInstrInfo.All.iIdxReg = iIdxReg;
1286 ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
1287 ExitInstrInfo.All.iBaseReg = iBaseReg;
1288 ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
1289 ExitInstrInfo.All.iReg2 = idxReg2;
1290 }
1291
1292 /*
1293 * Handle exceptions to the norm for certain instructions.
1294 * (e.g. some instructions convey an instruction identity in place of iReg2).
1295 */
1296 switch (uExitReason)
1297 {
1298 case VMX_EXIT_GDTR_IDTR_ACCESS:
1299 {
1300 Assert(VMXINSTRID_IS_VALID(uInstrId));
1301 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
1302 ExitInstrInfo.GdtIdt.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
1303 ExitInstrInfo.GdtIdt.u2Undef0 = 0;
1304 break;
1305 }
1306
1307 case VMX_EXIT_LDTR_TR_ACCESS:
1308 {
1309 Assert(VMXINSTRID_IS_VALID(uInstrId));
1310 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
1311 ExitInstrInfo.LdtTr.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
1312 ExitInstrInfo.LdtTr.u2Undef0 = 0;
1313 break;
1314 }
1315
1316 case VMX_EXIT_RDRAND:
1317 case VMX_EXIT_RDSEED:
1318 {
1319 Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
1320 break;
1321 }
1322 }
1323
1324 /* Update displacement and return the constructed VM-exit instruction information field. */
1325 if (pGCPtrDisp)
1326 *pGCPtrDisp = GCPtrDisp;
1327
1328 return ExitInstrInfo.u;
1329}
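/*
 * Worked example (illustrative): for a 64-bit VMREAD with a memory destination
 * encoded as ModR/M 0x4c (mod=01, reg=001, rm=100), SIB 0x88 (scale=x4, index=RCX,
 * base=RAX) and disp8 0x10, the code above reports u2Scaling=2, iIdxReg=RCX,
 * iBaseReg=RAX, fIsRegOperand=0 and *pGCPtrDisp=0x10, while the register source
 * from ModR/M.reg is reported in iReg2.
 */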
1330
1331
1332/**
1333 * Sets the VM-instruction error VMCS field.
1334 *
1335 * @param pVCpu The cross context virtual CPU structure.
1336 * @param enmInsErr The VM-instruction error.
1337 */
1338DECL_FORCE_INLINE(void) iemVmxVmcsSetVmInstrErr(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1339{
1340 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1341 pVmcs->u32RoVmInstrError = enmInsErr;
1342}
1343
1344
1345/**
1346 * Sets the VM-exit qualification VMCS field.
1347 *
1348 * @param pVCpu The cross context virtual CPU structure.
1349 * @param uExitQual The VM-exit qualification field.
1350 */
1351DECL_FORCE_INLINE(void) iemVmxVmcsSetExitQual(PVMCPU pVCpu, uint64_t uExitQual)
1352{
1353 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1354 pVmcs->u64RoExitQual.u = uExitQual;
1355}
1356
1357
1358/**
1359 * Sets the VM-exit guest-linear address VMCS field.
1360 *
1361 * @param pVCpu The cross context virtual CPU structure.
1362 * @param uGuestLinearAddr The VM-exit guest-linear address field.
1363 */
1364DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestLinearAddr(PVMCPU pVCpu, uint64_t uGuestLinearAddr)
1365{
1366 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1367 pVmcs->u64RoGuestLinearAddr.u = uGuestLinearAddr;
1368}
1369
1370
1371/**
1372 * Sets the VM-exit guest-physical address VMCS field.
1373 *
1374 * @param pVCpu The cross context virtual CPU structure.
1375 * @param uGuestPhysAddr The VM-exit guest-physical address field.
1376 */
1377DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestPhysAddr(PVMCPU pVCpu, uint64_t uGuestPhysAddr)
1378{
1379 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1380 pVmcs->u64RoGuestPhysAddr.u = uGuestPhysAddr;
1381}
1382
1383
1384/**
1385 * Sets the VM-exit instruction length VMCS field.
1386 *
1387 * @param pVCpu The cross context virtual CPU structure.
1388 * @param cbInstr The VM-exit instruction length in bytes.
1389 */
1390DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrLen(PVMCPU pVCpu, uint32_t cbInstr)
1391{
1392 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1393 pVmcs->u32RoExitInstrLen = cbInstr;
1394}
1395
1396
1397/**
1398 * Sets the VM-exit instruction info. VMCS field.
1399 *
1400 * @param pVCpu The cross context virtual CPU structure.
1401 * @param uExitInstrInfo The VM-exit instruction info. field.
1402 */
1403DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitInstrInfo)
1404{
1405 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1406 pVmcs->u32RoExitInstrInfo = uExitInstrInfo;
1407}
1408
1409
1410/**
1411 * Implements VMSucceed for VMX instruction success.
1412 *
1413 * @param pVCpu The cross context virtual CPU structure.
1414 */
1415DECL_FORCE_INLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
1416{
1417 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1418}
1419
1420
1421/**
1422 * Implements VMFailInvalid for VMX instruction failure.
1423 *
1424 * @param pVCpu The cross context virtual CPU structure.
1425 */
1426DECL_FORCE_INLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
1427{
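    /* VMfailInvalid: set CF and clear the remaining arithmetic status flags; no VM-instruction error is recorded. */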
1428 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1429 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_CF;
1430}
1431
1432
1433/**
1434 * Implements VMFailValid for VMX instruction failure.
1435 *
1436 * @param pVCpu The cross context virtual CPU structure.
1437 * @param enmInsErr The VM instruction error.
1438 */
1439DECL_FORCE_INLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1440{
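    /* VMfailValid requires a current VMCS: set ZF, clear the other arithmetic status flags and record the error in the VM-instruction error field. */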
1441 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1442 {
1443 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1444 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_ZF;
1445 iemVmxVmcsSetVmInstrErr(pVCpu, enmInsErr);
1446 }
1447}
1448
1449
1450/**
1451 * Implements VMFail for VMX instruction failure.
1452 *
1453 * @param pVCpu The cross context virtual CPU structure.
1454 * @param enmInsErr The VM instruction error.
1455 */
1456DECL_FORCE_INLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1457{
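    /* Whether a current VMCS is present determines if we can record the error (VMfailValid) or can only signal failure via RFLAGS.CF (VMfailInvalid). */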
1458 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1459 iemVmxVmFailValid(pVCpu, enmInsErr);
1460 else
1461 iemVmxVmFailInvalid(pVCpu);
1462}
1463
1464
1465/**
1466 * Checks if the given auto-load/store MSR area count is valid for the
1467 * implementation.
1468 *
1469 * @returns @c true if it's within the valid limit, @c false otherwise.
1470 * @param pVCpu The cross context virtual CPU structure.
1471 * @param uMsrCount The MSR area count to check.
1472 */
1473DECL_FORCE_INLINE(bool) iemVmxIsAutoMsrCountValid(PVMCPU pVCpu, uint32_t uMsrCount)
1474{
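    /* The count a guest hypervisor may use is bounded by what we advertise in the IA32_VMX_MISC MSR; the virtual auto-MSR area is sized to hold that maximum. */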
1475 uint64_t const u64VmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
1476 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(u64VmxMiscMsr);
1477 Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
1478 if (uMsrCount <= cMaxSupportedMsrs)
1479 return true;
1480 return false;
1481}
1482
1483
1484/**
1485 * Flushes the current VMCS contents back to guest memory.
1486 *
1487 * @returns VBox status code.
1488 * @param pVCpu The cross context virtual CPU structure.
1489 */
1490DECL_FORCE_INLINE(int) iemVmxCommitCurrentVmcsToMemory(PVMCPU pVCpu)
1491{
1492 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1493 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
1494 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), sizeof(VMXVVMCS));
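    /* The VMCS is no longer current once its contents have been committed back to guest memory. */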
1495 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
1496 return rc;
1497}
1498
1499
1500/**
1501 * Implements VMSucceed for the VMREAD instruction and increments the guest RIP.
1502 *
1503 * @param   pVCpu       The cross context virtual CPU structure.
     * @param   cbInstr     The instruction length in bytes.
1504 */
1505DECL_FORCE_INLINE(void) iemVmxVmreadSuccess(PVMCPU pVCpu, uint8_t cbInstr)
1506{
1507 iemVmxVmSucceed(pVCpu);
1508 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1509}
1510
1511
1512/**
1513 * Gets the instruction diagnostic for segment base checks during VM-entry of a
1514 * nested-guest.
1515 *
1516 * @param iSegReg The segment index (X86_SREG_XXX).
1517 */
1518IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBase(unsigned iSegReg)
1519{
1520 switch (iSegReg)
1521 {
1522 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseCs;
1523 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseDs;
1524 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseEs;
1525 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseFs;
1526 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseGs;
1527 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseSs;
1528 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_1);
1529 }
1530}
1531
1532
1533/**
1534 * Gets the instruction diagnostic for segment base checks during VM-entry of a
1535 * nested-guest that is in Virtual-8086 mode.
1536 *
1537 * @param iSegReg The segment index (X86_SREG_XXX).
1538 */
1539IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBaseV86(unsigned iSegReg)
1540{
1541 switch (iSegReg)
1542 {
1543 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseV86Cs;
1544 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ds;
1545 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseV86Es;
1546 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseV86Fs;
1547 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseV86Gs;
1548 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ss;
1549 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
1550 }
1551}
1552
1553
1554/**
1555 * Gets the instruction diagnostic for segment limit checks during VM-entry of a
1556 * nested-guest that is in Virtual-8086 mode.
1557 *
1558 * @param iSegReg The segment index (X86_SREG_XXX).
1559 */
1560IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegLimitV86(unsigned iSegReg)
1561{
1562 switch (iSegReg)
1563 {
1564 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegLimitV86Cs;
1565 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ds;
1566 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegLimitV86Es;
1567 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegLimitV86Fs;
1568 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegLimitV86Gs;
1569 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ss;
1570 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_3);
1571 }
1572}
1573
1574
1575/**
1576 * Gets the instruction diagnostic for segment attribute checks during VM-entry of a
1577 * nested-guest that is in Virtual-8086 mode.
1578 *
1579 * @param iSegReg The segment index (X86_SREG_XXX).
1580 */
1581IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrV86(unsigned iSegReg)
1582{
1583 switch (iSegReg)
1584 {
1585 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrV86Cs;
1586 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ds;
1587 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrV86Es;
1588 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrV86Fs;
1589 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrV86Gs;
1590 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ss;
1591 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_4);
1592 }
1593}
1594
1595
1596/**
1597 * Gets the instruction diagnostic for segment attributes reserved bits failure
1598 * during VM-entry of a nested-guest.
1599 *
1600 * @param iSegReg The segment index (X86_SREG_XXX).
1601 */
1602IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrRsvd(unsigned iSegReg)
1603{
1604 switch (iSegReg)
1605 {
1606 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdCs;
1607 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdDs;
1608 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrRsvdEs;
1609 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdFs;
1610 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdGs;
1611 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdSs;
1612 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_5);
1613 }
1614}
1615
1616
1617/**
1618 * Gets the instruction diagnostic for segment attributes descriptor-type
1619 * (code/segment or system) failure during VM-entry of a nested-guest.
1620 *
1621 * @param iSegReg The segment index (X86_SREG_XXX).
1622 */
1623IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDescType(unsigned iSegReg)
1624{
1625 switch (iSegReg)
1626 {
1627 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeCs;
1628 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeDs;
1629 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeEs;
1630 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeFs;
1631 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeGs;
1632 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeSs;
1633 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_6);
1634 }
1635}
1636
1637
1638/**
1639 * Gets the instruction diagnostic for the segment attribute "present" bit
1640 * failure during VM-entry of a nested-guest.
1641 *
1642 * @param iSegReg The segment index (X86_SREG_XXX).
1643 */
1644IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrPresent(unsigned iSegReg)
1645{
1646 switch (iSegReg)
1647 {
1648 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrPresentCs;
1649 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrPresentDs;
1650 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrPresentEs;
1651 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrPresentFs;
1652 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrPresentGs;
1653 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrPresentSs;
1654 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_7);
1655 }
1656}
1657
1658
1659/**
1660 * Gets the instruction diagnostic for segment attribute granularity failure during
1661 * VM-entry of a nested-guest.
1662 *
1663 * @param iSegReg The segment index (X86_SREG_XXX).
1664 */
1665IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrGran(unsigned iSegReg)
1666{
1667 switch (iSegReg)
1668 {
1669 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrGranCs;
1670 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrGranDs;
1671 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrGranEs;
1672 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrGranFs;
1673 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrGranGs;
1674 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrGranSs;
1675 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_8);
1676 }
1677}
1678
1679/**
1680 * Gets the instruction diagnostic for segment attribute DPL/RPL failure during
1681 * VM-entry of a nested-guest.
1682 *
1683 * @param iSegReg The segment index (X86_SREG_XXX).
1684 */
1685IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDplRpl(unsigned iSegReg)
1686{
1687 switch (iSegReg)
1688 {
1689 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplCs;
1690 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplDs;
1691 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDplRplEs;
1692 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplFs;
1693 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplGs;
1694 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplSs;
1695 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_9);
1696 }
1697}
1698
1699
1700/**
1701 * Gets the instruction diagnostic for segment attribute type accessed failure
1702 * during VM-entry of a nested-guest.
1703 *
1704 * @param iSegReg The segment index (X86_SREG_XXX).
1705 */
1706IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrTypeAcc(unsigned iSegReg)
1707{
1708 switch (iSegReg)
1709 {
1710 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccCs;
1711 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccDs;
1712 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccEs;
1713 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccFs;
1714 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccGs;
1715 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccSs;
1716 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_10);
1717 }
1718}
1719
1720
1721/**
1722 * Gets the instruction diagnostic for guest CR3 referenced PDPTE reserved bits
1723 * failure during VM-entry of a nested-guest.
1724 *
1725 * @param   iPdpte      The PDPTE entry index.
1726 */
1727IEM_STATIC VMXVDIAG iemVmxGetDiagVmentryPdpteRsvd(unsigned iPdpte)
1728{
1729 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
1730 switch (iPdpte)
1731 {
1732 case 0: return kVmxVDiag_Vmentry_GuestPdpte0Rsvd;
1733 case 1: return kVmxVDiag_Vmentry_GuestPdpte1Rsvd;
1734 case 2: return kVmxVDiag_Vmentry_GuestPdpte2Rsvd;
1735 case 3: return kVmxVDiag_Vmentry_GuestPdpte3Rsvd;
1736 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_11);
1737 }
1738}
1739
1740
1741/**
1742 * Gets the instruction diagnostic for host CR3 referenced PDPTE reserved bits
1743 * failure during VM-exit of a nested-guest.
1744 *
1745 * @param   iPdpte      The PDPTE entry index.
1746 */
1747IEM_STATIC VMXVDIAG iemVmxGetDiagVmexitPdpteRsvd(unsigned iPdpte)
1748{
1749 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
1750 switch (iPdpte)
1751 {
1752 case 0: return kVmxVDiag_Vmexit_HostPdpte0Rsvd;
1753 case 1: return kVmxVDiag_Vmexit_HostPdpte1Rsvd;
1754 case 2: return kVmxVDiag_Vmexit_HostPdpte2Rsvd;
1755 case 3: return kVmxVDiag_Vmexit_HostPdpte3Rsvd;
1756 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_12);
1757 }
1758}
1759
1760
1761/**
1762 * Saves the guest control registers, debug registers and some MSRs as part of
1763 * VM-exit.
1764 *
1765 * @param pVCpu The cross context virtual CPU structure.
1766 */
1767IEM_STATIC void iemVmxVmexitSaveGuestControlRegsMsrs(PVMCPU pVCpu)
1768{
1769 /*
1770 * Saves the guest control registers, debug registers and some MSRs.
1771 * See Intel spec. 27.3.1 "Saving Control Registers, Debug Registers and MSRs".
1772 */
1773 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1774
1775 /* Save control registers. */
1776 pVmcs->u64GuestCr0.u = pVCpu->cpum.GstCtx.cr0;
1777 pVmcs->u64GuestCr3.u = pVCpu->cpum.GstCtx.cr3;
1778 pVmcs->u64GuestCr4.u = pVCpu->cpum.GstCtx.cr4;
1779
1780 /* Save SYSENTER CS, ESP, EIP. */
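     /* Outside long mode only the lower 32 bits of the SYSENTER ESP/EIP MSRs are defined, so we save just those. */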
1781 pVmcs->u32GuestSysenterCS = pVCpu->cpum.GstCtx.SysEnter.cs;
1782 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1783 {
1784 pVmcs->u64GuestSysenterEsp.u = pVCpu->cpum.GstCtx.SysEnter.esp;
1785 pVmcs->u64GuestSysenterEip.u = pVCpu->cpum.GstCtx.SysEnter.eip;
1786 }
1787 else
1788 {
1789 pVmcs->u64GuestSysenterEsp.s.Lo = pVCpu->cpum.GstCtx.SysEnter.esp;
1790 pVmcs->u64GuestSysenterEip.s.Lo = pVCpu->cpum.GstCtx.SysEnter.eip;
1791 }
1792
1793 /* Save debug registers (DR7 and IA32_DEBUGCTL MSR). */
1794 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_DEBUG)
1795 {
1796 pVmcs->u64GuestDr7.u = pVCpu->cpum.GstCtx.dr[7];
1797 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
1798 }
1799
1800 /* Save PAT MSR. */
1801 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PAT_MSR)
1802 pVmcs->u64GuestPatMsr.u = pVCpu->cpum.GstCtx.msrPAT;
1803
1804 /* Save EFER MSR. */
1805 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_EFER_MSR)
1806 pVmcs->u64GuestEferMsr.u = pVCpu->cpum.GstCtx.msrEFER;
1807
1808 /* We don't support clearing IA32_BNDCFGS MSR yet. */
1809 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR));
1810
1811 /* Nothing to do for SMBASE register - We don't support SMM yet. */
1812}
1813
1814
1815/**
1816 * Saves the guest force-flags in preparation of entering the nested-guest.
1817 *
1818 * @param pVCpu The cross context virtual CPU structure.
1819 */
1820IEM_STATIC void iemVmxVmentrySaveForceFlags(PVMCPU pVCpu)
1821{
1822 /* We shouldn't be called multiple times during VM-entry. */
1823 Assert(pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions == 0);
1824
1825 /* MTF should not be set outside VMX non-root mode. */
1826 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_MTF));
1827
1828 /*
1829 * Preserve the required force-flags.
1830 *
1831 * We cache and clear force-flags that would affect the execution of the
1832 * nested-guest. Cached flags are then restored while returning to the guest
1833 * if necessary.
1834 *
1835 * - VMCPU_FF_INHIBIT_INTERRUPTS need not be cached as it only affects
1836 * interrupts until the completion of the current VMLAUNCH/VMRESUME
1837 * instruction. Interrupt inhibition for any nested-guest instruction
1838 * will be set later while loading the guest-interruptibility state.
1839 *
1840 * - VMCPU_FF_BLOCK_NMIS needs to be cached as VM-exits caused before
1841 * successful VM-entry needs to continue blocking NMIs if it was in effect
1842 * during VM-entry.
1843 *
1844 * - MTF need not be preserved as it's used only in VMX non-root mode and
1845 * is supplied on VM-entry through the VM-execution controls.
1846 *
1847 * The remaining FFs (e.g. timers, APIC updates) must stay in place so that
1848 * we will be able to generate interrupts that may cause VM-exits for
1849 * the nested-guest.
1850 */
1851 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
1852
1853 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_BLOCK_NMIS))
1854 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_BLOCK_NMIS);
1855}
1856
1857
1858/**
1859 * Restores the guest force-flags in preparation of exiting the nested-guest.
1860 *
1861 * @param pVCpu The cross context virtual CPU structure.
1862 */
1863IEM_STATIC void iemVmxVmexitRestoreForceFlags(PVMCPU pVCpu)
1864{
1865 if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
1866 {
1867 VMCPU_FF_SET(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions);
1868 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0;
1869 }
1870}
1871
1872
1873/**
1874 * Performs a VMX transition, updating PGM, IEM and CPUM.
1875 *
1876 * @returns VBox status code.
     * @param   pVCpu   The cross context virtual CPU structure.
1877 */
1878IEM_STATIC int iemVmxWorldSwitch(PVMCPU pVCpu)
1879{
1880 /*
1881 * Inform PGM about paging mode changes.
1882 * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet,
1883 * see comment in iemMemPageTranslateAndCheckAccess().
1884 */
1885 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
1886# ifdef IN_RING3
1887 Assert(rc != VINF_PGM_CHANGE_MODE);
1888# endif
1889 AssertRCReturn(rc, rc);
1890
1891 /* Inform CPUM (recompiler), can later be removed. */
1892 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1893
1894 /*
1895 * Flush the TLB with new CR3. This is required in case the PGM mode change
1896 * above doesn't actually change anything.
1897 */
1898 if (rc == VINF_SUCCESS)
1899 {
1900 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true);
1901 AssertRCReturn(rc, rc);
1902 }
1903
1904 /* Re-initialize IEM cache/state after the drastic mode switch. */
1905 iemReInitExec(pVCpu);
1906 return rc;
1907}
1908
1909
1910/**
1911 * Saves guest segment registers, GDTR, IDTR, LDTR, TR as part of VM-exit.
1912 *
1913 * @param pVCpu The cross context virtual CPU structure.
1914 */
1915IEM_STATIC void iemVmxVmexitSaveGuestSegRegs(PVMCPU pVCpu)
1916{
1917 /*
1918 * Save guest segment registers, GDTR, IDTR, LDTR, TR.
1919 * See Intel spec 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
1920 */
1921 /* CS, SS, ES, DS, FS, GS. */
1922 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1923 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
1924 {
1925 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1926 if (!pSelReg->Attr.n.u1Unusable)
1927 iemVmxVmcsSetGuestSegReg(pVmcs, iSegReg, pSelReg);
1928 else
1929 {
1930 /*
1931 * For unusable segments the attributes are undefined except for CS and SS.
1932 * For the rest we don't bother preserving anything but the unusable bit.
1933 */
1934 switch (iSegReg)
1935 {
1936 case X86_SREG_CS:
1937 pVmcs->GuestCs = pSelReg->Sel;
1938 pVmcs->u64GuestCsBase.u = pSelReg->u64Base;
1939 pVmcs->u32GuestCsLimit = pSelReg->u32Limit;
1940 pVmcs->u32GuestCsAttr = pSelReg->Attr.u & ( X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
1941 | X86DESCATTR_UNUSABLE);
1942 break;
1943
1944 case X86_SREG_SS:
1945 pVmcs->GuestSs = pSelReg->Sel;
1946 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1947 pVmcs->u64GuestSsBase.u &= UINT32_C(0xffffffff);
1948 pVmcs->u32GuestSsAttr = pSelReg->Attr.u & (X86DESCATTR_DPL | X86DESCATTR_UNUSABLE);
1949 break;
1950
1951 case X86_SREG_DS:
1952 pVmcs->GuestDs = pSelReg->Sel;
1953 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1954 pVmcs->u64GuestDsBase.u &= UINT32_C(0xffffffff);
1955 pVmcs->u32GuestDsAttr = X86DESCATTR_UNUSABLE;
1956 break;
1957
1958 case X86_SREG_ES:
1959 pVmcs->GuestEs = pSelReg->Sel;
1960 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1961 pVmcs->u64GuestEsBase.u &= UINT32_C(0xffffffff);
1962 pVmcs->u32GuestEsAttr = X86DESCATTR_UNUSABLE;
1963 break;
1964
1965 case X86_SREG_FS:
1966 pVmcs->GuestFs = pSelReg->Sel;
1967 pVmcs->u64GuestFsBase.u = pSelReg->u64Base;
1968 pVmcs->u32GuestFsAttr = X86DESCATTR_UNUSABLE;
1969 break;
1970
1971 case X86_SREG_GS:
1972 pVmcs->GuestGs = pSelReg->Sel;
1973 pVmcs->u64GuestGsBase.u = pSelReg->u64Base;
1974 pVmcs->u32GuestGsAttr = X86DESCATTR_UNUSABLE;
1975 break;
1976 }
1977 }
1978 }
1979
1980 /* Segment attribute bits 31:17 and 11:8 MBZ. */
1981 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
1982 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_UNUSABLE;
1983 /* LDTR. */
1984 {
1985 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.ldtr;
1986 pVmcs->GuestLdtr = pSelReg->Sel;
1987 pVmcs->u64GuestLdtrBase.u = pSelReg->u64Base;
1988 Assert(X86_IS_CANONICAL(pSelReg->u64Base));
1989 pVmcs->u32GuestLdtrLimit = pSelReg->u32Limit;
1990 pVmcs->u32GuestLdtrAttr = pSelReg->Attr.u & fValidAttrMask;
1991 }
1992
1993 /* TR. */
1994 {
1995 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.tr;
1996 pVmcs->GuestTr = pSelReg->Sel;
1997 pVmcs->u64GuestTrBase.u = pSelReg->u64Base;
1998 pVmcs->u32GuestTrLimit = pSelReg->u32Limit;
1999 pVmcs->u32GuestTrAttr = pSelReg->Attr.u & fValidAttrMask;
2000 }
2001
2002 /* GDTR. */
2003 pVmcs->u64GuestGdtrBase.u = pVCpu->cpum.GstCtx.gdtr.pGdt;
2004 pVmcs->u32GuestGdtrLimit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
2005
2006 /* IDTR. */
2007 pVmcs->u64GuestIdtrBase.u = pVCpu->cpum.GstCtx.idtr.pIdt;
2008 pVmcs->u32GuestIdtrLimit = pVCpu->cpum.GstCtx.idtr.cbIdt;
2009}
2010
2011
2012/**
2013 * Saves guest non-register state as part of VM-exit.
2014 *
2015 * @param pVCpu The cross context virtual CPU structure.
2016 * @param uExitReason The VM-exit reason.
2017 */
2018IEM_STATIC void iemVmxVmexitSaveGuestNonRegState(PVMCPU pVCpu, uint32_t uExitReason)
2019{
2020 /*
2021 * Save guest non-register state.
2022 * See Intel spec. 27.3.4 "Saving Non-Register State".
2023 */
2024 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2025
2026 /* Activity-state: VM-exits occur before changing the activity state, nothing further to do. */
2027
2028 /* Interruptibility-state. */
2029 pVmcs->u32GuestIntrState = 0;
2030 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
2031 { /** @todo NSTVMX: Virtual-NMI blocking. */ }
2032 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
2033 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
2034
2035 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2036 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
2037 {
2038 /** @todo NSTVMX: We can't distinguish between blocking-by-MovSS and blocking-by-STI
2039 * currently. */
2040 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
2041 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2042 }
2043 /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */
2044
2045 /* Pending debug exceptions. */
2046 if ( uExitReason != VMX_EXIT_INIT_SIGNAL
2047 && uExitReason != VMX_EXIT_SMI
2048 && uExitReason != VMX_EXIT_ERR_MACHINE_CHECK
2049 && !HMVmxIsTrapLikeVmexit(uExitReason))
2050 {
2051 /** @todo NSTVMX: also must exclude VM-exits caused by debug exceptions when
2052 * block-by-MovSS is in effect. */
2053 pVmcs->u64GuestPendingDbgXcpt.u = 0;
2054 }
2055
2056 /** @todo NSTVMX: Save VMX preemption timer value. */
2057
2058 /* PDPTEs. */
2059 /* We don't support EPT yet. */
2060 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
2061 pVmcs->u64GuestPdpte0.u = 0;
2062 pVmcs->u64GuestPdpte1.u = 0;
2063 pVmcs->u64GuestPdpte2.u = 0;
2064 pVmcs->u64GuestPdpte3.u = 0;
2065}
2066
2067
2068/**
2069 * Saves the guest-state as part of VM-exit.
2070 *
2072 * @param pVCpu The cross context virtual CPU structure.
2073 * @param uExitReason The VM-exit reason.
2074 */
2075IEM_STATIC void iemVmxVmexitSaveGuestState(PVMCPU pVCpu, uint32_t uExitReason)
2076{
2077 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2078 Assert(pVmcs);
2079
2080 iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu);
2081 iemVmxVmexitSaveGuestSegRegs(pVCpu);
2082
2083 /*
2084 * Save guest RIP, RSP and RFLAGS.
2085 * See Intel spec. 27.3.3 "Saving RIP, RSP and RFLAGS".
2086 */
2087 /* We don't support enclave mode yet. */
2088 pVmcs->u64GuestRip.u = pVCpu->cpum.GstCtx.rip;
2089 pVmcs->u64GuestRsp.u = pVCpu->cpum.GstCtx.rsp;
2090 pVmcs->u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u; /** @todo NSTVMX: Check RFLAGS.RF handling. */
2091
2092 iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason);
2093}
2094
2095
2096/**
2097 * Saves the guest MSRs into the VM-exit auto-store MSRs area as part of VM-exit.
2098 *
2099 * @returns VBox status code.
2100 * @param pVCpu The cross context virtual CPU structure.
2101 * @param uExitReason The VM-exit reason (for diagnostic purposes).
2102 */
2103IEM_STATIC int iemVmxVmexitSaveGuestAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
2104{
2105 /*
2106 * Save guest MSRs.
2107 * See Intel spec. 27.4 "Saving MSRs".
2108 */
2109 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2110 const char *const pszFailure = "VMX-abort";
2111
2112 /*
2113 * The VM-exit MSR-store area address need not be a valid guest-physical address if the
2114 * VM-exit MSR-store count is 0. If this is the case, bail early without reading it.
2115 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
2116 */
2117 uint32_t const cMsrs = pVmcs->u32ExitMsrStoreCount;
2118 if (!cMsrs)
2119 return VINF_SUCCESS;
2120
2121 /*
2122 * Verify the MSR auto-store count. Physical CPUs can behave unpredictably if the count
2123 * is exceeded including possibly raising #MC exceptions during VMX transition. Our
2124 * implementation causes a VMX-abort followed by a triple-fault.
2125 */
2126 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
2127 if (fIsMsrCountValid)
2128 { /* likely */ }
2129 else
2130 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount);
2131
2132 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
2133 Assert(pMsr);
2134 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
2135 {
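        /* MSR entries with reserved bits set, IA32_SMBASE or any MSR in the x2APIC range cannot be stored; they cause a VMX-abort via the failure path below. */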
2136 if ( !pMsr->u32Reserved
2137 && pMsr->u32Msr != MSR_IA32_SMBASE
2138 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
2139 {
2140 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pMsr->u32Msr, &pMsr->u64Value);
2141 if (rcStrict == VINF_SUCCESS)
2142 continue;
2143
2144 /*
2145 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue the VM-exit.
2146 * If storing any guest hypervisor MSR requires ring-3 handling, we cause a VMX-abort,
2147 * recording the MSR index in the auxiliary info. field and indicating it further with our
2148 * own, specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0
2149 * if possible, or come up with a better, generic solution.
2150 */
2151 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2152 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_READ
2153 ? kVmxVDiag_Vmexit_MsrStoreRing3
2154 : kVmxVDiag_Vmexit_MsrStore;
2155 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2156 }
2157 else
2158 {
2159 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2160 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreRsvd);
2161 }
2162 }
2163
2164 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrStore.u;
2165 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysAutoMsrArea,
2166 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea), VMX_V_AUTOMSR_AREA_SIZE);
2167 if (RT_SUCCESS(rc))
2168 { /* likely */ }
2169 else
2170 {
2171 AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
2172 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrWritePhys);
2173 }
2174
2175 NOREF(uExitReason);
2176 NOREF(pszFailure);
2177 return VINF_SUCCESS;
2178}
2179
2180
2181/**
2182 * Performs a VMX abort (due to a fatal error during VM-exit).
2183 *
2184 * @returns Strict VBox status code.
2185 * @param pVCpu The cross context virtual CPU structure.
2186 * @param enmAbort The VMX abort reason.
2187 */
2188IEM_STATIC VBOXSTRICTRC iemVmxAbort(PVMCPU pVCpu, VMXABORT enmAbort)
2189{
2190 /*
2191 * Perform the VMX abort.
2192 * See Intel spec. 27.7 "VMX Aborts".
2193 */
2194 LogFunc(("enmAbort=%u (%s) -> RESET\n", enmAbort, HMVmxGetAbortDesc(enmAbort)));
2195
2196 /* We don't support SMX yet. */
2197 pVCpu->cpum.GstCtx.hwvirt.vmx.enmAbort = enmAbort;
2198 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
2199 {
2200 RTGCPHYS const GCPhysVmcs = IEM_VMX_GET_CURRENT_VMCS(pVCpu);
2201 uint32_t const offVmxAbort = RT_OFFSETOF(VMXVVMCS, u32VmxAbortId);
2202 PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + offVmxAbort, &enmAbort, sizeof(enmAbort));
2203 }
2204
2205 return VINF_EM_TRIPLE_FAULT;
2206}
2207
2208
2209/**
2210 * Loads host control registers, debug registers and MSRs as part of VM-exit.
2211 *
2212 * @param pVCpu The cross context virtual CPU structure.
2213 */
2214IEM_STATIC void iemVmxVmexitLoadHostControlRegsMsrs(PVMCPU pVCpu)
2215{
2216 /*
2217 * Load host control registers, debug registers and MSRs.
2218 * See Intel spec. 27.5.1 "Loading Host Control Registers, Debug Registers, MSRs".
2219 */
2220 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2221 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2222
2223 /* CR0. */
2224 {
2225 /* Bits 63:32, 28:19, 17, 15:6, ET, CD, NW and CR0 MB1 bits are not modified. */
2226 uint64_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
2227 uint64_t const fCr0IgnMask = UINT64_C(0xffffffff1ff8ffc0) | X86_CR0_ET | X86_CR0_CD | X86_CR0_NW | uCr0Fixed0;
2228 uint64_t const uHostCr0 = pVmcs->u64HostCr0.u;
2229 uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
2230 uint64_t const uValidCr0 = (uHostCr0 & ~fCr0IgnMask) | (uGuestCr0 & fCr0IgnMask);
2231 CPUMSetGuestCR0(pVCpu, uValidCr0);
2232 }
2233
2234 /* CR4. */
2235 {
2236 /* CR4 MB1 bits are not modified. */
2237 uint64_t const fCr4IgnMask = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
2238 uint64_t const uHostCr4 = pVmcs->u64HostCr4.u;
2239 uint64_t const uGuestCr4 = pVCpu->cpum.GstCtx.cr4;
2240 uint64_t uValidCr4 = (uHostCr4 & ~fCr4IgnMask) | (uGuestCr4 & fCr4IgnMask);
2241 if (fHostInLongMode)
2242 uValidCr4 |= X86_CR4_PAE;
2243 else
2244 uValidCr4 &= ~X86_CR4_PCIDE;
2245 CPUMSetGuestCR4(pVCpu, uValidCr4);
2246 }
2247
2248 /* CR3 (host value validated while checking host-state during VM-entry). */
2249 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64HostCr3.u;
2250
2251 /* DR7. */
2252 pVCpu->cpum.GstCtx.dr[7] = X86_DR7_INIT_VAL;
2253
2254 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
2255
2256 /* Load SYSENTER CS, ESP, EIP (host values validated while checking host-state during VM-entry). */
2257 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64HostSysenterEip.u;
2258 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64HostSysenterEsp.u;
2259 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32HostSysenterCs;
2260
2261 /* FS, GS bases are loaded later while we load host segment registers. */
2262
2263 /* EFER MSR (host value validated while checking host-state during VM-entry). */
2264 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
2265 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64HostEferMsr.u;
2266 else if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2267 {
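        /* Without the "load IA32_EFER" exit control, EFER.LMA and EFER.LME follow the "host address-space size" exit control. */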
2268 if (fHostInLongMode)
2269 pVCpu->cpum.GstCtx.msrEFER |= (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
2270 else
2271 pVCpu->cpum.GstCtx.msrEFER &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
2272 }
2273
2274 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
2275
2276 /* PAT MSR (host value is validated while checking host-state during VM-entry). */
2277 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
2278 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64HostPatMsr.u;
2279
2280 /* We don't support IA32_BNDCFGS MSR yet. */
2281}
2282
2283
2284/**
2285 * Loads host segment registers, GDTR, IDTR, LDTR and TR as part of VM-exit.
2286 *
2287 * @param pVCpu The cross context virtual CPU structure.
2288 */
2289IEM_STATIC void iemVmxVmexitLoadHostSegRegs(PVMCPU pVCpu)
2290{
2291 /*
2292 * Load host segment registers, GDTR, IDTR, LDTR and TR.
2293 * See Intel spec. 27.5.2 "Loading Host Segment and Descriptor-Table Registers".
2294 *
2295 * Warning! Be careful to not touch fields that are reserved by VT-x,
2296 * e.g. segment limit high bits stored in segment attributes (in bits 11:8).
2297 */
2298 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2299 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2300
2301 /* CS, SS, ES, DS, FS, GS. */
2302 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
2303 {
2304 RTSEL const HostSel = iemVmxVmcsGetHostSelReg(pVmcs, iSegReg);
2305 bool const fUnusable = RT_BOOL(HostSel == 0);
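        /* A null host selector loads the segment as unusable; the host CS selector can never be null (see the assertion in the CS case below). */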
2306
2307 /* Selector. */
2308 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel = HostSel;
2309 pVCpu->cpum.GstCtx.aSRegs[iSegReg].ValidSel = HostSel;
2310 pVCpu->cpum.GstCtx.aSRegs[iSegReg].fFlags = CPUMSELREG_FLAGS_VALID;
2311
2312 /* Limit. */
2313 pVCpu->cpum.GstCtx.aSRegs[iSegReg].u32Limit = 0xffffffff;
2314
2315 /* Base and Attributes. */
2316 switch (iSegReg)
2317 {
2318 case X86_SREG_CS:
2319 {
2320 pVCpu->cpum.GstCtx.cs.u64Base = 0;
2321 pVCpu->cpum.GstCtx.cs.Attr.n.u4Type = X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED;
2322 pVCpu->cpum.GstCtx.cs.Attr.n.u1DescType = 1;
2323 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = 0;
2324 pVCpu->cpum.GstCtx.cs.Attr.n.u1Present = 1;
2325 pVCpu->cpum.GstCtx.cs.Attr.n.u1Long = fHostInLongMode;
2326 pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig = !fHostInLongMode;
2327 pVCpu->cpum.GstCtx.cs.Attr.n.u1Granularity = 1;
2328 Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Unusable);
2329 Assert(!fUnusable);
2330 break;
2331 }
2332
2333 case X86_SREG_SS:
2334 case X86_SREG_ES:
2335 case X86_SREG_DS:
2336 {
2337 pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base = 0;
2338 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2339 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DescType = 1;
2340 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u2Dpl = 0;
2341 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Present = 1;
2342 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DefBig = 1;
2343 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Granularity = 1;
2344 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable = fUnusable;
2345 break;
2346 }
2347
2348 case X86_SREG_FS:
2349 {
2350 Assert(X86_IS_CANONICAL(pVmcs->u64HostFsBase.u));
2351 pVCpu->cpum.GstCtx.fs.u64Base = !fUnusable ? pVmcs->u64HostFsBase.u : 0;
2352 pVCpu->cpum.GstCtx.fs.Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2353 pVCpu->cpum.GstCtx.fs.Attr.n.u1DescType = 1;
2354 pVCpu->cpum.GstCtx.fs.Attr.n.u2Dpl = 0;
2355 pVCpu->cpum.GstCtx.fs.Attr.n.u1Present = 1;
2356 pVCpu->cpum.GstCtx.fs.Attr.n.u1DefBig = 1;
2357 pVCpu->cpum.GstCtx.fs.Attr.n.u1Granularity = 1;
2358 pVCpu->cpum.GstCtx.fs.Attr.n.u1Unusable = fUnusable;
2359 break;
2360 }
2361
2362 case X86_SREG_GS:
2363 {
2364 Assert(X86_IS_CANONICAL(pVmcs->u64HostGsBase.u));
2365 pVCpu->cpum.GstCtx.gs.u64Base = !fUnusable ? pVmcs->u64HostGsBase.u : 0;
2366 pVCpu->cpum.GstCtx.gs.Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2367 pVCpu->cpum.GstCtx.gs.Attr.n.u1DescType = 1;
2368 pVCpu->cpum.GstCtx.gs.Attr.n.u2Dpl = 0;
2369 pVCpu->cpum.GstCtx.gs.Attr.n.u1Present = 1;
2370 pVCpu->cpum.GstCtx.gs.Attr.n.u1DefBig = 1;
2371 pVCpu->cpum.GstCtx.gs.Attr.n.u1Granularity = 1;
2372 pVCpu->cpum.GstCtx.gs.Attr.n.u1Unusable = fUnusable;
2373 break;
2374 }
2375 }
2376 }
2377
2378 /* TR. */
2379 Assert(X86_IS_CANONICAL(pVmcs->u64HostTrBase.u));
2380 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1Unusable);
2381 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->HostTr;
2382 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->HostTr;
2383 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2384 pVCpu->cpum.GstCtx.tr.u32Limit = X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN;
2385 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64HostTrBase.u;
2386 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2387 pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType = 0;
2388 pVCpu->cpum.GstCtx.tr.Attr.n.u2Dpl = 0;
2389 pVCpu->cpum.GstCtx.tr.Attr.n.u1Present = 1;
2390 pVCpu->cpum.GstCtx.tr.Attr.n.u1DefBig = 0;
2391 pVCpu->cpum.GstCtx.tr.Attr.n.u1Granularity = 0;
2392
2393 /* LDTR. */
2394 pVCpu->cpum.GstCtx.ldtr.Sel = 0;
2395 pVCpu->cpum.GstCtx.ldtr.ValidSel = 0;
2396 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2397 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
2398 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
2399 pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Unusable = 1;
2400
2401 /* GDTR. */
2402 Assert(X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u));
2403 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64HostGdtrBase.u;
2404 pVCpu->cpum.GstCtx.gdtr.cbGdt = 0xffff;
2405
2406 /* IDTR. */
2407 Assert(X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u));
2408 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64HostIdtrBase.u;
2409 pVCpu->cpum.GstCtx.idtr.cbIdt = 0xffff;
2410}
2411
2412
2413/**
2414 * Checks host PDPTEs as part of VM-exit.
2415 *
2416 * @returns VBox status code.
     * @param   pVCpu           The cross context virtual CPU structure.
2417 * @param uExitReason The VM-exit reason (for logging purposes).
2418 */
2419IEM_STATIC int iemVmxVmexitCheckHostPdptes(PVMCPU pVCpu, uint32_t uExitReason)
2420{
2421 /*
2422 * Check host PDPTEs.
2423 * See Intel spec. 27.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
2424 */
2425 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2426 const char *const pszFailure = "VMX-abort";
2427 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2428
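     /* Host PDPTEs are only relevant when the host will be using PAE paging, i.e. CR4.PAE is set while not in long mode. */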
2429 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
2430 && !fHostInLongMode)
2431 {
2432 uint64_t const uHostCr3 = pVCpu->cpum.GstCtx.cr3 & X86_CR3_PAE_PAGE_MASK;
2433 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
2434 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uHostCr3, sizeof(aPdptes));
2435 if (RT_SUCCESS(rc))
2436 {
2437 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
2438 {
2439 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
2440 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
2441 { /* likely */ }
2442 else
2443 {
2444 VMXVDIAG const enmDiag = iemVmxGetDiagVmexitPdpteRsvd(iPdpte);
2445 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2446 }
2447 }
2448 }
2449 else
2450 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_HostPdpteCr3ReadPhys);
2451 }
2452
2453 NOREF(pszFailure);
2454 NOREF(uExitReason);
2455 return VINF_SUCCESS;
2456}
2457
2458
2459/**
2460 * Loads the host MSRs from the VM-exit auto-load MSRs area as part of VM-exit.
2461 *
2462 * @returns VBox status code.
2463 * @param pVCpu The cross context virtual CPU structure.
2464 * @param   uExitReason     The VM-exit reason (for logging purposes).
2465 */
2466IEM_STATIC int iemVmxVmexitLoadHostAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
2467{
2468 /*
2469 * Load host MSRs.
2470 * See Intel spec. 27.6 "Loading MSRs".
2471 */
2472 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2473 const char *const pszFailure = "VMX-abort";
2474
2475 /*
2476 * The VM-exit MSR-load area address need not be a valid guest-physical address if the
2477 * VM-exit MSR load count is 0. If this is the case, bail early without reading it.
2478 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
2479 */
2480 uint32_t const cMsrs = pVmcs->u32ExitMsrLoadCount;
2481 if (!cMsrs)
2482 return VINF_SUCCESS;
2483
2484 /*
2485 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count
2486 * is exceeded including possibly raising #MC exceptions during VMX transition. Our
2487 * implementation causes a VMX-abort followed by a triple-fault.
2488 */
2489 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
2490 if (fIsMsrCountValid)
2491 { /* likely */ }
2492 else
2493 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadCount);
2494
2495 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrLoad.u;
2496 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
2497 GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
2498 if (RT_SUCCESS(rc))
2499 {
2500 PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
2501 Assert(pMsr);
2502 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
2503 {
2504 if ( !pMsr->u32Reserved
2505 && pMsr->u32Msr != MSR_K8_FS_BASE
2506 && pMsr->u32Msr != MSR_K8_GS_BASE
2507 && pMsr->u32Msr != MSR_K6_EFER
2508 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
2509 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
2510 {
2511 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
2512 if (rcStrict == VINF_SUCCESS)
2513 continue;
2514
2515 /*
2516 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue the VM-exit.
2517 * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VMX-abort,
2518 * recording the MSR index in the auxiliary info. field and indicating it further with our
2519 * own, specific diagnostic code. Later, we can try to implement handling of the MSR in ring-0
2520 * if possible, or come up with a better, generic solution.
2521 */
2522 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2523 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
2524 ? kVmxVDiag_Vmexit_MsrLoadRing3
2525 : kVmxVDiag_Vmexit_MsrLoad;
2526 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2527 }
2528 else
2529 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadRsvd);
2530 }
2531 }
2532 else
2533 {
2534 AssertMsgFailed(("VM-exit: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
2535 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadPtrReadPhys);
2536 }
2537
2538 NOREF(uExitReason);
2539 NOREF(pszFailure);
2540 return VINF_SUCCESS;
2541}
2542
2543
2544/**
2545 * Loads the host state as part of VM-exit.
2546 *
2547 * @returns Strict VBox status code.
2548 * @param pVCpu The cross context virtual CPU structure.
2549 * @param uExitReason The VM-exit reason (for logging purposes).
2550 */
2551IEM_STATIC VBOXSTRICTRC iemVmxVmexitLoadHostState(PVMCPU pVCpu, uint32_t uExitReason)
2552{
2553 /*
2554 * Load host state.
2555 * See Intel spec. 27.5 "Loading Host State".
2556 */
2557 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2558 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2559
2560 /* We cannot return from a long-mode guest to a host that is not in long mode. */
2561 if ( CPUMIsGuestInLongMode(pVCpu)
2562 && !fHostInLongMode)
2563 {
2564 Log(("VM-exit from long-mode guest to host not in long-mode -> VMX-Abort\n"));
2565 return iemVmxAbort(pVCpu, VMXABORT_HOST_NOT_IN_LONG_MODE);
2566 }
2567
2568 iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);
2569 iemVmxVmexitLoadHostSegRegs(pVCpu);
2570
2571 /*
2572 * Load host RIP, RSP and RFLAGS.
2573 * See Intel spec. 27.5.3 "Loading Host RIP, RSP and RFLAGS"
2574 */
2575 pVCpu->cpum.GstCtx.rip = pVmcs->u64HostRip.u;
2576 pVCpu->cpum.GstCtx.rsp = pVmcs->u64HostRsp.u;
2577 pVCpu->cpum.GstCtx.rflags.u = X86_EFL_1;
2578
2579 /* Update non-register state. */
2580 iemVmxVmexitRestoreForceFlags(pVCpu);
2581
2582 /* Clear address range monitoring. */
2583 EMMonitorWaitClear(pVCpu);
2584
2585 /* Perform the VMX transition (PGM updates). */
2586 VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
2587 if (rcStrict == VINF_SUCCESS)
2588 {
2589 /* Check host PDPTEs (only when we've fully switched page tables). */
2590 /** @todo r=ramshankar: I don't know if PGM does this for us already or not... */
2591 int rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);
2592 if (RT_FAILURE(rc))
2593 {
2594 Log(("VM-exit failed while restoring host PDPTEs -> VMX-Abort\n"));
2595 return iemVmxAbort(pVCpu, VMXBOART_HOST_PDPTE);
2596 }
2597 }
2598 else if (RT_SUCCESS(rcStrict))
2599 {
2600 Log3(("VM-exit: iemVmxWorldSwitch returns %Rrc (uExitReason=%u) -> Setting passup status\n", VBOXSTRICTRC_VAL(rcStrict),
2601 uExitReason));
2602 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2603 }
2604 else
2605 {
2606 Log3(("VM-exit: iemVmxWorldSwitch failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
2607 return VBOXSTRICTRC_VAL(rcStrict);
2608 }
2609
2610 Assert(rcStrict == VINF_SUCCESS);
2611
2612 /* Load MSRs from the VM-exit auto-load MSR area. */
2613 int rc = iemVmxVmexitLoadHostAutoMsrs(pVCpu, uExitReason);
2614 if (RT_FAILURE(rc))
2615 {
2616 Log(("VM-exit failed while loading host MSRs -> VMX-Abort\n"));
2617 return iemVmxAbort(pVCpu, VMXABORT_LOAD_HOST_MSR);
2618 }
2619
2620 return rcStrict;
2621}
2622
2623
2624/**
2625 * VMX VM-exit handler.
2626 *
2627 * @returns Strict VBox status code.
2628 * @retval VINF_VMX_VMEXIT when the VM-exit is successful.
2629 * @retval VINF_EM_TRIPLE_FAULT when VM-exit is unsuccessful and leads to a
2630 * triple-fault.
2631 *
2632 * @param pVCpu The cross context virtual CPU structure.
2633 * @param uExitReason The VM-exit reason.
2634 */
2635IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason)
2636{
2637 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2638 Assert(pVmcs);
2639
2640 pVmcs->u32RoExitReason = uExitReason;
2641
2642 /** @todo NSTVMX: IEMGetCurrentXcpt will be VM-exit interruption info. */
2643 /** @todo NSTVMX: The source event should be recorded in IDT-vectoring info
2644 * during injection. */
2645
2646 /*
2647 * Save the guest state back into the VMCS.
2648 * We only need to save the state when the VM-entry was successful.
2649 */
2650 bool const fVmentryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
2651 if (!fVmentryFailed)
2652 {
2653 iemVmxVmexitSaveGuestState(pVCpu, uExitReason);
2654 int rc = iemVmxVmexitSaveGuestAutoMsrs(pVCpu, uExitReason);
2655 if (RT_SUCCESS(rc))
2656 { /* likely */ }
2657 else
2658 return iemVmxAbort(pVCpu, VMXABORT_SAVE_GUEST_MSRS);
2659 }
2660
2661 /*
2662 * The high bits of the VM-exit reason are only relevant when the VM-exit occurs in
2663 * enclave mode/SMM which we don't support yet. If we ever add support for it, we can
2664 * pass just the lower bits, till then an assert should suffice.
2665 */
2666 Assert(!RT_HI_U16(uExitReason));
2667
2668 VBOXSTRICTRC rcStrict = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
2669 if (RT_FAILURE(rcStrict))
2670 LogFunc(("Loading host-state failed. uExitReason=%u rc=%Rrc\n", uExitReason, VBOXSTRICTRC_VAL(rcStrict)));
2671
2672 /* We're no longer in nested-guest execution mode. */
2673 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
2674
2675 return rcStrict;
2676}
2677
2678
2679/**
2680 * VMX VM-exit handler for VM-exits due to instruction execution.
2681 *
2682 * This is intended for instructions where the caller provides all the relevant
2683 * VM-exit information.
2684 *
2685 * @returns Strict VBox status code.
2686 * @param pVCpu The cross context virtual CPU structure.
2687 * @param pExitInfo Pointer to the VM-exit instruction information struct.
2688 */
2689DECLINLINE(VBOXSTRICTRC) iemVmxVmexitInstrWithInfo(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
2690{
2691 /*
2692 * For instructions where any of the following fields are not applicable:
2693 * - VM-exit instruction info. is undefined.
2694 * - VM-exit qualification must be cleared.
2695 * - VM-exit guest-linear address is undefined.
2696 * - VM-exit guest-physical address is undefined.
2697 *
2698 * The VM-exit instruction length is mandatory for all VM-exits that are caused by
2699 * instruction execution.
2700 *
2701 * In our implementation in IEM, all undefined fields are generally cleared. However,
2702 * if the caller supplies information (from say the physical CPU directly) it is
2703 * then possible that the undefined fields are not cleared.
2704 *
2705 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2706 * See Intel spec. 27.2.4 "Information for VM Exits Due to Instruction Execution".
2707 */
2708 Assert(pExitInfo);
2709 AssertMsg(pExitInfo->uReason <= VMX_EXIT_MAX, ("uReason=%u\n", pExitInfo->uReason));
2710 AssertMsg(pExitInfo->cbInstr >= 1 && pExitInfo->cbInstr <= 15,
2711 ("uReason=%u cbInstr=%u\n", pExitInfo->uReason, pExitInfo->cbInstr));
2712
2713 /* Update all the relevant fields from the VM-exit instruction information struct. */
2714 iemVmxVmcsSetExitInstrInfo(pVCpu, pExitInfo->InstrInfo.u);
2715 iemVmxVmcsSetExitQual(pVCpu, pExitInfo->u64Qual);
2716 iemVmxVmcsSetExitGuestLinearAddr(pVCpu, pExitInfo->u64GuestLinearAddr);
2717 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, pExitInfo->u64GuestPhysAddr);
2718 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
2719
2720 /* Perform the VM-exit. */
2721 return iemVmxVmexit(pVCpu, pExitInfo->uReason);
2722}
2723
2724
2725/**
2726 * VMX VM-exit handler for VM-exits due to instruction execution.
2727 *
2728 * This is intended for instructions that only provide the VM-exit instruction
2729 * length.
2730 *
2731 * @param pVCpu The cross context virtual CPU structure.
2732 * @param uExitReason The VM-exit reason.
2733 * @param cbInstr The instruction length in bytes.
2734 */
2735IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstr(PVMCPU pVCpu, uint32_t uExitReason, uint8_t cbInstr)
2736{
2737 VMXVEXITINFO ExitInfo;
2738 RT_ZERO(ExitInfo);
2739 ExitInfo.uReason = uExitReason;
2740 ExitInfo.cbInstr = cbInstr;
2741
2742#ifdef VBOX_STRICT
2743 /* To prevent us from shooting ourselves in the foot. Maybe remove later. */
2744 switch (uExitReason)
2745 {
2746 case VMX_EXIT_INVEPT:
2747 case VMX_EXIT_INVPCID:
2748 case VMX_EXIT_LDTR_TR_ACCESS:
2749 case VMX_EXIT_GDTR_IDTR_ACCESS:
2750 case VMX_EXIT_VMCLEAR:
2751 case VMX_EXIT_VMPTRLD:
2752 case VMX_EXIT_VMPTRST:
2753 case VMX_EXIT_VMREAD:
2754 case VMX_EXIT_VMWRITE:
2755 case VMX_EXIT_VMXON:
2756 case VMX_EXIT_XRSTORS:
2757 case VMX_EXIT_XSAVES:
2758 case VMX_EXIT_RDRAND:
2759 case VMX_EXIT_RDSEED:
2760 case VMX_EXIT_IO_INSTR:
2761 AssertMsgFailedReturn(("Use iemVmxVmexitInstrNeedsInfo for uExitReason=%u\n", uExitReason), VERR_IEM_IPE_5);
2762 break;
2763 }
2764#endif
2765
2766 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2767}
2768
2769
2770/**
2771 * VMX VM-exit handler for VM-exits due to instruction execution.
2772 *
2773 * This is intended for instructions that have a ModR/M byte and update the VM-exit
2774 * instruction information and VM-exit qualification fields.
2775 *
2776 * @param pVCpu The cross context virtual CPU structure.
2777 * @param uExitReason The VM-exit reason.
2778 * @param   uInstrId    The instruction identity (VMXINSTRID_XXX).
2779 * @param cbInstr The instruction length in bytes.
2780 *
2781 * @remarks Do not use this for INS/OUTS instructions.
2782 */
2783IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrNeedsInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr)
2784{
2785 VMXVEXITINFO ExitInfo;
2786 RT_ZERO(ExitInfo);
2787 ExitInfo.uReason = uExitReason;
2788 ExitInfo.cbInstr = cbInstr;
2789
2790 /*
2791 * Update the VM-exit qualification field with displacement bytes.
2792 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2793 */
2794 switch (uExitReason)
2795 {
2796 case VMX_EXIT_INVEPT:
2797 case VMX_EXIT_INVPCID:
2798 case VMX_EXIT_LDTR_TR_ACCESS:
2799 case VMX_EXIT_GDTR_IDTR_ACCESS:
2800 case VMX_EXIT_VMCLEAR:
2801 case VMX_EXIT_VMPTRLD:
2802 case VMX_EXIT_VMPTRST:
2803 case VMX_EXIT_VMREAD:
2804 case VMX_EXIT_VMWRITE:
2805 case VMX_EXIT_VMXON:
2806 case VMX_EXIT_XRSTORS:
2807 case VMX_EXIT_XSAVES:
2808 case VMX_EXIT_RDRAND:
2809 case VMX_EXIT_RDSEED:
2810 {
2811 /* Construct the VM-exit instruction information. */
2812 RTGCPTR GCPtrDisp;
2813 uint32_t const uInstrInfo = iemVmxGetExitInstrInfo(pVCpu, uExitReason, uInstrId, &GCPtrDisp);
2814
2815 /* Update the VM-exit instruction information. */
2816 ExitInfo.InstrInfo.u = uInstrInfo;
2817
2818 /* Update the VM-exit qualification. */
2819 ExitInfo.u64Qual = GCPtrDisp;
2820 break;
2821 }
2822
2823 default:
2824 AssertMsgFailedReturn(("Use instruction-specific handler\n"), VERR_IEM_IPE_5);
2825 break;
2826 }
2827
2828 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2829}
2830
2831
2832/**
2833 * VMX VM-exit handler for VM-exits due to INVLPG.
2834 *
2835 * @param pVCpu The cross context virtual CPU structure.
2836 * @param GCPtrPage The guest-linear address of the page being invalidated.
2837 * @param cbInstr The instruction length in bytes.
2838 */
2839IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrInvlpg(PVMCPU pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr)
2840{
2841 VMXVEXITINFO ExitInfo;
2842 RT_ZERO(ExitInfo);
2843 ExitInfo.uReason = VMX_EXIT_INVLPG;
2844 ExitInfo.cbInstr = cbInstr;
2845 ExitInfo.u64Qual = GCPtrPage;
2846 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(ExitInfo.u64Qual));
2847
2848 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2849}
2850
2851
2852/**
2853 * VMX VM-exit handler for VM-exits due to LMSW.
2854 *
2855 * @returns Strict VBox status code.
2856 * @param pVCpu The cross context virtual CPU structure.
2857 * @param uGuestCr0 The current guest CR0.
2858 * @param pu16NewMsw The machine-status word specified in LMSW's source
2859 * operand. This will be updated depending on the VMX
2860 * guest/host CR0 mask if LMSW is not intercepted.
2861 * @param GCPtrEffDst The guest-linear address of the source operand in case
2862 * of a memory operand. For register operand, pass
2863 * NIL_RTGCPTR.
2864 * @param cbInstr The instruction length in bytes.
2865 */
2866IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrLmsw(PVMCPU pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw, RTGCPTR GCPtrEffDst,
2867 uint8_t cbInstr)
2868{
2869 /*
2870 * LMSW VM-exits are subject to the CR0 guest/host mask and the CR0 read shadow.
2871 *
2872 * See Intel spec. 24.6.6 "Guest/Host Masks and Read Shadows for CR0 and CR4".
2873 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2874 */
2875 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2876 Assert(pVmcs);
2877 Assert(pu16NewMsw);
2878
2879 bool fIntercept = false;
2880 uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
2881 uint32_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u;
2882
2883 /*
2884 * LMSW can never clear CR0.PE but it may set it. Hence, we handle the
2885 * CR0.PE case first, before the rest of the bits in the MSW.
2886 *
2887 * If CR0.PE is owned by the host and CR0.PE differs between the
2888 * MSW (source operand) and the read-shadow, we must cause a VM-exit.
2889 */
2890 if ( (fGstHostMask & X86_CR0_PE)
2891 && (*pu16NewMsw & X86_CR0_PE)
2892 && !(fReadShadow & X86_CR0_PE))
2893 fIntercept = true;
2894
2895 /*
2896 * If CR0.MP, CR0.EM or CR0.TS is owned by the host, and the corresponding
2897 * bits differ between the MSW (source operand) and the read-shadow, we must
2898 * cause a VM-exit.
2899 */
2900 uint32_t fGstHostLmswMask = fGstHostMask & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
2901 if ((fReadShadow & fGstHostLmswMask) != (*pu16NewMsw & fGstHostLmswMask))
2902 fIntercept = true;
2903
2904 if (fIntercept)
2905 {
2906 Log2(("lmsw: Guest intercept -> VM-exit\n"));
2907
2908 VMXVEXITINFO ExitInfo;
2909 RT_ZERO(ExitInfo);
2910 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
2911 ExitInfo.cbInstr = cbInstr;
2912
2913 bool const fMemOperand = RT_BOOL(GCPtrEffDst != NIL_RTGCPTR);
2914 if (fMemOperand)
2915 {
2916 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(GCPtrEffDst));
2917 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
2918 }
2919
2920 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
2921 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_LMSW)
2922 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_OP, fMemOperand)
2923 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_DATA, *pu16NewMsw);
2924
2925 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2926 }
2927
2928 /*
2929 * If LMSW did not cause a VM-exit, any CR0 bits in the range 0:3 that are set in the
2930 * CR0 guest/host mask must be left unmodified.
2931 *
2932 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
2933 */
2934 fGstHostLmswMask = fGstHostMask & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
2935 *pu16NewMsw = (uGuestCr0 & fGstHostLmswMask) | (*pu16NewMsw & ~fGstHostLmswMask);
2936
2937 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
2938}
2939
2940
2941/**
2942 * VMX VM-exit handler for VM-exits due to CLTS.
2943 *
2944 * @returns Strict VBox status code.
2945 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the CLTS instruction did not cause a
2946 * VM-exit but must not modify the guest CR0.TS bit.
2947 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the CLTS instruction did not cause a
2948 * VM-exit and modification to the guest CR0.TS bit is allowed (subject to
2949 * CR0 fixed bits in VMX operation).
2950 * @param pVCpu The cross context virtual CPU structure.
2951 * @param cbInstr The instruction length in bytes.
2952 */
2953IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrClts(PVMCPU pVCpu, uint8_t cbInstr)
2954{
2955 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2956 Assert(pVmcs);
2957
2958 uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
2959 uint32_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u;
2960
2961 /*
2962 * If CR0.TS is owned by the host:
2963 * - If CR0.TS is set in the read-shadow, we must cause a VM-exit.
2964 * - If CR0.TS is cleared in the read-shadow, no VM-exit is caused and the
2965 * CLTS instruction completes without clearing CR0.TS.
2966 *
2967 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2968 */
2969 if (fGstHostMask & X86_CR0_TS)
2970 {
2971 if (fReadShadow & X86_CR0_TS)
2972 {
2973 Log2(("clts: Guest intercept -> VM-exit\n"));
2974
2975 VMXVEXITINFO ExitInfo;
2976 RT_ZERO(ExitInfo);
2977 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
2978 ExitInfo.cbInstr = cbInstr;
2979
2980 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
2981 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_CLTS);
2982 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2983 }
2984
2985 return VINF_VMX_MODIFIES_BEHAVIOR;
2986 }
2987
2988 /*
 * If CR0.TS is not owned by the host, the CLTS instruction operates normally
2990 * and may modify CR0.TS (subject to CR0 fixed bits in VMX operation).
2991 */
2992 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
2993}
2994
2995
2996/**
2997 * VMX VM-exit handler for VM-exits due to 'Mov CR0,GReg' and 'Mov CR4,GReg'
2998 * (CR0/CR4 write).
2999 *
3000 * @returns Strict VBox status code.
3001 * @param pVCpu The cross context virtual CPU structure.
3002 * @param iCrReg The control register (either CR0 or CR4).
3004 * @param puNewCrX Pointer to the new CR0/CR4 value. Will be updated
3005 * if no VM-exit is caused.
3006 * @param iGReg The general register from which the CR0/CR4 value is
3007 * being loaded.
3008 * @param cbInstr The instruction length in bytes.
3009 */
3010IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr0Cr4(PVMCPU pVCpu, uint8_t iCrReg, uint64_t *puNewCrX, uint8_t iGReg,
3011 uint8_t cbInstr)
3012{
3013 Assert(puNewCrX);
3014 Assert(iCrReg == 0 || iCrReg == 4);
3015
3016 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3017 Assert(pVmcs);
3018
3019 uint64_t uGuestCrX;
3020 uint64_t fGstHostMask;
3021 uint64_t fReadShadow;
3022 if (iCrReg == 0)
3023 {
3024 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
3025 uGuestCrX = pVCpu->cpum.GstCtx.cr0;
3026 fGstHostMask = pVmcs->u64Cr0Mask.u;
3027 fReadShadow = pVmcs->u64Cr0ReadShadow.u;
3028 }
3029 else
3030 {
3031 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
3032 uGuestCrX = pVCpu->cpum.GstCtx.cr4;
3033 fGstHostMask = pVmcs->u64Cr4Mask.u;
3034 fReadShadow = pVmcs->u64Cr4ReadShadow.u;
3035 }
3036
3037 /*
3038 * For any CR0/CR4 bit owned by the host (in the CR0/CR4 guest/host mask), if the
3039 * corresponding bits differ between the source operand and the read-shadow,
3040 * we must cause a VM-exit.
3041 *
3042 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3043 */
3044 if ((fReadShadow & fGstHostMask) != (*puNewCrX & fGstHostMask))
3045 {
3046 Log2(("mov_Cr_Rd: (CR%u) Guest intercept -> VM-exit\n", iCrReg));
3047
3048 VMXVEXITINFO ExitInfo;
3049 RT_ZERO(ExitInfo);
3050 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3051 ExitInfo.cbInstr = cbInstr;
3052
3053 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, iCrReg)
3054 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3055 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3056 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3057 }
3058
3059 /*
3060 * If the Mov-to-CR0/CR4 did not cause a VM-exit, any bits owned by the host
 * must not be modified by the instruction.
3062 *
3063 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3064 */
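    /*
     * If, say, the nested hypervisor owns CR4.VMXE in its guest/host mask, a guest
     * attempt to toggle that bit is dropped here and the current guest value is kept.
     */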
3065 *puNewCrX = (uGuestCrX & fGstHostMask) | (*puNewCrX & ~fGstHostMask);
3066
3067 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3068}
3069
3070
3071/**
3072 * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR3' (CR3 read).
3073 *
3074 * @returns VBox strict status code.
3075 * @param pVCpu The cross context virtual CPU structure.
3076 * @param iGReg The general register to which the CR3 value is being stored.
3077 * @param cbInstr The instruction length in bytes.
3078 */
3079IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3080{
3081 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3082 Assert(pVmcs);
3083 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
3084
3085 /*
3086 * If the CR3-store exiting control is set, we must cause a VM-exit.
3087 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3088 */
3089 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT)
3090 {
3091 Log2(("mov_Rd_Cr: (CR3) Guest intercept -> VM-exit\n"));
3092
3093 VMXVEXITINFO ExitInfo;
3094 RT_ZERO(ExitInfo);
3095 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3096 ExitInfo.cbInstr = cbInstr;
3097
3098 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
3099 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ)
3100 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3101 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3102 }
3103
3104 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3105}
3106
3107
3108/**
3109 * VMX VM-exit handler for VM-exits due to 'Mov CR3,GReg' (CR3 write).
3110 *
3111 * @returns VBox strict status code.
3112 * @param pVCpu The cross context virtual CPU structure.
3113 * @param uNewCr3 The new CR3 value.
3114 * @param iGReg The general register from which the CR3 value is being
3115 * loaded.
3116 * @param cbInstr The instruction length in bytes.
3117 */
3118IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr3(PVMCPU pVCpu, uint64_t uNewCr3, uint8_t iGReg, uint8_t cbInstr)
3119{
3120 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3121 Assert(pVmcs);
3122
3123 /*
3124 * If the CR3-load exiting control is set and the new CR3 value does not
3125 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
3126 *
3127 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3128 */
3129 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR3_LOAD_EXIT)
3130 {
3131 uint32_t uCr3TargetCount = pVmcs->u32Cr3TargetCount;
3132 Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);
3133
        /*
         * A VM-exit is required only if the new CR3 value matches none of the CR3-target
         * values; a CR3-target count of zero therefore always causes a VM-exit.
         */
        bool fIntercept = true;
        for (uint32_t idxCr3Target = 0; idxCr3Target < uCr3TargetCount; idxCr3Target++)
        {
            uint64_t const uCr3TargetValue = iemVmxVmcsGetCr3TargetValue(pVmcs, idxCr3Target);
            if (uNewCr3 == uCr3TargetValue)
            {
                fIntercept = false;
                break;
            }
        }

        if (fIntercept)
        {
            Log2(("mov_Cr_Rd: (CR3) Guest intercept -> VM-exit\n"));

            VMXVEXITINFO ExitInfo;
            RT_ZERO(ExitInfo);
            ExitInfo.uReason = VMX_EXIT_MOV_CRX;
            ExitInfo.cbInstr = cbInstr;

            ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
                             | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
                             | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
            return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
        }
3152 }
3153
3154 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3155}
3156
3157
3158/**
3159 * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR8' (CR8 read).
3160 *
3161 * @returns VBox strict status code.
3162 * @param pVCpu The cross context virtual CPU structure.
3163 * @param iGReg The general register to which the CR8 value is being stored.
3164 * @param cbInstr The instruction length in bytes.
3165 */
3166IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr8(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3167{
3168 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3169 Assert(pVmcs);
3170
3171 /*
3172 * If the CR8-store exiting control is set, we must cause a VM-exit.
3173 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3174 */
3175 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR8_STORE_EXIT)
3176 {
3177 Log2(("mov_Rd_Cr: (CR8) Guest intercept -> VM-exit\n"));
3178
3179 VMXVEXITINFO ExitInfo;
3180 RT_ZERO(ExitInfo);
3181 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3182 ExitInfo.cbInstr = cbInstr;
3183
3184 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
3185 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ)
3186 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3187 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3188 }
3189
3190 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3191}
3192
3193
3194/**
3195 * Checks guest control registers, debug registers and MSRs as part of VM-entry.
3196 *
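 * @returns VBox status code.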
3197 * @param pVCpu The cross context virtual CPU structure.
3198 * @param pszInstr The VMX instruction name (for logging purposes).
3199 */
3200IEM_STATIC int iemVmxVmentryCheckGuestControlRegsMsrs(PVMCPU pVCpu, const char *pszInstr)
3201{
3202 /*
3203 * Guest Control Registers, Debug Registers, and MSRs.
3204 * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs".
3205 */
3206 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3207 const char *const pszFailure = "VM-exit";
3208 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
3209
3210 /* CR0 reserved bits. */
3211 {
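        /*
         * The IA32_VMX_CR0_FIXED0/FIXED1 MSRs describe the forced CR0 bits: a bit that is
         * set in FIXED0 must be 1 in the guest CR0, and a bit that is clear in FIXED1 must
         * be 0 (CR0.NE, for instance, is typically fixed to 1 in VMX operation).
         */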
3212 /* CR0 MB1 bits. */
3213 uint64_t u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
3214 Assert(!(u64Cr0Fixed0 & (X86_CR0_NW | X86_CR0_CD)));
3215 if (fUnrestrictedGuest)
3216 u64Cr0Fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
3217 if ((pVmcs->u64GuestCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
3218 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed0);
3219
3220 /* CR0 MBZ bits. */
3221 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
3222 if (pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1)
3223 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed1);
3224
        /* Without unrestricted-guest support, VT-x does not support unpaged protected mode. */
3226 if ( !fUnrestrictedGuest
3227 && (pVmcs->u64GuestCr0.u & X86_CR0_PG)
3228 && !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
3229 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0PgPe);
3230 }
3231
3232 /* CR4 reserved bits. */
3233 {
3234 /* CR4 MB1 bits. */
3235 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
3236 if ((pVmcs->u64GuestCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
3237 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed0);
3238
3239 /* CR4 MBZ bits. */
3240 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
3241 if (pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1)
3242 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed1);
3243 }
3244
3245 /* DEBUGCTL MSR. */
3246 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
3247 && (pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
3248 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDebugCtl);
3249
3250 /* 64-bit CPU checks. */
3251 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
3252 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3253 {
3254 if (fGstInLongMode)
3255 {
            /* CR0.PG and CR4.PAE must both be set. */
            if (   (pVmcs->u64GuestCr0.u & X86_CR0_PG)
                && (pVmcs->u64GuestCr4.u & X86_CR4_PAE))
3259 { /* likely */ }
3260 else
3261 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPae);
3262 }
3263 else
3264 {
3265 /* PCIDE should not be set. */
3266 if (!(pVmcs->u64GuestCr4.u & X86_CR4_PCIDE))
3267 { /* likely */ }
3268 else
3269 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPcide);
3270 }
3271
3272 /* CR3. */
3273 if (!(pVmcs->u64GuestCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
3274 { /* likely */ }
3275 else
3276 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr3);
3277
3278 /* DR7. */
3279 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
3280 && (pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
3281 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDr7);
3282
3283 /* SYSENTER ESP and SYSENTER EIP. */
3284 if ( X86_IS_CANONICAL(pVmcs->u64GuestSysenterEsp.u)
3285 && X86_IS_CANONICAL(pVmcs->u64GuestSysenterEip.u))
3286 { /* likely */ }
3287 else
3288 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSysenterEspEip);
3289 }
3290
3291 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
3292 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
3293
3294 /* PAT MSR. */
3295 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
3296 && !CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
3297 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPatMsr);
3298
3299 /* EFER MSR. */
3300 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
3301 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
3302 && (pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
3303 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsrRsvd);
3304
    bool const fGstLma = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LMA);
    bool const fGstLme = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LME);
3307 if ( fGstInLongMode == fGstLma
3308 && ( !(pVmcs->u64GuestCr0.u & X86_CR0_PG)
3309 || fGstLma == fGstLme))
3310 { /* likely */ }
3311 else
3312 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsr);
3313
3314 /* We don't support IA32_BNDCFGS MSR yet. */
3315 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
3316
3317 NOREF(pszInstr);
3318 NOREF(pszFailure);
3319 return VINF_SUCCESS;
3320}
3321
3322
3323/**
3324 * Checks guest segment registers, LDTR and TR as part of VM-entry.
3325 *
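 * @returns VBox status code.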
3326 * @param pVCpu The cross context virtual CPU structure.
3327 * @param pszInstr The VMX instruction name (for logging purposes).
3328 */
3329IEM_STATIC int iemVmxVmentryCheckGuestSegRegs(PVMCPU pVCpu, const char *pszInstr)
3330{
3331 /*
3332 * Segment registers.
3333 * See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
3334 */
3335 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3336 const char *const pszFailure = "VM-exit";
3337 bool const fGstInV86Mode = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);
3338 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
3339 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
3340
3341 /* Selectors. */
3342 if ( !fGstInV86Mode
3343 && !fUnrestrictedGuest
3344 && (pVmcs->GuestSs & X86_SEL_RPL) != (pVmcs->GuestCs & X86_SEL_RPL))
3345 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelCsSsRpl);
3346
3347 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
3348 {
3349 CPUMSELREG SelReg;
3350 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &SelReg);
3351 if (RT_LIKELY(rc == VINF_SUCCESS))
3352 { /* likely */ }
3353 else
3354 return rc;
3355
3356 /*
3357 * Virtual-8086 mode checks.
3358 */
3359 if (fGstInV86Mode)
3360 {
3361 /* Base address. */
3362 if (SelReg.u64Base == (uint64_t)SelReg.Sel << 4)
3363 { /* likely */ }
3364 else
3365 {
3366 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBaseV86(iSegReg);
3367 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3368 }
3369
3370 /* Limit. */
3371 if (SelReg.u32Limit == 0xffff)
3372 { /* likely */ }
3373 else
3374 {
3375 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegLimitV86(iSegReg);
3376 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3377 }
3378
3379 /* Attribute. */
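            /* In virtual-8086 mode the attributes must be exactly 0xf3, i.e. a present,
               DPL-3, read/write, accessed data segment. */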
3380 if (SelReg.Attr.u == 0xf3)
3381 { /* likely */ }
3382 else
3383 {
3384 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrV86(iSegReg);
3385 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3386 }
3387
3388 /* We're done; move to checking the next segment. */
3389 continue;
3390 }
3391
3392 /* Checks done by 64-bit CPUs. */
3393 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3394 {
3395 /* Base address. */
3396 if ( iSegReg == X86_SREG_FS
3397 || iSegReg == X86_SREG_GS)
3398 {
3399 if (X86_IS_CANONICAL(SelReg.u64Base))
3400 { /* likely */ }
3401 else
3402 {
3403 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
3404 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3405 }
3406 }
3407 else if (iSegReg == X86_SREG_CS)
3408 {
3409 if (!RT_HI_U32(SelReg.u64Base))
3410 { /* likely */ }
3411 else
3412 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseCs);
3413 }
3414 else
3415 {
3416 if ( SelReg.Attr.n.u1Unusable
3417 || !RT_HI_U32(SelReg.u64Base))
3418 { /* likely */ }
3419 else
3420 {
3421 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
3422 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3423 }
3424 }
3425 }
3426
3427 /*
3428 * Checks outside Virtual-8086 mode.
3429 */
3430 uint8_t const uSegType = SelReg.Attr.n.u4Type;
3431 uint8_t const fCodeDataSeg = SelReg.Attr.n.u1DescType;
3432 uint8_t const fUsable = !SelReg.Attr.n.u1Unusable;
3433 uint8_t const uDpl = SelReg.Attr.n.u2Dpl;
3434 uint8_t const fPresent = SelReg.Attr.n.u1Present;
3435 uint8_t const uGranularity = SelReg.Attr.n.u1Granularity;
3436 uint8_t const uDefBig = SelReg.Attr.n.u1DefBig;
3437 uint8_t const fSegLong = SelReg.Attr.n.u1Long;
3438
3439 /* Code or usable segment. */
3440 if ( iSegReg == X86_SREG_CS
3441 || fUsable)
3442 {
3443 /* Reserved bits (bits 31:17 and bits 11:8). */
3444 if (!(SelReg.Attr.u & 0xfffe0f00))
3445 { /* likely */ }
3446 else
3447 {
3448 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrRsvd(iSegReg);
3449 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3450 }
3451
3452 /* Descriptor type. */
3453 if (fCodeDataSeg)
3454 { /* likely */ }
3455 else
3456 {
3457 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDescType(iSegReg);
3458 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3459 }
3460
3461 /* Present. */
3462 if (fPresent)
3463 { /* likely */ }
3464 else
3465 {
3466 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrPresent(iSegReg);
3467 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3468 }
3469
3470 /* Granularity. */
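            /* With 4K granularity (G=1) the low 12 limit bits must read as all ones, and
               limits with any of bits 31:20 set are only expressible with G=1; hence the
               two-sided check below. */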
3471 if ( ((SelReg.u32Limit & 0x00000fff) == 0x00000fff || !uGranularity)
3472 && ((SelReg.u32Limit & 0xfff00000) == 0x00000000 || uGranularity))
3473 { /* likely */ }
3474 else
3475 {
3476 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrGran(iSegReg);
3477 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3478 }
3479 }
3480
3481 if (iSegReg == X86_SREG_CS)
3482 {
3483 /* Segment Type and DPL. */
3484 if ( uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
3485 && fUnrestrictedGuest)
3486 {
3487 if (uDpl == 0)
3488 { /* likely */ }
3489 else
3490 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplZero);
3491 }
3492 else if ( uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_ACCESSED)
3493 || uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
3494 {
3495 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
3496 if (uDpl == AttrSs.n.u2Dpl)
3497 { /* likely */ }
3498 else
3499 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplEqSs);
3500 }
3501 else if ((uSegType & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
3502 == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
3503 {
3504 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
3505 if (uDpl <= AttrSs.n.u2Dpl)
3506 { /* likely */ }
3507 else
3508 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplLtSs);
3509 }
3510 else
3511 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsType);
3512
3513 /* Def/Big. */
3514 if ( fGstInLongMode
3515 && fSegLong)
3516 {
3517 if (uDefBig == 0)
3518 { /* likely */ }
3519 else
3520 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDefBig);
3521 }
3522 }
3523 else if (iSegReg == X86_SREG_SS)
3524 {
3525 /* Segment Type. */
3526 if ( !fUsable
3527 || uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
3528 || uSegType == (X86_SEL_TYPE_DOWN | X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED))
3529 { /* likely */ }
3530 else
3531 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsType);
3532
3533 /* DPL. */
3534 if (fUnrestrictedGuest)
3535 {
3536 if (uDpl == (SelReg.Sel & X86_SEL_RPL))
3537 { /* likely */ }
3538 else
3539 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplEqRpl);
3540 }
3541 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
3542 if ( AttrCs.n.u4Type == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
                || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
3544 {
3545 if (uDpl == 0)
3546 { /* likely */ }
3547 else
3548 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplZero);
3549 }
3550 }
3551 else
3552 {
3553 /* DS, ES, FS, GS. */
3554 if (fUsable)
3555 {
3556 /* Segment type. */
3557 if (uSegType & X86_SEL_TYPE_ACCESSED)
3558 { /* likely */ }
3559 else
3560 {
3561 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrTypeAcc(iSegReg);
3562 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3563 }
3564
3565 if ( !(uSegType & X86_SEL_TYPE_CODE)
3566 || (uSegType & X86_SEL_TYPE_READ))
3567 { /* likely */ }
3568 else
3569 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsTypeRead);
3570
3571 /* DPL. */
3572 if ( !fUnrestrictedGuest
3573 && uSegType <= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
3574 {
3575 if (uDpl >= (SelReg.Sel & X86_SEL_RPL))
3576 { /* likely */ }
3577 else
3578 {
3579 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDplRpl(iSegReg);
3580 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
3581 }
3582 }
3583 }
3584 }
3585 }
3586
3587 /*
3588 * LDTR.
3589 */
3590 {
3591 CPUMSELREG Ldtr;
3592 Ldtr.Sel = pVmcs->GuestLdtr;
3593 Ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
3594 Ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
        Ldtr.Attr.u   = pVmcs->u32GuestLdtrAttr;
3596
3597 if (!Ldtr.Attr.n.u1Unusable)
3598 {
3599 /* Selector. */
3600 if (!(Ldtr.Sel & X86_SEL_LDT))
3601 { /* likely */ }
3602 else
3603 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelLdtr);
3604
3605 /* Base. */
3606 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3607 {
3608 if (X86_IS_CANONICAL(Ldtr.u64Base))
3609 { /* likely */ }
3610 else
3611 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseLdtr);
3612 }
3613
3614 /* Attributes. */
3615 /* Reserved bits (bits 31:17 and bits 11:8). */
3616 if (!(Ldtr.Attr.u & 0xfffe0f00))
3617 { /* likely */ }
3618 else
3619 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrRsvd);
3620
3621 if (Ldtr.Attr.n.u4Type == X86_SEL_TYPE_SYS_LDT)
3622 { /* likely */ }
3623 else
3624 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrType);
3625
3626 if (!Ldtr.Attr.n.u1DescType)
3627 { /* likely */ }
3628 else
3629 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrDescType);
3630
3631 if (Ldtr.Attr.n.u1Present)
3632 { /* likely */ }
3633 else
3634 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrPresent);
3635
3636 if ( ((Ldtr.u32Limit & 0x00000fff) == 0x00000fff || !Ldtr.Attr.n.u1Granularity)
3637 && ((Ldtr.u32Limit & 0xfff00000) == 0x00000000 || Ldtr.Attr.n.u1Granularity))
3638 { /* likely */ }
3639 else
3640 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrGran);
3641 }
3642 }
3643
3644 /*
3645 * TR.
3646 */
3647 {
3648 CPUMSELREG Tr;
3649 Tr.Sel = pVmcs->GuestTr;
3650 Tr.u32Limit = pVmcs->u32GuestTrLimit;
3651 Tr.u64Base = pVmcs->u64GuestTrBase.u;
        Tr.Attr.u   = pVmcs->u32GuestTrAttr;
3653
3654 /* Selector. */
3655 if (!(Tr.Sel & X86_SEL_LDT))
3656 { /* likely */ }
3657 else
3658 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelTr);
3659
3660 /* Base. */
3661 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3662 {
3663 if (X86_IS_CANONICAL(Tr.u64Base))
3664 { /* likely */ }
3665 else
3666 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseTr);
3667 }
3668
3669 /* Attributes. */
3670 /* Reserved bits (bits 31:17 and bits 11:8). */
3671 if (!(Tr.Attr.u & 0xfffe0f00))
3672 { /* likely */ }
3673 else
3674 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrRsvd);
3675
3676 if (!Tr.Attr.n.u1Unusable)
3677 { /* likely */ }
3678 else
3679 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrUnusable);
3680
3681 if ( Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY
3682 || ( !fGstInLongMode
3683 && Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY))
3684 { /* likely */ }
3685 else
3686 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrType);
3687
3688 if (!Tr.Attr.n.u1DescType)
3689 { /* likely */ }
3690 else
3691 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrDescType);
3692
3693 if (Tr.Attr.n.u1Present)
3694 { /* likely */ }
3695 else
3696 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrPresent);
3697
3698 if ( ((Tr.u32Limit & 0x00000fff) == 0x00000fff || !Tr.Attr.n.u1Granularity)
3699 && ((Tr.u32Limit & 0xfff00000) == 0x00000000 || Tr.Attr.n.u1Granularity))
3700 { /* likely */ }
3701 else
3702 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrGran);
3703 }
3704
3705 NOREF(pszInstr);
3706 NOREF(pszFailure);
3707 return VINF_SUCCESS;
3708}
3709
3710
3711/**
3712 * Checks guest GDTR and IDTR as part of VM-entry.
3713 *
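 * @returns VBox status code.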
3714 * @param pVCpu The cross context virtual CPU structure.
3715 * @param pszInstr The VMX instruction name (for logging purposes).
3716 */
3717IEM_STATIC int iemVmxVmentryCheckGuestGdtrIdtr(PVMCPU pVCpu, const char *pszInstr)
3718{
3719 /*
3720 * GDTR and IDTR.
3721 * See Intel spec. 26.3.1.3 "Checks on Guest Descriptor-Table Registers".
3722 */
3723 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3724 const char *const pszFailure = "VM-exit";
3725
3726 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3727 {
3728 /* Base. */
3729 if (X86_IS_CANONICAL(pVmcs->u64GuestGdtrBase.u))
3730 { /* likely */ }
3731 else
3732 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrBase);
3733
3734 if (X86_IS_CANONICAL(pVmcs->u64GuestIdtrBase.u))
3735 { /* likely */ }
3736 else
3737 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrBase);
3738 }
3739
3740 /* Limit. */
3741 if (!RT_HI_U16(pVmcs->u32GuestGdtrLimit))
3742 { /* likely */ }
3743 else
3744 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrLimit);
3745
3746 if (!RT_HI_U16(pVmcs->u32GuestIdtrLimit))
3747 { /* likely */ }
3748 else
3749 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrLimit);
3750
3751 NOREF(pszInstr);
3752 NOREF(pszFailure);
3753 return VINF_SUCCESS;
3754}
3755
3756
3757/**
3758 * Checks guest RIP and RFLAGS as part of VM-entry.
3759 *
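 * @returns VBox status code.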
3760 * @param pVCpu The cross context virtual CPU structure.
3761 * @param pszInstr The VMX instruction name (for logging purposes).
3762 */
3763IEM_STATIC int iemVmxVmentryCheckGuestRipRFlags(PVMCPU pVCpu, const char *pszInstr)
3764{
3765 /*
3766 * RIP and RFLAGS.
3767 * See Intel spec. 26.3.1.4 "Checks on Guest RIP and RFLAGS".
3768 */
3769 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3770 const char *const pszFailure = "VM-exit";
3771 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
3772
3773 /* RIP. */
3774 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
3775 {
3776 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
3777 if ( !fGstInLongMode
3778 || !AttrCs.n.u1Long)
3779 {
3780 if (!RT_HI_U32(pVmcs->u64GuestRip.u))
3781 { /* likely */ }
3782 else
3783 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRipRsvd);
3784 }
3785
3786 if ( fGstInLongMode
3787 && AttrCs.n.u1Long)
3788 {
3789 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth == 48); /* Canonical. */
3790 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth < 64
3791 && X86_IS_CANONICAL(pVmcs->u64GuestRip.u))
3792 { /* likely */ }
3793 else
3794 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRip);
3795 }
3796 }
3797
3798 /* RFLAGS (bits 63:22 (or 31:22), bits 15, 5, 3 are reserved, bit 1 MB1). */
3799 uint64_t const uGuestRFlags = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode ? pVmcs->u64GuestRFlags.u
3800 : pVmcs->u64GuestRFlags.s.Lo;
3801 if ( !(uGuestRFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK))
3802 && (uGuestRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK)
3803 { /* likely */ }
3804 else
3805 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsRsvd);
3806
3807 if ( fGstInLongMode
3808 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
3809 {
3810 if (!(uGuestRFlags & X86_EFL_VM))
3811 { /* likely */ }
3812 else
3813 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsVm);
3814 }
3815
3816 if ( VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo)
3817 && VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo) == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3818 {
3819 if (uGuestRFlags & X86_EFL_IF)
3820 { /* likely */ }
3821 else
3822 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsIf);
3823 }
3824
3825 NOREF(pszInstr);
3826 NOREF(pszFailure);
3827 return VINF_SUCCESS;
3828}
3829
3830
3831/**
3832 * Checks guest non-register state as part of VM-entry.
3833 *
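 * @returns VBox status code.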
3834 * @param pVCpu The cross context virtual CPU structure.
3835 * @param pszInstr The VMX instruction name (for logging purposes).
3836 */
3837IEM_STATIC int iemVmxVmentryCheckGuestNonRegState(PVMCPU pVCpu, const char *pszInstr)
3838{
3839 /*
3840 * Guest non-register state.
3841 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
3842 */
3843 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3844 const char *const pszFailure = "VM-exit";
3845
3846 /*
3847 * Activity state.
3848 */
3849 uint64_t const u64GuestVmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
3850 uint32_t const fActivityStateMask = RT_BF_GET(u64GuestVmxMiscMsr, VMX_BF_MISC_ACTIVITY_STATES);
3851 if (!(pVmcs->u32GuestActivityState & fActivityStateMask))
3852 { /* likely */ }
3853 else
3854 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateRsvd);
3855
3856 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
3857 if ( !AttrSs.n.u2Dpl
3858 || pVmcs->u32GuestActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT)
3859 { /* likely */ }
3860 else
3861 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateSsDpl);
3862
3863 if ( pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
3864 || pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
3865 {
3866 if (pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE)
3867 { /* likely */ }
3868 else
3869 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateStiMovSs);
3870 }
3871
3872 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
3873 {
3874 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
3875 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(pVmcs->u32EntryIntInfo);
3876 AssertCompile(VMX_V_GUEST_ACTIVITY_STATE_MASK == (VMX_VMCS_GUEST_ACTIVITY_HLT | VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN));
3877 switch (pVmcs->u32GuestActivityState)
3878 {
3879 case VMX_VMCS_GUEST_ACTIVITY_HLT:
3880 {
3881 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT
3882 || uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
3883 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
3884 && ( uVector == X86_XCPT_DB
3885 || uVector == X86_XCPT_MC))
3886 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
3887 && uVector == VMX_ENTRY_INT_INFO_VECTOR_MTF))
3888 { /* likely */ }
3889 else
3890 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateHlt);
3891 break;
3892 }
3893
3894 case VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN:
3895 {
3896 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
3897 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
3898 && uVector == X86_XCPT_MC))
3899 { /* likely */ }
3900 else
3901 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateShutdown);
3902 break;
3903 }
3904
3905 case VMX_VMCS_GUEST_ACTIVITY_ACTIVE:
3906 default:
3907 break;
3908 }
3909 }
3910
3911 /*
3912 * Interruptibility state.
3913 */
3914 if (!(pVmcs->u32GuestIntrState & ~VMX_VMCS_GUEST_INT_STATE_MASK))
3915 { /* likely */ }
3916 else
3917 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRsvd);
3918
3919 if ((pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3920 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3921 { /* likely */ }
3922 else
3923 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateStiMovSs);
3924
3925 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_IF)
3926 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3927 { /* likely */ }
3928 else
3929 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRFlagsSti);
3930
3931 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
3932 {
3933 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
3934 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
3935 {
3936 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
3937 { /* likely */ }
3938 else
3939 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateExtInt);
3940 }
3941 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
3942 {
3943 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
3944 { /* likely */ }
3945 else
3946 {
3947 /*
3948 * We don't support injecting NMIs when blocking-by-STI would be in effect.
3949 * We update the VM-exit qualification only when blocking-by-STI is set
                 * without blocking-by-MovSS being set. Although in practice it does not
                 * make much difference since the order of checks is implementation defined.
3952 */
3953 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
3954 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_NMI_INJECT);
3955 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateNmi);
3956 }
3957
3958 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
3959 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI))
3960 { /* likely */ }
3961 else
3962 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateVirtNmi);
3963 }
3964 }
3965
3966 /* We don't support SMM yet. So blocking-by-SMIs must not be set. */
3967 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI))
3968 { /* likely */ }
3969 else
3970 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateSmi);
3971
3972 /* We don't support SGX yet. So enclave-interruption must not be set. */
3973 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_ENCLAVE))
3974 { /* likely */ }
3975 else
3976 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateEnclave);
3977
3978 /*
3979 * Pending debug exceptions.
3980 */
3981 uint64_t const uPendingDbgXcpt = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode
3982 ? pVmcs->u64GuestPendingDbgXcpt.u
3983 : pVmcs->u64GuestPendingDbgXcpt.s.Lo;
3984 if (!(uPendingDbgXcpt & ~VMX_VMCS_GUEST_PENDING_DEBUG_VALID_MASK))
3985 { /* likely */ }
3986 else
3987 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRsvd);
3988
3989 if ( (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
3990 || pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
3991 {
3992 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_TF)
3993 && !(pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF)
3994 && !(uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
3995 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsTf);
3996
3997 if ( ( !(pVmcs->u64GuestRFlags.u & X86_EFL_TF)
3998 || (pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF))
3999 && (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
4000 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsNoTf);
4001 }
4002
4003 /* We don't support RTM (Real-time Transactional Memory) yet. */
4004 if (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_RTM)
4005 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRtm);
4006
4007 /*
4008 * VMCS link pointer.
4009 */
4010 if (pVmcs->u64VmcsLinkPtr.u != UINT64_C(0xffffffffffffffff))
4011 {
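        /* A VMCS link pointer of all-ones means "not used"; any other value must satisfy
           the alignment, width and revision checks below. */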
4012 RTGCPHYS const GCPhysShadowVmcs = pVmcs->u64VmcsLinkPtr.u;
4013 /* We don't support SMM yet (so VMCS link pointer cannot be the current VMCS). */
4014 if (GCPhysShadowVmcs != IEM_VMX_GET_CURRENT_VMCS(pVCpu))
4015 { /* likely */ }
4016 else
4017 {
4018 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
4019 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrCurVmcs);
4020 }
4021
4022 /* Validate the address. */
4023 if ( (GCPhysShadowVmcs & X86_PAGE_4K_OFFSET_MASK)
4024 || (GCPhysShadowVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4025 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysShadowVmcs))
4026 {
4027 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
4028 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmcsLinkPtr);
4029 }
4030
        /* Read the shadow VMCS referenced by the VMCS-link pointer from guest memory. */
4032 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs));
4033 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs),
4034 GCPhysShadowVmcs, VMX_V_VMCS_SIZE);
4035 if (RT_FAILURE(rc))
4036 {
4037 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
4038 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys);
4039 }
4040
4041 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
4042 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID)
4043 { /* likely */ }
4044 else
4045 {
4046 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
4047 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrRevId);
4048 }
4049
        /* Verify the shadow bit is set if VMCS shadowing is enabled. */
4051 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4052 || pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.fIsShadowVmcs)
4053 { /* likely */ }
4054 else
4055 {
4056 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
4057 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrShadow);
4058 }
4059
4060 /* Finally update our cache of the guest physical address of the shadow VMCS. */
4061 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs = GCPhysShadowVmcs;
4062 }
4063
4064 NOREF(pszInstr);
4065 NOREF(pszFailure);
4066 return VINF_SUCCESS;
4067}
4068
4069
4070/**
4071 * Checks if the PDPTEs referenced by the nested-guest CR3 are valid as part of
4072 * VM-entry.
4073 *
 * @returns VBox status code.
4075 * @param pVCpu The cross context virtual CPU structure.
4076 * @param pszInstr The VMX instruction name (for logging purposes).
4077 * @param pVmcs Pointer to the virtual VMCS.
4078 */
4079IEM_STATIC int iemVmxVmentryCheckGuestPdptesForCr3(PVMCPU pVCpu, const char *pszInstr, PVMXVVMCS pVmcs)
4080{
4081 /*
4082 * Check PDPTEs.
4083 * See Intel spec. 4.4.1 "PDPTE Registers".
4084 */
4085 uint64_t const uGuestCr3 = pVmcs->u64GuestCr3.u & X86_CR3_PAE_PAGE_MASK;
4086 const char *const pszFailure = "VM-exit";
4087
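    /* In PAE paging mode CR3 references a 32-byte aligned table of four 64-bit PDPTEs;
       read all four so the reserved (MBZ) bits of every present entry can be validated. */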
4088 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
4089 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uGuestCr3, sizeof(aPdptes));
4090 if (RT_SUCCESS(rc))
4091 {
4092 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
4093 {
4094 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
4095 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
4096 { /* likely */ }
4097 else
4098 {
4099 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
4100 VMXVDIAG const enmDiag = iemVmxGetDiagVmentryPdpteRsvd(iPdpte);
4101 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4102 }
4103 }
4104 }
4105 else
4106 {
4107 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
4108 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys);
4109 }
4110
4111 NOREF(pszFailure);
4112 return rc;
4113}
4114
4115
4116/**
4117 * Checks guest PDPTEs as part of VM-entry.
4118 *
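 * @returns VBox status code.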
4119 * @param pVCpu The cross context virtual CPU structure.
4120 * @param pszInstr The VMX instruction name (for logging purposes).
4121 */
4122IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPU pVCpu, const char *pszInstr)
4123{
4124 /*
4125 * Guest PDPTEs.
4126 * See Intel spec. 26.3.1.5 "Checks on Guest Page-Directory-Pointer-Table Entries".
4127 */
4128 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4129 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4130
    /* Check PDPTEs if the VM-entry is to a guest using PAE paging. */
4132 int rc;
4133 if ( !fGstInLongMode
4134 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE)
4135 && (pVmcs->u64GuestCr0.u & X86_CR0_PG))
4136 {
4137 /*
4138 * We don't support nested-paging for nested-guests yet.
4139 *
4140 * Without nested-paging for nested-guests, PDPTEs in the VMCS are not used,
4141 * rather we need to check the PDPTEs referenced by the guest CR3.
4142 */
4143 rc = iemVmxVmentryCheckGuestPdptesForCr3(pVCpu, pszInstr, pVmcs);
4144 }
4145 else
4146 rc = VINF_SUCCESS;
4147 return rc;
4148}
4149
4150
4151/**
4152 * Checks guest-state as part of VM-entry.
4153 *
4154 * @returns VBox status code.
4155 * @param pVCpu The cross context virtual CPU structure.
4156 * @param pszInstr The VMX instruction name (for logging purposes).
4157 */
4158IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPU pVCpu, const char *pszInstr)
4159{
4160 int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
4161 if (RT_SUCCESS(rc))
4162 {
4163 rc = iemVmxVmentryCheckGuestSegRegs(pVCpu, pszInstr);
4164 if (RT_SUCCESS(rc))
4165 {
4166 rc = iemVmxVmentryCheckGuestGdtrIdtr(pVCpu, pszInstr);
4167 if (RT_SUCCESS(rc))
4168 {
4169 rc = iemVmxVmentryCheckGuestRipRFlags(pVCpu, pszInstr);
4170 if (RT_SUCCESS(rc))
4171 {
4172 rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
4173 if (RT_SUCCESS(rc))
4174 return iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
4175 }
4176 }
4177 }
4178 }
4179 return rc;
4180}
4181
4182
4183/**
4184 * Checks host-state as part of VM-entry.
4185 *
4186 * @returns VBox status code.
4187 * @param pVCpu The cross context virtual CPU structure.
4188 * @param pszInstr The VMX instruction name (for logging purposes).
4189 */
4190IEM_STATIC int iemVmxVmentryCheckHostState(PVMCPU pVCpu, const char *pszInstr)
4191{
4192 /*
4193 * Host Control Registers and MSRs.
4194 * See Intel spec. 26.2.2 "Checks on Host Control Registers and MSRs".
4195 */
4196 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4197 const char * const pszFailure = "VMFail";
4198
4199 /* CR0 reserved bits. */
4200 {
4201 /* CR0 MB1 bits. */
4202 uint64_t const u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
4203 if ((pVmcs->u64HostCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
4204 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed0);
4205
4206 /* CR0 MBZ bits. */
4207 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
4208 if (pVmcs->u64HostCr0.u & ~u64Cr0Fixed1)
4209 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed1);
4210 }
4211
4212 /* CR4 reserved bits. */
4213 {
4214 /* CR4 MB1 bits. */
4215 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
4216 if ((pVmcs->u64HostCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
4217 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed0);
4218
4219 /* CR4 MBZ bits. */
4220 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
4221 if (pVmcs->u64HostCr4.u & ~u64Cr4Fixed1)
4222 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed1);
4223 }
4224
4225 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4226 {
4227 /* CR3 reserved bits. */
4228 if (!(pVmcs->u64HostCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
4229 { /* likely */ }
4230 else
4231 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr3);
4232
4233 /* SYSENTER ESP and SYSENTER EIP. */
4234 if ( X86_IS_CANONICAL(pVmcs->u64HostSysenterEsp.u)
4235 && X86_IS_CANONICAL(pVmcs->u64HostSysenterEip.u))
4236 { /* likely */ }
4237 else
4238 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSysenterEspEip);
4239 }
4240
4241 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
4242 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PERF_MSR));
4243
4244 /* PAT MSR. */
4245 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
4246 || CPUMIsPatMsrValid(pVmcs->u64HostPatMsr.u))
4247 { /* likely */ }
4248 else
4249 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostPatMsr);
4250
4251 /* EFER MSR. */
4252 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
4253 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
4254 || !(pVmcs->u64HostEferMsr.u & ~uValidEferMask))
4255 { /* likely */ }
4256 else
4257 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsrRsvd);
4258
4259 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
4260 bool const fHostLma = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LMA);
4261 bool const fHostLme = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LME);
4262 if ( fHostInLongMode == fHostLma
4263 && fHostInLongMode == fHostLme)
4264 { /* likely */ }
4265 else
4266 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsr);
4267
4268 /*
4269 * Host Segment and Descriptor-Table Registers.
4270 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
4271 */
4272 /* Selector RPL and TI. */
4273 if ( !(pVmcs->HostCs & (X86_SEL_RPL | X86_SEL_LDT))
4274 && !(pVmcs->HostSs & (X86_SEL_RPL | X86_SEL_LDT))
4275 && !(pVmcs->HostDs & (X86_SEL_RPL | X86_SEL_LDT))
4276 && !(pVmcs->HostEs & (X86_SEL_RPL | X86_SEL_LDT))
4277 && !(pVmcs->HostFs & (X86_SEL_RPL | X86_SEL_LDT))
4278 && !(pVmcs->HostGs & (X86_SEL_RPL | X86_SEL_LDT))
4279 && !(pVmcs->HostTr & (X86_SEL_RPL | X86_SEL_LDT)))
4280 { /* likely */ }
4281 else
4282 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSel);
4283
4284 /* CS and TR selectors cannot be 0. */
4285 if ( pVmcs->HostCs
4286 && pVmcs->HostTr)
4287 { /* likely */ }
4288 else
4289 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCsTr);
4290
4291 /* SS cannot be 0 if 32-bit host. */
4292 if ( fHostInLongMode
4293 || pVmcs->HostSs)
4294 { /* likely */ }
4295 else
4296 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSs);
4297
4298 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4299 {
4300 /* FS, GS, GDTR, IDTR, TR base address. */
4301 if ( X86_IS_CANONICAL(pVmcs->u64HostFsBase.u)
            && X86_IS_CANONICAL(pVmcs->u64HostGsBase.u)
4303 && X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u)
4304 && X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u)
4305 && X86_IS_CANONICAL(pVmcs->u64HostTrBase.u))
4306 { /* likely */ }
4307 else
4308 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSegBase);
4309 }
4310
4311 /*
4312 * Host address-space size for 64-bit CPUs.
4313 * See Intel spec. 26.2.4 "Checks Related to Address-Space Size".
4314 */
4315 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4316 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4317 {
4318 bool const fCpuInLongMode = CPUMIsGuestInLongMode(pVCpu);
4319
4320 /* Logical processor in IA-32e mode. */
4321 if (fCpuInLongMode)
4322 {
4323 if (fHostInLongMode)
4324 {
4325 /* PAE must be set. */
4326 if (pVmcs->u64HostCr4.u & X86_CR4_PAE)
4327 { /* likely */ }
4328 else
4329 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pae);
4330
4331 /* RIP must be canonical. */
4332 if (X86_IS_CANONICAL(pVmcs->u64HostRip.u))
4333 { /* likely */ }
4334 else
4335 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRip);
4336 }
4337 else
4338 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostLongMode);
4339 }
4340 else
4341 {
4342 /* Logical processor is outside IA-32e mode. */
4343 if ( !fGstInLongMode
4344 && !fHostInLongMode)
4345 {
4346 /* PCIDE should not be set. */
4347 if (!(pVmcs->u64HostCr4.u & X86_CR4_PCIDE))
4348 { /* likely */ }
4349 else
4350 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pcide);
4351
4352 /* The high 32-bits of RIP MBZ. */
4353 if (!pVmcs->u64HostRip.s.Hi)
4354 { /* likely */ }
4355 else
4356 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRipRsvd);
4357 }
4358 else
4359 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongMode);
4360 }
4361 }
4362 else
4363 {
4364 /* Host address-space size for 32-bit CPUs. */
4365 if ( !fGstInLongMode
4366 && !fHostInLongMode)
4367 { /* likely */ }
4368 else
4369 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongModeNoCpu);
4370 }
4371
4372 NOREF(pszInstr);
4373 NOREF(pszFailure);
4374 return VINF_SUCCESS;
4375}
4376
4377
4378/**
4379 * Checks VM-entry controls fields as part of VM-entry.
4380 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
4381 *
4382 * @returns VBox status code.
4383 * @param pVCpu The cross context virtual CPU structure.
4384 * @param pszInstr The VMX instruction name (for logging purposes).
4385 */
4386IEM_STATIC int iemVmxVmentryCheckEntryCtls(PVMCPU pVCpu, const char *pszInstr)
4387{
4388 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4389 const char * const pszFailure = "VMFail";
4390
4391 /* VM-entry controls. */
4392 VMXCTLSMSR EntryCtls;
4393 EntryCtls.u = CPUMGetGuestIa32VmxEntryCtls(pVCpu);
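    /* The capability MSR is split into allowed-0 and allowed-1 settings: any control bit
       set in 'disallowed0' must be 1, and any bit clear in 'allowed1' must be 0. */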
4394 if (~pVmcs->u32EntryCtls & EntryCtls.n.disallowed0)
4395 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);
4396
4397 if (pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1)
4398 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsAllowed1);
4399
4400 /* Event injection. */
4401 uint32_t const uIntInfo = pVmcs->u32EntryIntInfo;
4402 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VALID))
4403 {
4404 /* Type and vector. */
4405 uint8_t const uType = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_TYPE);
4406 uint8_t const uVector = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VECTOR);
4407 uint8_t const uRsvd = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_RSVD_12_30);
4408 if ( !uRsvd
4409 && HMVmxIsEntryIntInfoTypeValid(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxMonitorTrapFlag, uType)
4410 && HMVmxIsEntryIntInfoVectorValid(uVector, uType))
4411 { /* likely */ }
4412 else
4413 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoTypeVecRsvd);
4414
4415 /* Exception error code. */
4416 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID))
4417 {
            /* An error code can only be delivered if unrestricted-guest mode is disabled or CR0.PE is set. */
4419 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
4420 || (pVmcs->u64GuestCr0.s.Lo & X86_CR0_PE))
4421 { /* likely */ }
4422 else
4423 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodePe);
4424
4425 /* Exceptions that provide an error code. */
4426 if ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
4427 && ( uVector == X86_XCPT_DF
4428 || uVector == X86_XCPT_TS
4429 || uVector == X86_XCPT_NP
4430 || uVector == X86_XCPT_SS
4431 || uVector == X86_XCPT_GP
4432 || uVector == X86_XCPT_PF
4433 || uVector == X86_XCPT_AC))
4434 { /* likely */ }
4435 else
4436 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodeVec);
4437
4438 /* Exception error-code reserved bits. */
4439 if (!(pVmcs->u32EntryXcptErrCode & ~VMX_ENTRY_INT_XCPT_ERR_CODE_VALID_MASK))
4440 { /* likely */ }
4441 else
4442 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryXcptErrCodeRsvd);
4443
4444 /* Injecting a software interrupt, software exception or privileged software exception. */
4445 if ( uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
4446 || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
4447 || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
4448 {
4449 /* Instruction length must be in the range 0-15. */
4450 if (pVmcs->u32EntryInstrLen <= VMX_ENTRY_INSTR_LEN_MAX)
4451 { /* likely */ }
4452 else
4453 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLen);
4454
4455 /* Instruction length of 0 is allowed only when its CPU feature is present. */
4456 if ( pVmcs->u32EntryInstrLen == 0
4457 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEntryInjectSoftInt)
4458 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLenZero);
4459 }
4460 }
4461 }
4462
4463 /* VM-entry MSR-load count and VM-entry MSR-load area address. */
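    /* The MSR-load area must be 16-byte aligned (the low 4 address bits zero), must not
       exceed the supported physical-address width and must be backed by normal memory. */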
4464 if (pVmcs->u32EntryMsrLoadCount)
4465 {
4466 if ( (pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
4467 || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4468 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
4469 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrEntryMsrLoad);
4470 }
4471
4472 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)); /* We don't support SMM yet. */
4473 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON)); /* We don't support dual-monitor treatment yet. */
4474
4475 NOREF(pszInstr);
4476 NOREF(pszFailure);
4477 return VINF_SUCCESS;
4478}
4479
4480
4481/**
4482 * Checks VM-exit controls fields as part of VM-entry.
4483 * See Intel spec. 26.2.1.2 "VM-Exit Control Fields".
4484 *
4485 * @returns VBox status code.
4486 * @param pVCpu The cross context virtual CPU structure.
4487 * @param pszInstr The VMX instruction name (for logging purposes).
4488 */
4489IEM_STATIC int iemVmxVmentryCheckExitCtls(PVMCPU pVCpu, const char *pszInstr)
4490{
4491 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4492 const char * const pszFailure = "VMFail";
4493
4494 /* VM-exit controls. */
4495 VMXCTLSMSR ExitCtls;
4496 ExitCtls.u = CPUMGetGuestIa32VmxExitCtls(pVCpu);
4497 if (~pVmcs->u32ExitCtls & ExitCtls.n.disallowed0)
4498 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);
4499
4500 if (pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1)
4501 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsAllowed1);
4502
4503 /* Save preemption timer without activating it. */
4504 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
        && (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
4506 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_SavePreemptTimer);
4507
4508 /* VM-exit MSR-store count and VM-exit MSR-store area address. */
4509 if (pVmcs->u32ExitMsrStoreCount)
4510 {
4511 if ( (pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
4512 || (pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4513 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
4514 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrStore);
4515 }
4516
4517 /* VM-exit MSR-load count and VM-exit MSR-load area address. */
4518 if (pVmcs->u32ExitMsrLoadCount)
4519 {
4520 if ( (pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
4521 || (pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4522 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
4523 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrLoad);
4524 }
4525
4526 NOREF(pszInstr);
4527 NOREF(pszFailure);
4528 return VINF_SUCCESS;
4529}
4530
4531
4532/**
4533 * Checks VM-execution controls fields as part of VM-entry.
4534 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
4535 *
4536 * @returns VBox status code.
4537 * @param pVCpu The cross context virtual CPU structure.
4538 * @param pszInstr The VMX instruction name (for logging purposes).
4539 *
4540 * @remarks This may update secondary processor-based VM-execution control fields
4541 * in the current VMCS if necessary.
4542 */
4543IEM_STATIC int iemVmxVmentryCheckExecCtls(PVMCPU pVCpu, const char *pszInstr)
4544{
4545 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4546 const char * const pszFailure = "VMFail";
4547
4548 /* Pin-based VM-execution controls. */
4549 {
4550 VMXCTLSMSR PinCtls;
4551 PinCtls.u = CPUMGetGuestIa32VmxPinbasedCtls(pVCpu);
4552 if (~pVmcs->u32PinCtls & PinCtls.n.disallowed0)
4553 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);
4554
4555 if (pVmcs->u32PinCtls & ~PinCtls.n.allowed1)
4556 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsAllowed1);
4557 }
4558
4559 /* Processor-based VM-execution controls. */
4560 {
4561 VMXCTLSMSR ProcCtls;
4562 ProcCtls.u = CPUMGetGuestIa32VmxProcbasedCtls(pVCpu);
4563 if (~pVmcs->u32ProcCtls & ProcCtls.n.disallowed0)
4564 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);
4565
4566 if (pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1)
4567 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsAllowed1);
4568 }
4569
4570 /* Secondary processor-based VM-execution controls. */
4571 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
4572 {
4573 VMXCTLSMSR ProcCtls2;
4574 ProcCtls2.u = CPUMGetGuestIa32VmxProcbasedCtls2(pVCpu);
4575 if (~pVmcs->u32ProcCtls2 & ProcCtls2.n.disallowed0)
4576 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);
4577
4578 if (pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1)
4579 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Allowed1);
4580 }
4581 else
4582 Assert(!pVmcs->u32ProcCtls2);
4583
4584 /* CR3-target count. */
4585 if (pVmcs->u32Cr3TargetCount <= VMX_V_CR3_TARGET_COUNT)
4586 { /* likely */ }
4587 else
4588 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Cr3TargetCount);
4589
4590 /* IO bitmaps physical addresses. */
4591 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
4592 {
4593 if ( (pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
4594 || (pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4595 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
4596 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapA);
4597
4598 if ( (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
4599 || (pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4600 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
4601 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
4602 }
4603
4604 /* MSR bitmap physical address. */
4605 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
4606 {
4607 RTGCPHYS const GCPhysMsrBitmap = pVmcs->u64AddrMsrBitmap.u;
4608 if ( (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
4609 || (GCPhysMsrBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4610 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysMsrBitmap))
4611 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);
4612
4613 /* Read the MSR bitmap. */
4614 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
4615 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap),
4616 GCPhysMsrBitmap, VMX_V_MSR_BITMAP_SIZE);
4617 if (RT_FAILURE(rc))
4618 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrBitmapPtrReadPhys);
4619 }
4620
4621 /* TPR shadow related controls. */
4622 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4623 {
4624 /* Virtual-APIC page physical address. */
4625 RTGCPHYS GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
4626 if ( (GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
4627 || (GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4628 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
4629 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
4630
4631 /* Read the Virtual-APIC page. */
4632 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4633 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage),
4634 GCPhysVirtApic, VMX_V_VIRT_APIC_PAGES);
4635 if (RT_FAILURE(rc))
4636 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
4637
4638 /* TPR threshold without virtual-interrupt delivery. */
4639 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4640 && (pVmcs->u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK))
4641 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdRsvd);
4642
4643 /* TPR threshold and VTPR. */
4644 uint8_t const *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4645 uint8_t const u8VTpr = *(pbVirtApic + XAPIC_OFF_TPR);
4646 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
4647 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4648 && RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) > ((u8VTpr >> 4) & UINT32_C(0xf)) /* Bits 4:7 */)
4649 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
4650 }
4651 else
4652 {
4653 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
4654 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4655 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
4656 { /* likely */ }
4657 else
4658 {
4659 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
4660 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicTprShadow);
4661 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4662 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ApicRegVirt);
4663 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4664 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtIntDelivery);
4665 }
4666 }
4667
4668 /* NMI exiting and virtual-NMIs. */
4669 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
4670 && (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
4671 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtNmi);
4672
4673 /* Virtual-NMIs and NMI-window exiting. */
4674 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
4675 && (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
4676 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_NmiWindowExit);
4677
4678 /* Virtualize APIC accesses. */
4679 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
4680 {
4681 /* APIC-access physical address. */
4682 RTGCPHYS GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
4683 if ( (GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
4684 || (GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4685 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
4686 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);
4687 }
4688
4689 /* Virtualize-x2APIC mode is mutually exclusive with virtualize-APIC accesses. */
4690 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
4691 && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
4692 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
4693
4694 /* Virtual-interrupt delivery requires external interrupt exiting. */
4695 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4696 && !(pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
4697 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtIntDelivery);
4698
4699 /* VPID. */
4700 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VPID)
4701 || pVmcs->u16Vpid != 0)
4702 { /* likely */ }
4703 else
4704 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Vpid);
4705
4706 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_POSTED_INT)); /* We don't support posted interrupts yet. */
4707 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)); /* We don't support EPT yet. */
4708 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PML)); /* We don't support PML yet. */
4709 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)); /* We don't support Unrestricted-guests yet. */
4710 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMFUNC)); /* We don't support VM functions yet. */
4711 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT_VE)); /* We don't support EPT-violation #VE yet. */
4712 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)); /* We don't support Pause-loop exiting yet. */
4713
4714 /* VMCS shadowing. */
4715 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4716 {
4717 /* VMREAD-bitmap physical address. */
4718 RTGCPHYS GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
4719 if ( ( GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
4720 || ( GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4721 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
4722 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmreadBitmap);
4723
4724 /* VMWRITE-bitmap physical address. */
4725 RTGCPHYS GCPhysVmwriteBitmap = pVmcs->u64AddrVmwriteBitmap.u;
4726 if ( ( GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
4727 || ( GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
4728 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
4729 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);
4730
4731 /* Read the VMREAD-bitmap. */
4732 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
4733 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap),
4734 GCPhysVmreadBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
4735 if (RT_FAILURE(rc))
4736 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys);
4737
4738 /* Read the VMWRITE-bitmap. */
4739 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap));
4740 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap),
4741 GCPhysVmwriteBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
4742 if (RT_FAILURE(rc))
4743 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys);
4744 }
4745
4746 NOREF(pszInstr);
4747 NOREF(pszFailure);
4748 return VINF_SUCCESS;
4749}
4750
4751
4752/**
4753 * Loads the guest control registers, debug register and some MSRs as part of
4754 * VM-entry.
4755 *
4756 * @param pVCpu The cross context virtual CPU structure.
4757 */
4758IEM_STATIC void iemVmxVmentryLoadGuestControlRegsMsrs(PVMCPU pVCpu)
4759{
4760 /*
4761 * Load guest control registers, debug registers and MSRs.
4762 * See Intel spec. 26.3.2.1 "Loading Guest Control Registers, Debug Registers and MSRs".
4763 */
4764 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4765 uint64_t const uGstCr0 = (pVmcs->u64GuestCr0.u & ~VMX_ENTRY_CR0_IGNORE_MASK)
4766 | (pVCpu->cpum.GstCtx.cr0 & VMX_ENTRY_CR0_IGNORE_MASK);
4767 CPUMSetGuestCR0(pVCpu, uGstCr0);
4768 CPUMSetGuestCR4(pVCpu, pVmcs->u64GuestCr4.u);
4769 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64GuestCr3.u;
4770
4771 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4772 pVCpu->cpum.GstCtx.dr[7] = (pVmcs->u64GuestDr7.u & ~VMX_ENTRY_DR7_MBZ_MASK) | VMX_ENTRY_DR7_MB1_MASK;
4773
4774 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64GuestSysenterEip.s.Lo;
4775 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64GuestSysenterEsp.s.Lo;
4776 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32GuestSysenterCS;
4777
4778 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4779 {
4780 /* FS base and GS base are loaded while loading the rest of the guest segment registers. */
4781
4782 /* EFER MSR. */
4783 if (!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR))
4784 {
4785 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4786 bool const fGstPaging = RT_BOOL(uGstCr0 & X86_CR0_PG);
4787 uint64_t const uHostEfer = pVCpu->cpum.GstCtx.msrEFER;
4788 if (fGstInLongMode)
4789 {
4790 /* If the nested-guest is in long mode, LMA and LME are both set. */
4791 Assert(fGstPaging);
4792 pVCpu->cpum.GstCtx.msrEFER = uHostEfer | (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
4793 }
4794 else
4795 {
4796 /*
4797 * If the nested-guest is outside long mode:
4798 * - With paging: LMA is cleared, LME is cleared.
4799 * - Without paging: LMA is cleared, LME is left unmodified.
4800 */
4801 uint64_t const fLmaLmeMask = MSR_K6_EFER_LMA | (fGstPaging ? MSR_K6_EFER_LME : 0);
4802 pVCpu->cpum.GstCtx.msrEFER = uHostEfer & ~fLmaLmeMask;
4803 }
4804 }
4805 /* else: see below. */
4806 }
4807
4808 /* PAT MSR. */
4809 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4810 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64GuestPatMsr.u;
4811
4812 /* EFER MSR. */
4813 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4814 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64GuestEferMsr.u;
4815
4816 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
4817 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
4818
4819 /* We don't support IA32_BNDCFGS MSR yet. */
4820 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
4821
4822 /* Nothing to do for SMBASE register - We don't support SMM yet. */
4823}
4824
4825
4826/**
4827 * Loads the guest segment registers, GDTR, IDTR, LDTR and TR as part of VM-entry.
4828 *
4829 * @param pVCpu The cross context virtual CPU structure.
4830 */
4831IEM_STATIC void iemVmxVmentryLoadGuestSegRegs(PVMCPU pVCpu)
4832{
4833 /*
4834 * Load guest segment registers, GDTR, IDTR, LDTR and TR.
4835 * See Intel spec. 26.3.2.2 "Loading Guest Segment Registers and Descriptor-Table Registers".
4836 */
4837 /* CS, SS, ES, DS, FS, GS. */
4838 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4839 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
4840 {
4841 PCPUMSELREG pGstSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
4842 CPUMSELREG VmcsSelReg;
4843 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &VmcsSelReg);
4844 AssertRC(rc); NOREF(rc);
4845 if (!(VmcsSelReg.Attr.u & X86DESCATTR_UNUSABLE))
4846 {
4847 pGstSelReg->Sel = VmcsSelReg.Sel;
4848 pGstSelReg->ValidSel = VmcsSelReg.Sel;
4849 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
4850 pGstSelReg->u64Base = VmcsSelReg.u64Base;
4851 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
4852 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
4853 }
4854 else
4855 {
4856 pGstSelReg->Sel = VmcsSelReg.Sel;
4857 pGstSelReg->ValidSel = VmcsSelReg.Sel;
4858 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
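            /* Unusable segments get the base/limit/attribute defaults mandated for them, see Intel spec. 26.3.2.2. */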
4859 switch (iSegReg)
4860 {
4861 case X86_SREG_CS:
4862 pGstSelReg->u64Base = VmcsSelReg.u64Base;
4863 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
4864 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
4865 break;
4866
4867 case X86_SREG_SS:
4868 pGstSelReg->u64Base = VmcsSelReg.u64Base & UINT32_C(0xfffffff0);
4869 pGstSelReg->u32Limit = 0;
4870 pGstSelReg->Attr.u = (VmcsSelReg.Attr.u & X86DESCATTR_DPL) | X86DESCATTR_D | X86DESCATTR_UNUSABLE;
4871 break;
4872
4873 case X86_SREG_ES:
4874 case X86_SREG_DS:
4875 pGstSelReg->u64Base = 0;
4876 pGstSelReg->u32Limit = 0;
4877 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
4878 break;
4879
4880 case X86_SREG_FS:
4881 case X86_SREG_GS:
4882 pGstSelReg->u64Base = VmcsSelReg.u64Base;
4883 pGstSelReg->u32Limit = 0;
4884 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
4885 break;
4886 }
4887 Assert(pGstSelReg->Attr.n.u1Unusable);
4888 }
4889 }
4890
4891 /* LDTR. */
4892 pVCpu->cpum.GstCtx.ldtr.Sel = pVmcs->GuestLdtr;
4893 pVCpu->cpum.GstCtx.ldtr.ValidSel = pVmcs->GuestLdtr;
4894 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4895 if (!(pVmcs->u32GuestLdtrAttr & X86DESCATTR_UNUSABLE))
4896 {
4897 pVCpu->cpum.GstCtx.ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
4898 pVCpu->cpum.GstCtx.ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
4899 pVCpu->cpum.GstCtx.ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
4900 }
4901 else
4902 {
4903 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
4904 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
4905 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
4906 }
4907
4908 /* TR. */
4909 Assert(!(pVmcs->u32GuestTrAttr & X86DESCATTR_UNUSABLE));
4910 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->GuestTr;
4911 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->GuestTr;
4912 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4913 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64GuestTrBase.u;
4914 pVCpu->cpum.GstCtx.tr.u32Limit = pVmcs->u32GuestTrLimit;
4915 pVCpu->cpum.GstCtx.tr.Attr.u = pVmcs->u32GuestTrAttr;
4916
4917 /* GDTR. */
4918 pVCpu->cpum.GstCtx.gdtr.cbGdt = pVmcs->u32GuestGdtrLimit;
4919 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64GuestGdtrBase.u;
4920
4921 /* IDTR. */
4922 pVCpu->cpum.GstCtx.idtr.cbIdt = pVmcs->u32GuestIdtrLimit;
4923 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64GuestIdtrBase.u;
4924}
4925
4926
4927/**
4928 * Loads the guest MSRs from the VM-entry auto-load MSRs as part of VM-entry.
4929 *
4930 * @returns VBox status code.
4931 * @param pVCpu The cross context virtual CPU structure.
4932 * @param pszInstr The VMX instruction name (for logging purposes).
4933 */
4934IEM_STATIC int iemVmxVmentryLoadGuestAutoMsrs(PVMCPU pVCpu, const char *pszInstr)
4935{
4936 /*
4937 * Load guest MSRs.
4938 * See Intel spec. 26.4 "Loading MSRs".
4939 */
4940 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4941 const char *const pszFailure = "VM-exit";
4942
4943 /*
4944 * The VM-entry MSR-load area address need not be a valid guest-physical address if the
4945 * VM-entry MSR load count is 0. If this is the case, bail early without reading it.
4946 * See Intel spec. 24.8.2 "VM-Entry Controls for MSRs".
4947 */
4948 uint32_t const cMsrs = pVmcs->u32EntryMsrLoadCount;
4949 if (!cMsrs)
4950 return VINF_SUCCESS;
4951
4952 /*
4953 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count is
4954 * exceeded, possibly raising #MC exceptions during the VMX transition. Our implementation
4955 * shall instead fail VM-entry with a VMX_EXIT_ERR_MSR_LOAD VM-exit.
4956 */
4957 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
4958 if (fIsMsrCountValid)
4959 { /* likely */ }
4960 else
4961 {
4962 iemVmxVmcsSetExitQual(pVCpu, VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
4963 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadCount);
4964 }
4965
4966 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrEntryMsrLoad.u;
4967 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
4968 GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
4969 if (RT_SUCCESS(rc))
4970 {
4971 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
4972 Assert(pMsr);
4973 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
4974 {
4975 if ( !pMsr->u32Reserved
4976 && pMsr->u32Msr != MSR_K8_FS_BASE
4977 && pMsr->u32Msr != MSR_K8_GS_BASE
4978 && pMsr->u32Msr != MSR_K6_EFER
4979 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
4980 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
4981 {
4982 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
4983 if (rcStrict == VINF_SUCCESS)
4984 continue;
4985
4986 /*
4987 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue VM-entry.
4988 * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VM-entry failure
4989 * recording the MSR index in the VM-exit qualification (as per the Intel spec.) and indicating
4990 * it further with our own, specific diagnostic code. Later, we can try to implement handling of the
4991 * MSR in ring-0 if possible, or come up with a better, generic solution.
4992 */
4993 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
4994 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
4995 ? kVmxVDiag_Vmentry_MsrLoadRing3
4996 : kVmxVDiag_Vmentry_MsrLoad;
4997 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4998 }
4999 else
5000 {
5001 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
5002 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadRsvd);
5003 }
5004 }
5005 }
5006 else
5007 {
5008 AssertMsgFailed(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysAutoMsrArea, rc));
5009 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadPtrReadPhys);
5010 }
5011
5012 NOREF(pszInstr);
5013 NOREF(pszFailure);
5014 return VINF_SUCCESS;
5015}
5016
5017
5018/**
5019 * Loads the guest-state non-register state as part of VM-entry.
5020 *
5021 * @returns VBox status code.
5022 * @param pVCpu The cross context virtual CPU structure.
5023 *
5024 * @remarks This must be called only after loading the nested-guest register state
5025 * (especially nested-guest RIP).
5026 */
5027IEM_STATIC void iemVmxVmentryLoadGuestNonRegState(PVMCPU pVCpu)
5028{
5029 /*
5030 * Load guest non-register state.
5031 * See Intel spec. 26.6 "Special Features of VM Entry"
5032 */
5033 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5034 uint32_t const uEntryIntInfo = pVmcs->u32EntryIntInfo;
5035 if (VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
5036 {
5037 /** @todo NSTVMX: Pending debug exceptions. */
5038 Assert(!(pVmcs->u64GuestPendingDbgXcpt.u));
5039
5040 if (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
5041 {
5042 /** @todo NSTVMX: Virtual-NMIs don't affect NMI blocking in the normal sense.
5043 * We probably need a different force flag for virtual-NMI
5044 * pending/blocking. */
5045 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI));
5046 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5047 }
5048 else
5049 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));
5050
5051 if (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5052 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
5053 else
5054 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
5055
5056 /* SMI blocking is irrelevant. We don't support SMIs yet. */
5057 }
5058
5059 /* Loading PDPTEs will be taken care when we switch modes. We don't support EPT yet. */
5060 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
5061
5062 /* VPID is irrelevant. We don't support VPID yet. */
5063
5064 /* Clear address-range monitoring. */
5065 EMMonitorWaitClear(pVCpu);
5066}
5067
5068
5069/**
5070 * Loads the guest-state as part of VM-entry.
5071 *
5072 * @returns VBox status code.
5073 * @param pVCpu The cross context virtual CPU structure.
5074 * @param pszInstr The VMX instruction name (for logging purposes).
5075 *
5076 * @remarks This must be done after all the necessary steps prior to loading of
5077 * guest-state (e.g. checking various VMCS state).
5078 */
5079IEM_STATIC int iemVmxVmentryLoadGuestState(PVMCPU pVCpu, const char *pszInstr)
5080{
5081 iemVmxVmentryLoadGuestControlRegsMsrs(pVCpu);
5082 iemVmxVmentryLoadGuestSegRegs(pVCpu);
5083
5084 /*
5085 * Load guest RIP, RSP and RFLAGS.
5086 * See Intel spec. 26.3.2.3 "Loading Guest RIP, RSP and RFLAGS".
5087 */
5088 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5089 pVCpu->cpum.GstCtx.rsp = pVmcs->u64GuestRsp.u;
5090 pVCpu->cpum.GstCtx.rip = pVmcs->u64GuestRip.u;
5091 pVCpu->cpum.GstCtx.rflags.u = pVmcs->u64GuestRFlags.u;
5092
5093 iemVmxVmentryLoadGuestNonRegState(pVCpu);
5094
5095 NOREF(pszInstr);
5096 return VINF_SUCCESS;
5097}
5098
5099
5100/**
5101 * Performs event injection (if any) as part of VM-entry.
5102 *
5103 * @param pVCpu The cross context virtual CPU structure.
5104 * @param pszInstr The VMX instruction name (for logging purposes).
5105 */
5106IEM_STATIC int iemVmxVmentryInjectEvent(PVMCPU pVCpu, const char *pszInstr)
5107{
5108 /*
5109 * Inject events.
5110 * See Intel spec. 26.5 "Event Injection".
5111 */
5112 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5113 uint32_t const uEntryIntInfo = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->u32EntryIntInfo;
5114 if (VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
5115 {
5116 /*
5117 * The event that is going to be made pending for injection is not subject to VMX intercepts,
5118 * thus we flag that intercepts are to be ignored. However, any recursive exceptions raised
5119 * during delivery of the current event -are- subject to intercepts, hence this flag will be
5120 * flipped during the actual delivery of this event.
5121 */
5122 pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents = false;
5123
5124 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
5125 if (uType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT)
5126 {
5127 Assert(VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo) == VMX_ENTRY_INT_INFO_VECTOR_MTF);
5128 VMCPU_FF_SET(pVCpu, VMCPU_FF_MTF);
5129 return VINF_SUCCESS;
5130 }
5131
5132 int rc = HMVmxEntryIntInfoInjectTrpmEvent(pVCpu, uEntryIntInfo, pVmcs->u32EntryXcptErrCode, pVmcs->u32EntryInstrLen,
5133 pVCpu->cpum.GstCtx.cr2);
5134 AssertRCReturn(rc, rc);
5135 }
5136
5137 NOREF(pszInstr);
5138 return VINF_SUCCESS;
5139}
5140
5141
5142/**
5143 * VMLAUNCH/VMRESUME instruction execution worker.
5144 *
5145 * @returns Strict VBox status code.
5146 * @param pVCpu The cross context virtual CPU structure.
5147 * @param cbInstr The instruction length in bytes.
5148 * @param uInstrId The instruction identity (VMXINSTRID_VMLAUNCH or
5149 * VMXINSTRID_VMRESUME).
5150 * @param pExitInfo Pointer to the VM-exit instruction information struct.
5151 * Optional, can be NULL.
5152 *
5153 * @remarks Common VMX instruction checks are already expected to be done by the caller,
5154 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
5155 */
5156IEM_STATIC VBOXSTRICTRC iemVmxVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId, PCVMXVEXITINFO pExitInfo)
5157{
5158 Assert( uInstrId == VMXINSTRID_VMLAUNCH
5159 || uInstrId == VMXINSTRID_VMRESUME);
5160 const char *pszInstr = uInstrId == VMXINSTRID_VMRESUME ? "vmresume" : "vmlaunch";
5161
5162 /* Nested-guest intercept. */
5163 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5164 {
5165 if (pExitInfo)
5166 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
5167 uint32_t const uExitReason = uInstrId == VMXINSTRID_VMRESUME ? VMX_EXIT_VMRESUME : VMX_EXIT_VMLAUNCH;
5168 return iemVmxVmexitInstrNeedsInfo(pVCpu, uExitReason, uInstrId, cbInstr);
5169 }
5170
5171 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
5172
5173 /* CPL. */
5174 if (pVCpu->iem.s.uCpl > 0)
5175 {
5176 Log(("%s: CPL %u -> #GP(0)\n", pszInstr, pVCpu->iem.s.uCpl));
5177 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_Cpl;
5178 return iemRaiseGeneralProtectionFault0(pVCpu);
5179 }
5180
5181 /* Current VMCS valid. */
5182 if (!IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
5183 {
5184 Log(("%s: VMCS pointer %#RGp invalid -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
5185 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrInvalid;
5186 iemVmxVmFailInvalid(pVCpu);
5187 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5188 return VINF_SUCCESS;
5189 }
5190
5191 /** @todo Distinguish block-by-MOV-SS from block-by-STI. Currently we
5192 * use block-by-STI here which is not quite correct. */
5193 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
5194 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
5195 {
5196 Log(("%s: VM entry with events blocked by MOV SS -> VMFail\n", pszInstr));
5197 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_BlocKMovSS;
5198 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_BLOCK_MOVSS);
5199 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5200 return VINF_SUCCESS;
5201 }
5202
5203 if (uInstrId == VMXINSTRID_VMLAUNCH)
5204 {
5205 /* VMLAUNCH with non-clear VMCS. */
5206 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_STATE_CLEAR)
5207 { /* likely */ }
5208 else
5209 {
5210 Log(("vmlaunch: VMLAUNCH with non-clear VMCS -> VMFail\n"));
5211 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsClear;
5212 iemVmxVmFail(pVCpu, VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS);
5213 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5214 return VINF_SUCCESS;
5215 }
5216 }
5217 else
5218 {
5219 /* VMRESUME with non-launched VMCS. */
5220 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_STATE_LAUNCHED)
5221 { /* likely */ }
5222 else
5223 {
5224 Log(("vmresume: VMRESUME with non-launched VMCS -> VMFail\n"));
5225 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsLaunch;
5226 iemVmxVmFail(pVCpu, VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS);
5227 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5228 return VINF_SUCCESS;
5229 }
5230 }
5231
5232 /*
5233 * Load the current VMCS.
5234 */
5235 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
5236 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs),
5237 IEM_VMX_GET_CURRENT_VMCS(pVCpu), VMX_V_VMCS_SIZE);
5238 if (RT_FAILURE(rc))
5239 {
5240 Log(("%s: Failed to read VMCS at %#RGp, rc=%Rrc\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu), rc));
5241 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrReadPhys;
5242 return rc;
5243 }
5244
5245 /*
5246 * We are allowed to cache VMCS related data structures (such as I/O bitmaps, MSR bitmaps)
5247 * while entering VMX non-root mode. We do some of this while checking VM-execution
5248 * controls. The guest hypervisor should not make assumptions and cannot expect
5249 * predictable behavior if changes to these structures are made in guest memory while
5250 * executing in VMX non-root mode. As far as VirtualBox is concerned, the guest cannot
5251 * modify them anyway as we cache them in host memory. We are trading memory for speed here.
5252 *
5253 * See Intel spec. 24.11.4 "Software Access to Related Structures".
5254 */
5255 rc = iemVmxVmentryCheckExecCtls(pVCpu, pszInstr);
5256 if (RT_SUCCESS(rc))
5257 {
5258 rc = iemVmxVmentryCheckExitCtls(pVCpu, pszInstr);
5259 if (RT_SUCCESS(rc))
5260 {
5261 rc = iemVmxVmentryCheckEntryCtls(pVCpu, pszInstr);
5262 if (RT_SUCCESS(rc))
5263 {
5264 rc = iemVmxVmentryCheckHostState(pVCpu, pszInstr);
5265 if (RT_SUCCESS(rc))
5266 {
5267 /* Save the guest force-flags as VM-exits can occur from this point on. */
5268 iemVmxVmentrySaveForceFlags(pVCpu);
5269
5270 rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
5271 if (RT_SUCCESS(rc))
5272 {
5273 rc = iemVmxVmentryLoadGuestState(pVCpu, pszInstr);
5274 if (RT_SUCCESS(rc))
5275 {
5276 rc = iemVmxVmentryLoadGuestAutoMsrs(pVCpu, pszInstr);
5277 if (RT_SUCCESS(rc))
5278 {
5279 Assert(rc != VINF_CPUM_R3_MSR_WRITE);
5280
5281 /* VMLAUNCH instruction must update the VMCS launch state. */
5282 if (uInstrId == VMXINSTRID_VMLAUNCH)
5283 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = VMX_V_VMCS_STATE_LAUNCHED;
5284
5285 /* Perform the VMX transition (PGM updates). */
5286 VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
5287 if (rcStrict == VINF_SUCCESS)
5288 { /* likely */ }
5289 else if (RT_SUCCESS(rcStrict))
5290 {
5291 Log3(("%s: iemVmxWorldSwitch returns %Rrc -> Setting passup status\n", pszInstr,
5292 VBOXSTRICTRC_VAL(rcStrict)));
5293 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5294 }
5295 else
5296 {
5297 Log3(("%s: iemVmxWorldSwitch failed! rc=%Rrc\n", pszInstr, VBOXSTRICTRC_VAL(rcStrict)));
5298 return rcStrict;
5299 }
5300
5301 /* We've now entered nested-guest execution. */
5302 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
5303
5304 /* Now that we've switched page tables, we can inject events if any. */
5305 iemVmxVmentryInjectEvent(pVCpu, pszInstr);
5306
5307 /** @todo NSTVMX: Setup VMX preemption timer */
5308 /** @todo NSTVMX: TPR thresholding. */
5309
5310 return VINF_SUCCESS;
5311 }
5312 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED);
5313 }
5314 }
5315 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED);
5316 }
5317
5318 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_HOST_STATE);
5319 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5320 return VINF_SUCCESS;
5321 }
5322 }
5323 }
5324
5325 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
5326 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5327 return VINF_SUCCESS;
5328}
5329
5330
5331/**
5332 * Checks whether an RDMSR or WRMSR instruction for the given MSR is intercepted
5333 * (causes a VM-exit) or not.
5334 *
5335 * @returns @c true if the instruction is intercepted, @c false otherwise.
5336 * @param pVCpu The cross context virtual CPU structure.
5337 * @param uExitReason The VM-exit exit reason (VMX_EXIT_RDMSR or
5338 * VMX_EXIT_WRMSR).
5339 * @param idMsr The MSR.
5340 */
5341IEM_STATIC bool iemVmxIsRdmsrWrmsrInterceptSet(PVMCPU pVCpu, uint32_t uExitReason, uint32_t idMsr)
5342{
5343 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
5344 Assert( uExitReason == VMX_EXIT_RDMSR
5345 || uExitReason == VMX_EXIT_WRMSR);
5346
5347 /* Consult the MSR bitmap if the feature is supported. */
5348 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5349 Assert(pVmcs);
5350 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
5351 {
5352 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
5353 if (uExitReason == VMX_EXIT_RDMSR)
5354 {
5355 VMXMSREXITREAD enmRead;
5356 int rc = HMVmxGetMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, &enmRead,
5357 NULL /* penmWrite */);
5358 AssertRC(rc);
5359 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
5360 return true;
5361 }
5362 else
5363 {
5364 VMXMSREXITWRITE enmWrite;
5365 int rc = HMVmxGetMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, NULL /* penmRead */,
5366 &enmWrite);
5367 AssertRC(rc);
5368 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
5369 return true;
5370 }
5371 return false;
5372 }
5373
5374 /* Without MSR bitmaps, all MSR accesses are intercepted. */
5375 return true;
5376}
5377
5378
5379/**
5380 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field is
5381 * intercepted (causes a VM-exit) or not.
5382 *
5383 * @returns @c true if the instruction is intercepted, @c false otherwise.
5384 * @param pVCpu The cross context virtual CPU structure.
5385 * @param uExitReason The VM-exit exit reason (VMX_EXIT_VMREAD or
5386 * VMX_EXIT_VMWRITE).
5387 * @param u64FieldEnc The VMCS field encoding.
5388 */
5389IEM_STATIC bool iemVmxIsVmreadVmwriteInterceptSet(PVMCPU pVCpu, uint32_t uExitReason, uint64_t u64FieldEnc)
5390{
5391 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
5392 Assert( uExitReason == VMX_EXIT_VMREAD
5393 || uExitReason == VMX_EXIT_VMWRITE);
5394
5395 /* Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted. */
5396 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing)
5397 return true;
5398
5399 /*
5400 * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE is intercepted.
5401 * This excludes any reserved bits in the valid parts of the field encoding (i.e. bit 12).
5402 */
5403 if (u64FieldEnc & VMX_VMCS_ENC_RSVD_MASK)
5404 return true;
5405
5406 /* Finally, consult the VMREAD/VMWRITE bitmap whether to intercept the instruction or not. */
5407 uint32_t u32FieldEnc = RT_LO_U32(u64FieldEnc);
5408 Assert(u32FieldEnc >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
5409 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
5410 uint8_t const *pbBitmap = uExitReason == VMX_EXIT_VMREAD
5411 ? (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap)
5412 : (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap);
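    /* Each field encoding maps to one bit in the bitmap: byte index = encoding / 8, bit index = encoding % 8. */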
5413 pbBitmap += (u32FieldEnc >> 3);
5414 if (*pbBitmap & RT_BIT(u32FieldEnc & 7))
5415 return true;
5416
5417 return false;
5418}
5419
5420
5421/**
5422 * VMREAD common (memory/register) instruction execution worker
5423 *
5424 * @returns Strict VBox status code.
5425 * @param pVCpu The cross context virtual CPU structure.
5426 * @param cbInstr The instruction length in bytes.
5427 * @param pu64Dst Where to write the VMCS value (only updated when
5428 * VINF_SUCCESS is returned).
5429 * @param u64FieldEnc The VMCS field encoding.
5430 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
5431 * be NULL.
5432 */
5433IEM_STATIC VBOXSTRICTRC iemVmxVmreadCommon(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
5434 PCVMXVEXITINFO pExitInfo)
5435{
5436 /* Nested-guest intercept. */
5437 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5438 && iemVmxIsVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMREAD, u64FieldEnc))
5439 {
5440 if (pExitInfo)
5441 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
5442 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMREAD, VMXINSTRID_VMREAD, cbInstr);
5443 }
5444
5445 /* CPL. */
5446 if (pVCpu->iem.s.uCpl > 0)
5447 {
5448 Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5449 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_Cpl;
5450 return iemRaiseGeneralProtectionFault0(pVCpu);
5451 }
5452
5453 /* VMCS pointer in root mode. */
5454 if ( IEM_VMX_IS_ROOT_MODE(pVCpu)
5455 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
5456 {
5457 Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
5458 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrInvalid;
5459 iemVmxVmFailInvalid(pVCpu);
5460 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5461 return VINF_SUCCESS;
5462 }
5463
5464 /* VMCS-link pointer in non-root mode. */
5465 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5466 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
5467 {
5468 Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
5469 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_LinkPtrInvalid;
5470 iemVmxVmFailInvalid(pVCpu);
5471 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5472 return VINF_SUCCESS;
5473 }
5474
5475 /* Supported VMCS field. */
5476 if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
5477 {
5478 Log(("vmread: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
5479 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_FieldInvalid;
5480 iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
5481 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5482 return VINF_SUCCESS;
5483 }
5484
5485 /*
5486 * Setup reading from the current or shadow VMCS.
5487 */
5488 uint8_t *pbVmcs;
5489 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5490 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
5491 else
5492 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5493 Assert(pbVmcs);
5494
5495 VMXVMCSFIELDENC FieldEnc;
5496 FieldEnc.u = RT_LO_U32(u64FieldEnc);
5497 uint8_t const uWidth = FieldEnc.n.u2Width;
5498 uint8_t const uType = FieldEnc.n.u2Type;
5499 uint8_t const uWidthType = (uWidth << 2) | uType;
5500 uint8_t const uIndex = FieldEnc.n.u8Index;
5501 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
5502 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
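    /* E.g. encoding 0x6800 (guest CR0) decodes as width=natural, type=guest-state, index=0. */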
5503
5504 /*
5505 * Read the VMCS component based on the field's effective width.
5506 *
5507 * The effective width is 64-bit fields adjusted to 32-bits if the access-type
5508 * indicates high bits (little endian).
5509 *
5510 * Note! The caller is responsible for trimming the result and updating registers
5511 * or memory locations as required. Here we just zero-extend to the largest
5512 * type (i.e. 64-bits).
5513 */
5514 uint8_t *pbField = pbVmcs + offField;
5515 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
5516 switch (uEffWidth)
5517 {
5518 case VMX_VMCS_ENC_WIDTH_64BIT:
5519 case VMX_VMCS_ENC_WIDTH_NATURAL: *pu64Dst = *(uint64_t *)pbField; break;
5520 case VMX_VMCS_ENC_WIDTH_32BIT: *pu64Dst = *(uint32_t *)pbField; break;
5521 case VMX_VMCS_ENC_WIDTH_16BIT: *pu64Dst = *(uint16_t *)pbField; break;
5522 }
5523 return VINF_SUCCESS;
5524}
5525
5526
5527/**
5528 * VMREAD (64-bit register) instruction execution worker.
5529 *
5530 * @returns Strict VBox status code.
5531 * @param pVCpu The cross context virtual CPU structure.
5532 * @param cbInstr The instruction length in bytes.
5533 * @param pu64Dst Where to store the VMCS field's value.
5534 * @param u64FieldEnc The VMCS field encoding.
5535 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
5536 * be NULL.
5537 */
5538IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg64(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
5539 PCVMXVEXITINFO pExitInfo)
5540{
5541 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
5542 if (rcStrict == VINF_SUCCESS)
5543 {
5544 iemVmxVmreadSuccess(pVCpu, cbInstr);
5545 return VINF_SUCCESS;
5546 }
5547
5548 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
5549 return rcStrict;
5550}
5551
5552
5553/**
5554 * VMREAD (32-bit register) instruction execution worker.
5555 *
5556 * @returns Strict VBox status code.
5557 * @param pVCpu The cross context virtual CPU structure.
5558 * @param cbInstr The instruction length in bytes.
5559 * @param pu32Dst Where to store the VMCS field's value.
5560 * @param u32FieldEnc The VMCS field encoding.
5561 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
5562 * be NULL.
5563 */
5564IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg32(PVMCPU pVCpu, uint8_t cbInstr, uint32_t *pu32Dst, uint64_t u32FieldEnc,
5565 PCVMXVEXITINFO pExitInfo)
5566{
5567 uint64_t u64Dst;
5568 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u32FieldEnc, pExitInfo);
5569 if (rcStrict == VINF_SUCCESS)
5570 {
5571 *pu32Dst = u64Dst;
5572 iemVmxVmreadSuccess(pVCpu, cbInstr);
5573 return VINF_SUCCESS;
5574 }
5575
5576 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
5577 return rcStrict;
5578}
5579
5580
5581/**
5582 * VMREAD (memory) instruction execution worker.
5583 *
5584 * @returns Strict VBox status code.
5585 * @param pVCpu The cross context virtual CPU structure.
5586 * @param cbInstr The instruction length in bytes.
5587 * @param iEffSeg The effective segment register to use with @a GCPtrDst
5588 * (the destination memory operand).
5589 * @param enmEffAddrMode The effective addressing mode (only used with memory
5590 * operand).
5591 * @param GCPtrDst The guest linear address to store the VMCS field's
5592 * value.
5593 * @param u64FieldEnc The VMCS field encoding.
5594 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
5595 * be NULL.
5596 */
5597IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode,
5598 RTGCPTR GCPtrDst, uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
5599{
5600 uint64_t u64Dst;
5601 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u64FieldEnc, pExitInfo);
5602 if (rcStrict == VINF_SUCCESS)
5603 {
5604 /*
5605 * Write the VMCS field's value to the location specified in guest-memory.
5606 *
5607 * The pointer size depends on the address size (address-size prefix allowed).
5608 * The operand size depends on IA-32e mode (operand-size prefix not allowed).
5609 */
5610 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
5611 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
5612 GCPtrDst &= s_auAddrSizeMasks[enmEffAddrMode];
5613
5614 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5615 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
5616 else
5617 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
5618 if (rcStrict == VINF_SUCCESS)
5619 {
5620 iemVmxVmreadSuccess(pVCpu, cbInstr);
5621 return VINF_SUCCESS;
5622 }
5623
5624 Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
5625 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrMap;
5626 return rcStrict;
5627 }
5628
5629 Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
5630 return rcStrict;
5631}
5632
5633
5634/**
5635 * VMWRITE instruction execution worker.
5636 *
5637 * @returns Strict VBox status code.
5638 * @param pVCpu The cross context virtual CPU structure.
5639 * @param cbInstr The instruction length in bytes.
5640 * @param iEffSeg The effective segment register to use with @a u64Val.
5641 * Pass UINT8_MAX if it is a register access.
5642 * @param enmEffAddrMode The effective addressing mode (only used with memory
5643 * operand).
5644 * @param u64Val The value to write (or guest linear address to the
5645 * value), @a iEffSeg will indicate if it's a memory
5646 * operand.
5647 * @param u64FieldEnc The VMCS field encoding.
5648 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
5649 * be NULL.
5650 */
5651IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode, uint64_t u64Val,
5652 uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
5653{
5654 /* Nested-guest intercept. */
5655 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5656 && iemVmxIsVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMWRITE, u64FieldEnc))
5657 {
5658 if (pExitInfo)
5659 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
5660 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMWRITE, VMXINSTRID_VMWRITE, cbInstr);
5661 }
5662
5663 /* CPL. */
5664 if (pVCpu->iem.s.uCpl > 0)
5665 {
5666 Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5667 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_Cpl;
5668 return iemRaiseGeneralProtectionFault0(pVCpu);
5669 }
5670
5671 /* VMCS pointer in root mode. */
5672 if ( IEM_VMX_IS_ROOT_MODE(pVCpu)
5673 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
5674 {
5675 Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
5676 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrInvalid;
5677 iemVmxVmFailInvalid(pVCpu);
5678 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5679 return VINF_SUCCESS;
5680 }
5681
5682 /* VMCS-link pointer in non-root mode. */
5683 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
5684 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
5685 {
5686 Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
5687 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_LinkPtrInvalid;
5688 iemVmxVmFailInvalid(pVCpu);
5689 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5690 return VINF_SUCCESS;
5691 }
5692
5693 /* If the VMWRITE instruction references memory, access the specified memory operand. */
5694 bool const fIsRegOperand = iEffSeg == UINT8_MAX;
5695 if (!fIsRegOperand)
5696 {
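        /* Truncate the operand's linear address according to the effective address-size (16, 32 or 64-bit). */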
5697 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
5698 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
5699 RTGCPTR const GCPtrVal = u64Val & s_auAddrSizeMasks[enmEffAddrMode];
5700
5701 /* Read the value from the specified guest memory location. */
5702 VBOXSTRICTRC rcStrict;
5703 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5704 rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
5705 else
5706 {
5707 uint32_t u32Val;
5708 rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iEffSeg, GCPtrVal);
5709 u64Val = u32Val;
5710 }
5711 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5712 {
5713 Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
5714 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrMap;
5715 return rcStrict;
5716 }
5717 }
5718 else
5719 Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
5720
5721 /* Supported VMCS field. */
5722 if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
5723 {
5724 Log(("vmwrite: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
5725 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldInvalid;
5726 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
5727 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5728 return VINF_SUCCESS;
5729 }
5730
5731 /* Read-only VMCS field. */
5732 bool const fIsFieldReadOnly = HMVmxIsVmcsFieldReadOnly(u64FieldEnc);
5733 if ( fIsFieldReadOnly
5734 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
5735 {
5736 Log(("vmwrite: Write to read-only VMCS component %#RX64 -> VMFail\n", u64FieldEnc));
5737 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldRo;
5738 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
5739 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5740 return VINF_SUCCESS;
5741 }
5742
5743 /*
5744 * Setup writing to the current or shadow VMCS.
5745 */
5746 uint8_t *pbVmcs;
5747 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5748 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
5749 else
5750 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5751 Assert(pbVmcs);
5752
5753 VMXVMCSFIELDENC FieldEnc;
5754 FieldEnc.u = RT_LO_U32(u64FieldEnc);
5755 uint8_t const uWidth = FieldEnc.n.u2Width;
5756 uint8_t const uType = FieldEnc.n.u2Type;
5757 uint8_t const uWidthType = (uWidth << 2) | uType;
5758 uint8_t const uIndex = FieldEnc.n.u8Index;
5759 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
5760 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
5761
5762 /*
5763 * Write the VMCS component based on the field's effective width.
5764 *
5765 * The effective width is 64-bit fields adjusted to 32-bits if the access-type
5766 * indicates high bits (little endian).
5767 */
5768 uint8_t *pbField = pbVmcs + offField;
5769 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
5770 switch (uEffWidth)
5771 {
5772 case VMX_VMCS_ENC_WIDTH_64BIT:
5773 case VMX_VMCS_ENC_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
5774 case VMX_VMCS_ENC_WIDTH_32BIT: *(uint32_t *)pbField = u64Val; break;
5775 case VMX_VMCS_ENC_WIDTH_16BIT: *(uint16_t *)pbField = u64Val; break;
5776 }
5777
5778 iemVmxVmSucceed(pVCpu);
5779 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5780 return VINF_SUCCESS;
5781}
5782
5783
5784/**
5785 * VMCLEAR instruction execution worker.
5786 *
5787 * @returns Strict VBox status code.
5788 * @param pVCpu The cross context virtual CPU structure.
5789 * @param cbInstr The instruction length in bytes.
5790 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
5791 * @param GCPtrVmcs The linear address of the VMCS pointer.
5792 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
5793 * be NULL.
5794 *
5795 * @remarks Common VMX instruction checks are already expected to be done by the caller,
5796 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
5797 */
5798IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
5799 PCVMXVEXITINFO pExitInfo)
5800{
5801 /* Nested-guest intercept. */
5802 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5803 {
5804 if (pExitInfo)
5805 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
5806 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMCLEAR, VMXINSTRID_NONE, cbInstr);
5807 }
5808
5809 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
5810
5811 /* CPL. */
5812 if (pVCpu->iem.s.uCpl > 0)
5813 {
5814 Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5815 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_Cpl;
5816 return iemRaiseGeneralProtectionFault0(pVCpu);
5817 }
5818
5819 /* Get the VMCS pointer from the location specified by the source memory operand. */
5820 RTGCPHYS GCPhysVmcs;
5821 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
5822 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5823 {
5824 Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
5825 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrMap;
5826 return rcStrict;
5827 }
5828
5829 /* VMCS pointer alignment. */
5830 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
5831 {
5832 Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
5833 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAlign;
5834 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
5835 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5836 return VINF_SUCCESS;
5837 }
5838
5839 /* VMCS physical-address width limits. */
5840 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5841 {
5842 Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
5843 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrWidth;
5844 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
5845 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5846 return VINF_SUCCESS;
5847 }
5848
5849 /* VMCS is not the VMXON region. */
5850 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
5851 {
5852 Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
5853 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrVmxon;
5854 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
5855 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5856 return VINF_SUCCESS;
5857 }
5858
5859 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
5860 restriction imposed by our implementation. */
5861 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
5862 {
5863 Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
5864 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAbnormal;
5865 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
5866 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5867 return VINF_SUCCESS;
5868 }
5869
5870 /*
5871 * VMCLEAR allows committing and clearing any valid VMCS pointer.
5872 *
5873 * If the current VMCS is the one being cleared, set its state to 'clear' and commit
5874 * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
5875 * to 'clear'.
5876 */
5877 uint8_t const fVmcsStateClear = VMX_V_VMCS_STATE_CLEAR;
5878 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
5879 {
5880 Assert(GCPhysVmcs != NIL_RTGCPHYS); /* Paranoia. */
5881 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
5882 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsStateClear;
5883 iemVmxCommitCurrentVmcsToMemory(pVCpu);
5884 Assert(!IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
5885 }
5886 else
5887 {
5888 rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_OFFSETOF(VMXVVMCS, fVmcsState),
5889 (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
5890 }
5891
5892 iemVmxVmSucceed(pVCpu);
5893 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5894 return rcStrict;
5895}
5896
5897
5898/**
5899 * VMPTRST instruction execution worker.
5900 *
5901 * @returns Strict VBox status code.
5902 * @param pVCpu The cross context virtual CPU structure.
5903 * @param cbInstr The instruction length in bytes.
5904 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
5905 * @param GCPtrVmcs The linear address of where to store the current VMCS
5906 * pointer.
5907 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
5908 * be NULL.
5909 *
5910 * @remarks Common VMX instruction checks are already expected to be done by the caller,
5911 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
5912 */
5913IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
5914 PCVMXVEXITINFO pExitInfo)
5915{
5916 /* Nested-guest intercept. */
5917 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5918 {
5919 if (pExitInfo)
5920 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
5921 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRST, VMXINSTRID_NONE, cbInstr);
5922 }
5923
5924 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
5925
5926 /* CPL. */
5927 if (pVCpu->iem.s.uCpl > 0)
5928 {
5929 Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5930 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_Cpl;
5931 return iemRaiseGeneralProtectionFault0(pVCpu);
5932 }
5933
5934 /* Store the current VMCS pointer to the location specified by the destination memory operand. */
5935 AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
5936 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
5937 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5938 {
5939 iemVmxVmSucceed(pVCpu);
5940 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
5941 return rcStrict;
5942 }
5943
5944 Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand %#Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
5945 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_PtrMap;
5946 return rcStrict;
5947}
5948
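/*
 * Illustrative sketch (assumption, not part of this file): guest-side VMPTRST usage
 * matching iemVmxVmptrst above; with no current VMCS the value stored is all-ones,
 * which is why the AssertCompile on NIL_RTGCPHYS above matters. The wrapper name is
 * made up for this example.
 */
#if 0 /* example only */
static inline void exampleVmptrst(uint64_t *puPhysVmcs)
{
    __asm__ __volatile__("vmptrst %0"
                         : "=m" (*puPhysVmcs)   /* m64 operand receiving the current-VMCS pointer. */
                         :
                         : "cc", "memory");
}
#endif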
5949
5950/**
5951 * VMPTRLD instruction execution worker.
5952 *
5953 * @returns Strict VBox status code.
5954 * @param pVCpu The cross context virtual CPU structure.
5955 * @param cbInstr The instruction length in bytes.
5956 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
 * @param GCPtrVmcs The linear address from which to load the VMCS pointer.
5957 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
5958 * be NULL.
5959 *
5960 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
5961 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
5962 */
5963IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmcs,
5964 PCVMXVEXITINFO pExitInfo)
5965{
5966 /* Nested-guest intercept. */
5967 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5968 {
5969 if (pExitInfo)
5970 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
5971 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRLD, VMXINSTRID_NONE, cbInstr);
5972 }
5973
5974 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
5975
5976 /* CPL. */
5977 if (pVCpu->iem.s.uCpl > 0)
5978 {
5979 Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
5980 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_Cpl;
5981 return iemRaiseGeneralProtectionFault0(pVCpu);
5982 }
5983
5984 /* Get the VMCS pointer from the location specified by the source memory operand. */
5985 RTGCPHYS GCPhysVmcs;
5986 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
5987 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5988 {
5989 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
5990 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrMap;
5991 return rcStrict;
5992 }
5993
5994 /* VMCS pointer alignment. */
5995 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
5996 {
5997 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
5998 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAlign;
5999 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
6000 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6001 return VINF_SUCCESS;
6002 }
6003
6004 /* VMCS physical-address width limits. */
6005 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6006 {
6007 Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
6008 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrWidth;
6009 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
6010 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6011 return VINF_SUCCESS;
6012 }
6013
6014 /* VMCS is not the VMXON region. */
6015 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
6016 {
6017 Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
6018 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrVmxon;
6019 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
6020 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6021 return VINF_SUCCESS;
6022 }
6023
6024 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
6025 restriction imposed by our implementation. */
6026 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
6027 {
6028 Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
6029 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAbnormal;
6030 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
6031 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6032 return VINF_SUCCESS;
6033 }
6034
6035 /* Read the VMCS revision ID from the VMCS. */
6036 VMXVMCSREVID VmcsRevId;
6037 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
6038 if (RT_FAILURE(rc))
6039 {
6040 Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
6041 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrReadPhys;
6042 return rc;
6043 }
6044
6045 /* Verify that the VMCS revision specified by the guest matches what we reported to the
6046 guest; also check the VMCS shadowing feature. */
6047 if ( VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
6048 || ( VmcsRevId.n.fIsShadowVmcs
6049 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
6050 {
6051 if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
6052 {
6053 Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFail()\n", VMX_V_VMCS_REVISION_ID,
6054 VmcsRevId.n.u31RevisionId));
6055 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_VmcsRevId;
6056 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
6057 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6058 return VINF_SUCCESS;
6059 }
6060
6061 Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
6062 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_ShadowVmcs;
6063 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
6064 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6065 return VINF_SUCCESS;
6066 }
6067
6068 /*
6069 * We only maintain the current VMCS in our virtual CPU context (CPUMCTX). Therefore,
6070 * VMPTRLD shall always flush any existing current VMCS back to guest memory before loading
6071 * a new VMCS as current.
6072 */
6073 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
6074 {
6075 iemVmxCommitCurrentVmcsToMemory(pVCpu);
6076 IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
6077 }
6078
6079 iemVmxVmSucceed(pVCpu);
6080 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6081 return VINF_SUCCESS;
6082}
6083
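/*
 * Illustrative sketch (assumption, not part of this file): preparing a VMCS region
 * and making it current from the guest side, mirroring the checks iemVmxVmptrld
 * performs above (page alignment, revision ID, not the VMXON region). The MSR index
 * is architectural (IA32_VMX_BASIC, 0x480, revision ID in bits 30:0); the helper
 * names are made up for this example.
 */
#if 0 /* example only */
#define EXAMPLE_MSR_IA32_VMX_BASIC  0x480

static inline uint64_t exampleRdmsr(uint32_t uMsr)
{
    uint32_t uLo, uHi;
    __asm__ __volatile__("rdmsr" : "=a" (uLo), "=d" (uHi) : "c" (uMsr));
    return ((uint64_t)uHi << 32) | uLo;
}

static inline bool exampleVmptrld(void *pvVmcsPage /* 4K aligned */, uint64_t uPhysVmcs)
{
    /* Stamp the revision ID into the first dword; bit 31 (shadow-VMCS indicator) stays clear. */
    *(uint32_t *)pvVmcsPage = (uint32_t)(exampleRdmsr(EXAMPLE_MSR_IA32_VMX_BASIC) & UINT32_C(0x7fffffff));
    uint8_t fFailed;
    __asm__ __volatile__("vmptrld %1\n\t"
                         "setna %0"
                         : "=q" (fFailed)
                         : "m" (uPhysVmcs)
                         : "cc", "memory");
    return !fFailed;
}
#endif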
6084
6085/**
6086 * VMXON instruction execution worker.
6087 *
6088 * @returns Strict VBox status code.
6089 * @param pVCpu The cross context virtual CPU structure.
6090 * @param cbInstr The instruction length in bytes.
6091 * @param iEffSeg The effective segment register to use with @a
6092 * GCPtrVmxon.
6093 * @param GCPtrVmxon The linear address of the VMXON pointer.
6094 * @param pExitInfo Pointer to the VM-exit instruction information struct.
6095 * Optional, can be NULL.
6096 *
6097 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
6098 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
6099 */
6100IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmxon,
6101 PCVMXVEXITINFO pExitInfo)
6102{
6103#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
6104 RT_NOREF5(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
6105 return VINF_EM_RAW_EMULATE_INSTR;
6106#else
6107 if (!IEM_VMX_IS_ROOT_MODE(pVCpu))
6108 {
6109 /* CPL. */
6110 if (pVCpu->iem.s.uCpl > 0)
6111 {
6112 Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
6113 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cpl;
6114 return iemRaiseGeneralProtectionFault0(pVCpu);
6115 }
6116
6117 /* A20M (A20 Masked) mode. */
6118 if (!PGMPhysIsA20Enabled(pVCpu))
6119 {
6120 Log(("vmxon: A20M mode -> #GP(0)\n"));
6121 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_A20M;
6122 return iemRaiseGeneralProtectionFault0(pVCpu);
6123 }
6124
6125 /* CR0. */
6126 {
6127 /* CR0 MB1 bits. */
6128 uint64_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
6129 if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
6130 {
6131 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
6132 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed0;
6133 return iemRaiseGeneralProtectionFault0(pVCpu);
6134 }
6135
6136 /* CR0 MBZ bits. */
6137 uint64_t const uCr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
6138 if (pVCpu->cpum.GstCtx.cr0 & ~uCr0Fixed1)
6139 {
6140 Log(("vmxon: CR0 fixed1 bits set -> #GP(0)\n"));
6141 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed1;
6142 return iemRaiseGeneralProtectionFault0(pVCpu);
6143 }
6144 }
6145
6146 /* CR4. */
6147 {
6148 /* CR4 MB1 bits. */
6149 uint64_t const uCr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
6150 if ((pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0) != uCr4Fixed0)
6151 {
6152 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
6153 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed0;
6154 return iemRaiseGeneralProtectionFault0(pVCpu);
6155 }
6156
6157 /* CR4 MBZ bits. */
6158 uint64_t const uCr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
6159 if (pVCpu->cpum.GstCtx.cr4 & ~uCr4Fixed1)
6160 {
6161 Log(("vmxon: CR4 fixed1 bits set -> #GP(0)\n"));
6162 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed1;
6163 return iemRaiseGeneralProtectionFault0(pVCpu);
6164 }
6165 }
6166
6167 /* Feature control MSR's LOCK and VMXON bits. */
6168 uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
6169 if (   !(uMsrFeatCtl & MSR_IA32_FEATURE_CONTROL_LOCK)
 || !(uMsrFeatCtl & MSR_IA32_FEATURE_CONTROL_VMXON))
6170 {
6171 Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
6172 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_MsrFeatCtl;
6173 return iemRaiseGeneralProtectionFault0(pVCpu);
6174 }
6175
6176 /* Get the VMXON pointer from the location specified by the source memory operand. */
6177 RTGCPHYS GCPhysVmxon;
6178 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
6179 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
6180 {
6181 Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
6182 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrMap;
6183 return rcStrict;
6184 }
6185
6186 /* VMXON region pointer alignment. */
6187 if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
6188 {
6189 Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
6190 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAlign;
6191 iemVmxVmFailInvalid(pVCpu);
6192 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6193 return VINF_SUCCESS;
6194 }
6195
6196 /* VMXON physical-address width limits. */
6197 if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6198 {
6199 Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
6200 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrWidth;
6201 iemVmxVmFailInvalid(pVCpu);
6202 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6203 return VINF_SUCCESS;
6204 }
6205
6206 /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
6207 restriction imposed by our implementation. */
6208 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
6209 {
6210 Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
6211 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAbnormal;
6212 iemVmxVmFailInvalid(pVCpu);
6213 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6214 return VINF_SUCCESS;
6215 }
6216
6217 /* Read the VMCS revision ID from the VMXON region. */
6218 VMXVMCSREVID VmcsRevId;
6219 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
6220 if (RT_FAILURE(rc))
6221 {
6222 Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
6223 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrReadPhys;
6224 return rc;
6225 }
6226
6227 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
6228 if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
6229 {
6230 /* Revision ID mismatch. */
6231 if (!VmcsRevId.n.fIsShadowVmcs)
6232 {
6233 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
6234 VmcsRevId.n.u31RevisionId));
6235 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmcsRevId;
6236 iemVmxVmFailInvalid(pVCpu);
6237 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6238 return VINF_SUCCESS;
6239 }
6240
6241 /* Shadow VMCS disallowed. */
6242 Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
6243 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_ShadowVmcs;
6244 iemVmxVmFailInvalid(pVCpu);
6245 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6246 return VINF_SUCCESS;
6247 }
6248
6249 /*
6250 * Record that we're in VMX operation, block INIT, block and disable A20M.
6251 */
6252 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
6253 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
6254 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
6255
6256 /* Clear address-range monitoring. */
6257 EMMonitorWaitClear(pVCpu);
6258 /** @todo NSTVMX: Intel PT. */
6259
6260 iemVmxVmSucceed(pVCpu);
6261 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6262# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
6263 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
6264# else
6265 return VINF_SUCCESS;
6266# endif
6267 }
6268 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6269 {
6270 /* Nested-guest intercept. */
6271 if (pExitInfo)
6272 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
6273 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMXON, VMXINSTRID_NONE, cbInstr);
6274 }
6275
6276 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
6277
6278 /* CPL. */
6279 if (pVCpu->iem.s.uCpl > 0)
6280 {
6281 Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
6282 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxRootCpl;
6283 return iemRaiseGeneralProtectionFault0(pVCpu);
6284 }
6285
6286 /* VMXON when already in VMX root mode. */
6287 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
6288 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxAlreadyRoot;
6289 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6290 return VINF_SUCCESS;
6291#endif
6292}
6293
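/*
 * Illustrative sketch (assumption, not part of this file): the guest-side steps that
 * satisfy the VMXON checks above: IA32_FEATURE_CONTROL lock + VMXON-outside-SMX bits,
 * CR0/CR4 against the VMX fixed-bit MSRs, and a 4K-aligned VMXON region stamped with
 * the VMCS revision ID. MSR indices and bit positions are architectural; the function
 * names are made up (exampleRdmsr and EXAMPLE_MSR_IA32_VMX_BASIC come from the sketch
 * after iemVmxVmptrld).
 */
#if 0 /* example only */
#define EXAMPLE_MSR_IA32_FEATURE_CONTROL    0x03a   /* Bit 0 = lock, bit 2 = VMXON outside SMX. */
#define EXAMPLE_MSR_IA32_VMX_CR0_FIXED0     0x486
#define EXAMPLE_MSR_IA32_VMX_CR0_FIXED1     0x487
#define EXAMPLE_MSR_IA32_VMX_CR4_FIXED0     0x488
#define EXAMPLE_MSR_IA32_VMX_CR4_FIXED1     0x489

static inline bool exampleVmxon(void *pvVmxonPage /* 4K aligned */, uint64_t uPhysVmxon,
                                uint64_t uCr0, uint64_t uCr4 /* must already include CR4.VMXE */)
{
    uint64_t const uFeatCtl = exampleRdmsr(EXAMPLE_MSR_IA32_FEATURE_CONTROL);
    if ((uFeatCtl & UINT64_C(0x5)) != UINT64_C(0x5))    /* Lock and VMXON bits must both be set. */
        return false;
    /* Fixed-0 bits must be 1; bits clear in fixed-1 must be 0. */
    if (   (uCr0 &  exampleRdmsr(EXAMPLE_MSR_IA32_VMX_CR0_FIXED0)) != exampleRdmsr(EXAMPLE_MSR_IA32_VMX_CR0_FIXED0)
        || (uCr0 & ~exampleRdmsr(EXAMPLE_MSR_IA32_VMX_CR0_FIXED1))
        || (uCr4 &  exampleRdmsr(EXAMPLE_MSR_IA32_VMX_CR4_FIXED0)) != exampleRdmsr(EXAMPLE_MSR_IA32_VMX_CR4_FIXED0)
        || (uCr4 & ~exampleRdmsr(EXAMPLE_MSR_IA32_VMX_CR4_FIXED1)))
        return false;
    *(uint32_t *)pvVmxonPage = (uint32_t)(exampleRdmsr(EXAMPLE_MSR_IA32_VMX_BASIC) & UINT32_C(0x7fffffff));
    uint8_t fFailed;
    __asm__ __volatile__("vmxon %1\n\t"
                         "setna %0"
                         : "=q" (fFailed)
                         : "m" (uPhysVmxon)
                         : "cc", "memory");
    return !fFailed;
}
#endif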
6294
6295/**
6296 * Implements 'VMXOFF'.
6297 *
6298 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
6299 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
6300 */
6301IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
6302{
6303# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
6304 RT_NOREF2(pVCpu, cbInstr);
6305 return VINF_EM_RAW_EMULATE_INSTR;
6306# else
6307 /* Nested-guest intercept. */
6308 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6309 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMXOFF, cbInstr);
6310
6311 /* CPL. */
6312 if (pVCpu->iem.s.uCpl > 0)
6313 {
6314 Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
6315 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxoff_Cpl;
6316 return iemRaiseGeneralProtectionFault0(pVCpu);
6317 }
6318
6319 /* Dual monitor treatment of SMIs and SMM. */
6320 uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
6321 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
6322 {
6323 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
6324 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6325 return VINF_SUCCESS;
6326 }
6327
6328 /* Record that we're no longer in VMX root operation (INIT unblocking and A20M re-enabling are handled below). */
6329 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
6330 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
6331
6332 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
6333 { /** @todo NSTVMX: Unblock SMI. */ }
6334
6335 EMMonitorWaitClear(pVCpu);
6336 /** @todo NSTVMX: Unblock and enable A20M. */
6337
6338 iemVmxVmSucceed(pVCpu);
6339 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6340# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
6341 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
6342# else
6343 return VINF_SUCCESS;
6344# endif
6345# endif
6346}
6347
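/*
 * Illustrative sketch (assumption, not part of this file): guest-side VMXOFF matching
 * the implementation above; it VMfails when the dual-monitor treatment of SMIs/SMM is
 * active. The wrapper name is made up for this example.
 */
#if 0 /* example only */
static inline bool exampleVmxoff(void)
{
    uint8_t fFailed;
    __asm__ __volatile__("vmxoff\n\t"
                         "setna %0"
                         : "=q" (fFailed)
                         :
                         : "cc", "memory");
    return !fFailed;
}
#endif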
6348
6349/**
6350 * Implements 'VMXON'.
6351 */
6352IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
6353{
6354 return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
6355}
6356
6357
6358/**
6359 * Implements 'VMLAUNCH'.
6360 */
6361IEM_CIMPL_DEF_0(iemCImpl_vmlaunch)
6362{
6363 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMLAUNCH, NULL /* pExitInfo */);
6364}
6365
6366
6367/**
6368 * Implements 'VMRESUME'.
6369 */
6370IEM_CIMPL_DEF_0(iemCImpl_vmresume)
6371{
6372 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMRESUME, NULL /* pExitInfo */);
6373}
6374
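/*
 * Illustrative sketch (assumption, not part of this file): the VMCS launch-state rule
 * that iemVmxVmlaunchVmresume (defined elsewhere in this file) enforces: VMLAUNCH
 * requires a VMCS in the 'clear' state (as set by VMCLEAR above), while VMRESUME
 * requires a VMCS that has already been launched. The enum and function below are
 * local to this sketch; the emulation tracks the real state in the fVmcsState field.
 */
#if 0 /* example only */
typedef enum EXAMPLEVMCSSTATE
{
    EXAMPLEVMCSSTATE_CLEAR = 0,
    EXAMPLEVMCSSTATE_LAUNCHED
} EXAMPLEVMCSSTATE;

static inline bool exampleVmentryAllowed(EXAMPLEVMCSSTATE enmState, bool fIsVmlaunch)
{
    return fIsVmlaunch ? enmState == EXAMPLEVMCSSTATE_CLEAR      /* VMLAUNCH: launch state must be 'clear'. */
                       : enmState == EXAMPLEVMCSSTATE_LAUNCHED;  /* VMRESUME: launch state must be 'launched'. */
}
#endif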
6375
6376/**
6377 * Implements 'VMPTRLD'.
6378 */
6379IEM_CIMPL_DEF_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
6380{
6381 return iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
6382}
6383
6384
6385/**
6386 * Implements 'VMPTRST'.
6387 */
6388IEM_CIMPL_DEF_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
6389{
6390 return iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
6391}
6392
6393
6394/**
6395 * Implements 'VMCLEAR'.
6396 */
6397IEM_CIMPL_DEF_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
6398{
6399 return iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
6400}
6401
6402
6403/**
6404 * Implements 'VMWRITE' register.
6405 */
6406IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64FieldEnc)
6407{
6408 return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, IEMMODE_64BIT /* N/A */, u64Val, u64FieldEnc,
6409 NULL /* pExitInfo */);
6410}
6411
6412
6413/**
6414 * Implements 'VMWRITE' memory.
6415 */
6416IEM_CIMPL_DEF_4(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrVal, uint32_t, u32FieldEnc)
6417{
6418 return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrVal, u32FieldEnc, NULL /* pExitInfo */);
6419}
6420
6421
6422/**
6423 * Implements 'VMREAD' 64-bit register.
6424 */
6425IEM_CIMPL_DEF_2(iemCImpl_vmread64_reg, uint64_t *, pu64Dst, uint64_t, u64FieldEnc)
6426{
6427 return iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, NULL /* pExitInfo */);
6428}
6429
6430
6431/**
6432 * Implements 'VMREAD' 32-bit register.
6433 */
6434IEM_CIMPL_DEF_2(iemCImpl_vmread32_reg, uint32_t *, pu32Dst, uint32_t, u32FieldEnc)
6435{
6436 return iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u32FieldEnc, NULL /* pExitInfo */);
6437}
6438
6439
6440/**
6441 * Implements 'VMREAD' memory.
6442 */
6443IEM_CIMPL_DEF_4(iemCImpl_vmread_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrDst, uint32_t, u32FieldEnc)
6444{
6445 return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, u32FieldEnc, NULL /* pExitInfo */);
6446}
6447
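/*
 * Illustrative sketch (assumption, not part of this file): guest-side VMREAD/VMWRITE
 * on the current VMCS, using the same field encodings the workers above take (for
 * instance 0x681e selects the guest RIP field). Wrapper names are made up for this
 * example.
 */
#if 0 /* example only */
static inline bool exampleVmread(uint64_t uFieldEnc, uint64_t *puValue)
{
    uint8_t fFailed;
    __asm__ __volatile__("vmread %2, %0\n\t"
                         "setna %1"
                         : "=rm" (*puValue), "=q" (fFailed)
                         : "r" (uFieldEnc)
                         : "cc");
    return !fFailed;
}

static inline bool exampleVmwrite(uint64_t uFieldEnc, uint64_t uValue)
{
    uint8_t fFailed;
    __asm__ __volatile__("vmwrite %1, %2\n\t"
                         "setna %0"
                         : "=q" (fFailed)
                         : "rm" (uValue), "r" (uFieldEnc)
                         : "cc");
    return !fFailed;
}
#endif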
6448#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
6449
6450
6451/**
6452 * Implements 'VMCALL'.
6453 */
6454IEM_CIMPL_DEF_0(iemCImpl_vmcall)
6455{
6456#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6457 /* Nested-guest intercept. */
6458 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6459 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMCALL, cbInstr);
6460#endif
6461
6462 /* Join forces with vmmcall. */
6463 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
6464}
6465