VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h@ 75412

Last change on this file: r75412, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 linux build fixes.

/* $Id: IEMAllCImplVmxInstr.cpp.h 75412 2018-11-13 04:06:57Z vboxsync $ */
/** @file
 * IEM - VT-x instruction implementation.
 */

/*
 * Copyright (C) 2011-2018 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/** @todo NSTVMX: The following VM-exit intercepts are pending:
 *        VMX_EXIT_IO_SMI
 *        VMX_EXIT_SMI
 *        VMX_EXIT_INT_WINDOW
 *        VMX_EXIT_NMI_WINDOW
 *        VMX_EXIT_GETSEC
 *        VMX_EXIT_RSM
 *        VMX_EXIT_MTF
 *        VMX_EXIT_MONITOR (APIC access VM-exit caused by MONITOR pending)
 *        VMX_EXIT_ERR_MACHINE_CHECK
 *        VMX_EXIT_TPR_BELOW_THRESHOLD
 *        VMX_EXIT_APIC_ACCESS
 *        VMX_EXIT_VIRTUALIZED_EOI
 *        VMX_EXIT_EPT_VIOLATION
 *        VMX_EXIT_EPT_MISCONFIG
 *        VMX_EXIT_INVEPT
 *        VMX_EXIT_PREEMPT_TIMER
 *        VMX_EXIT_INVVPID
 *        VMX_EXIT_APIC_WRITE
 *        VMX_EXIT_RDRAND
 *        VMX_EXIT_VMFUNC
 *        VMX_EXIT_ENCLS
 *        VMX_EXIT_RDSEED
 *        VMX_EXIT_PML_FULL
 *        VMX_EXIT_XSAVES
 *        VMX_EXIT_XRSTORS
 */

/**
 * Map of VMCS field encodings to their virtual-VMCS structure offsets.
 *
 * The first array dimension is the VMCS field encoding's Width OR'ed with its
 * Type, and the second dimension is the Index; see VMXVMCSFIELDENC.
 */
uint16_t const g_aoffVmcsMap[16][VMX_V_VMCS_MAX_INDEX + 1] =
{
    /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
    {
        /*     0 */ RT_UOFFSETOF(VMXVVMCS, u16Vpid),
        /*     1 */ RT_UOFFSETOF(VMXVVMCS, u16PostIntNotifyVector),
        /*     2 */ RT_UOFFSETOF(VMXVVMCS, u16EptpIndex),
        /*  3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
    },
    /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
    {
        /*   0-7 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /*  8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 24-25 */ UINT16_MAX, UINT16_MAX
    },
    /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
    {
        /*     0 */ RT_UOFFSETOF(VMXVVMCS, GuestEs),
        /*     1 */ RT_UOFFSETOF(VMXVVMCS, GuestCs),
        /*     2 */ RT_UOFFSETOF(VMXVVMCS, GuestSs),
        /*     3 */ RT_UOFFSETOF(VMXVVMCS, GuestDs),
        /*     4 */ RT_UOFFSETOF(VMXVVMCS, GuestFs),
        /*     5 */ RT_UOFFSETOF(VMXVVMCS, GuestGs),
        /*     6 */ RT_UOFFSETOF(VMXVVMCS, GuestLdtr),
        /*     7 */ RT_UOFFSETOF(VMXVVMCS, GuestTr),
        /*     8 */ RT_UOFFSETOF(VMXVVMCS, u16GuestIntStatus),
        /*     9 */ RT_UOFFSETOF(VMXVVMCS, u16PmlIndex),
        /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
    },
    /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
    {
        /*     0 */ RT_UOFFSETOF(VMXVVMCS, HostEs),
        /*     1 */ RT_UOFFSETOF(VMXVVMCS, HostCs),
        /*     2 */ RT_UOFFSETOF(VMXVVMCS, HostSs),
        /*     3 */ RT_UOFFSETOF(VMXVVMCS, HostDs),
        /*     4 */ RT_UOFFSETOF(VMXVVMCS, HostFs),
        /*     5 */ RT_UOFFSETOF(VMXVVMCS, HostGs),
        /*     6 */ RT_UOFFSETOF(VMXVVMCS, HostTr),
        /*  7-14 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 23-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
    },
    /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
    {
        /*     0 */ RT_UOFFSETOF(VMXVVMCS, u64AddrIoBitmapA),
        /*     1 */ RT_UOFFSETOF(VMXVVMCS, u64AddrIoBitmapB),
        /*     2 */ RT_UOFFSETOF(VMXVVMCS, u64AddrMsrBitmap),
        /*     3 */ RT_UOFFSETOF(VMXVVMCS, u64AddrExitMsrStore),
        /*     4 */ RT_UOFFSETOF(VMXVVMCS, u64AddrExitMsrLoad),
        /*     5 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEntryMsrLoad),
        /*     6 */ RT_UOFFSETOF(VMXVVMCS, u64ExecVmcsPtr),
        /*     7 */ RT_UOFFSETOF(VMXVVMCS, u64AddrPml),
        /*     8 */ RT_UOFFSETOF(VMXVVMCS, u64TscOffset),
        /*     9 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVirtApic),
        /*    10 */ RT_UOFFSETOF(VMXVVMCS, u64AddrApicAccess),
        /*    11 */ RT_UOFFSETOF(VMXVVMCS, u64AddrPostedIntDesc),
        /*    12 */ RT_UOFFSETOF(VMXVVMCS, u64VmFuncCtls),
        /*    13 */ RT_UOFFSETOF(VMXVVMCS, u64EptpPtr),
        /*    14 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap0),
        /*    15 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap1),
        /*    16 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap2),
        /*    17 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap3),
        /*    18 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEptpList),
        /*    19 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVmreadBitmap),
        /*    20 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVmwriteBitmap),
        /*    21 */ RT_UOFFSETOF(VMXVVMCS, u64AddrXcptVeInfo),
        /*    22 */ RT_UOFFSETOF(VMXVVMCS, u64XssBitmap),
        /*    23 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEnclsBitmap),
        /*    24 */ UINT16_MAX,
        /*    25 */ RT_UOFFSETOF(VMXVVMCS, u64TscMultiplier)
    },
    /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
    {
        /*     0 */ RT_UOFFSETOF(VMXVVMCS, u64RoGuestPhysAddr),
        /*   1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /*  9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /*    25 */ UINT16_MAX
    },
    /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
    {
        /*     0 */ RT_UOFFSETOF(VMXVVMCS, u64VmcsLinkPtr),
        /*     1 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDebugCtlMsr),
        /*     2 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPatMsr),
        /*     3 */ RT_UOFFSETOF(VMXVVMCS, u64GuestEferMsr),
        /*     4 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
        /*     5 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte0),
        /*     6 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte1),
        /*     7 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte2),
        /*     8 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte3),
        /*     9 */ RT_UOFFSETOF(VMXVVMCS, u64GuestBndcfgsMsr),
        /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
    },
    /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
    {
        /*     0 */ RT_UOFFSETOF(VMXVVMCS, u64HostPatMsr),
        /*     1 */ RT_UOFFSETOF(VMXVVMCS, u64HostEferMsr),
        /*     2 */ RT_UOFFSETOF(VMXVVMCS, u64HostPerfGlobalCtlMsr),
        /*  3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
    },
    /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
    {
        /*     0 */ RT_UOFFSETOF(VMXVVMCS, u32PinCtls),
        /*     1 */ RT_UOFFSETOF(VMXVVMCS, u32ProcCtls),
        /*     2 */ RT_UOFFSETOF(VMXVVMCS, u32XcptBitmap),
        /*     3 */ RT_UOFFSETOF(VMXVVMCS, u32XcptPFMask),
        /*     4 */ RT_UOFFSETOF(VMXVVMCS, u32XcptPFMatch),
        /*     5 */ RT_UOFFSETOF(VMXVVMCS, u32Cr3TargetCount),
        /*     6 */ RT_UOFFSETOF(VMXVVMCS, u32ExitCtls),
        /*     7 */ RT_UOFFSETOF(VMXVVMCS, u32ExitMsrStoreCount),
        /*     8 */ RT_UOFFSETOF(VMXVVMCS, u32ExitMsrLoadCount),
        /*     9 */ RT_UOFFSETOF(VMXVVMCS, u32EntryCtls),
        /*    10 */ RT_UOFFSETOF(VMXVVMCS, u32EntryMsrLoadCount),
        /*    11 */ RT_UOFFSETOF(VMXVVMCS, u32EntryIntInfo),
        /*    12 */ RT_UOFFSETOF(VMXVVMCS, u32EntryXcptErrCode),
        /*    13 */ RT_UOFFSETOF(VMXVVMCS, u32EntryInstrLen),
        /*    14 */ RT_UOFFSETOF(VMXVVMCS, u32TprThreshold),
        /*    15 */ RT_UOFFSETOF(VMXVVMCS, u32ProcCtls2),
        /*    16 */ RT_UOFFSETOF(VMXVVMCS, u32PleGap),
        /*    17 */ RT_UOFFSETOF(VMXVVMCS, u32PleWindow),
        /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
    },
    /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
    {
        /*     0 */ RT_UOFFSETOF(VMXVVMCS, u32RoVmInstrError),
        /*     1 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitReason),
        /*     2 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitIntInfo),
        /*     3 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitIntErrCode),
        /*     4 */ RT_UOFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
        /*     5 */ RT_UOFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
        /*     6 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitInstrLen),
        /*     7 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitInstrInfo),
        /*  8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 24-25 */ UINT16_MAX, UINT16_MAX
    },
    /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
    {
        /*     0 */ RT_UOFFSETOF(VMXVVMCS, u32GuestEsLimit),
        /*     1 */ RT_UOFFSETOF(VMXVVMCS, u32GuestCsLimit),
        /*     2 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSsLimit),
        /*     3 */ RT_UOFFSETOF(VMXVVMCS, u32GuestDsLimit),
        /*     4 */ RT_UOFFSETOF(VMXVVMCS, u32GuestFsLimit),
        /*     5 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGsLimit),
        /*     6 */ RT_UOFFSETOF(VMXVVMCS, u32GuestLdtrLimit),
        /*     7 */ RT_UOFFSETOF(VMXVVMCS, u32GuestTrLimit),
        /*     8 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGdtrLimit),
        /*     9 */ RT_UOFFSETOF(VMXVVMCS, u32GuestIdtrLimit),
        /*    10 */ RT_UOFFSETOF(VMXVVMCS, u32GuestEsAttr),
        /*    11 */ RT_UOFFSETOF(VMXVVMCS, u32GuestCsAttr),
        /*    12 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSsAttr),
        /*    13 */ RT_UOFFSETOF(VMXVVMCS, u32GuestDsAttr),
        /*    14 */ RT_UOFFSETOF(VMXVVMCS, u32GuestFsAttr),
        /*    15 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGsAttr),
        /*    16 */ RT_UOFFSETOF(VMXVVMCS, u32GuestLdtrAttr),
        /*    17 */ RT_UOFFSETOF(VMXVVMCS, u32GuestTrAttr),
        /*    18 */ RT_UOFFSETOF(VMXVVMCS, u32GuestIntrState),
        /*    19 */ RT_UOFFSETOF(VMXVVMCS, u32GuestActivityState),
        /*    20 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSmBase),
        /*    21 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSysenterCS),
        /*    22 */ UINT16_MAX,
        /*    23 */ RT_UOFFSETOF(VMXVVMCS, u32PreemptTimer),
        /* 24-25 */ UINT16_MAX, UINT16_MAX
    },
    /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
    {
        /*     0 */ RT_UOFFSETOF(VMXVVMCS, u32HostSysenterCs),
        /*   1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /*  9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /*    25 */ UINT16_MAX
    },
    /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_CONTROL: */
    {
        /*     0 */ RT_UOFFSETOF(VMXVVMCS, u64Cr0Mask),
        /*     1 */ RT_UOFFSETOF(VMXVVMCS, u64Cr4Mask),
        /*     2 */ RT_UOFFSETOF(VMXVVMCS, u64Cr0ReadShadow),
        /*     3 */ RT_UOFFSETOF(VMXVVMCS, u64Cr4ReadShadow),
        /*     4 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target0),
        /*     5 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target1),
        /*     6 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target2),
        /*     7 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target3),
        /*  8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 24-25 */ UINT16_MAX, UINT16_MAX
    },
    /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
    {
        /*     0 */ RT_UOFFSETOF(VMXVVMCS, u64RoExitQual),
        /*     1 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRcx),
        /*     2 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRsi),
        /*     3 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRdi),
        /*     4 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRip),
        /*     5 */ RT_UOFFSETOF(VMXVVMCS, u64RoGuestLinearAddr),
        /*  6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 22-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
    },
    /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
    {
        /*     0 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr0),
        /*     1 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr3),
        /*     2 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr4),
        /*     3 */ RT_UOFFSETOF(VMXVVMCS, u64GuestEsBase),
        /*     4 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCsBase),
        /*     5 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSsBase),
        /*     6 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDsBase),
        /*     7 */ RT_UOFFSETOF(VMXVVMCS, u64GuestFsBase),
        /*     8 */ RT_UOFFSETOF(VMXVVMCS, u64GuestGsBase),
        /*     9 */ RT_UOFFSETOF(VMXVVMCS, u64GuestLdtrBase),
        /*    10 */ RT_UOFFSETOF(VMXVVMCS, u64GuestTrBase),
        /*    11 */ RT_UOFFSETOF(VMXVVMCS, u64GuestGdtrBase),
        /*    12 */ RT_UOFFSETOF(VMXVVMCS, u64GuestIdtrBase),
        /*    13 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDr7),
        /*    14 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRsp),
        /*    15 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRip),
        /*    16 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRFlags),
        /*    17 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPendingDbgXcpt),
        /*    18 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSysenterEsp),
        /*    19 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSysenterEip),
        /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
    },
    /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_HOST_STATE: */
    {
        /*     0 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr0),
        /*     1 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr3),
        /*     2 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr4),
        /*     3 */ RT_UOFFSETOF(VMXVVMCS, u64HostFsBase),
        /*     4 */ RT_UOFFSETOF(VMXVVMCS, u64HostGsBase),
        /*     5 */ RT_UOFFSETOF(VMXVVMCS, u64HostTrBase),
        /*     6 */ RT_UOFFSETOF(VMXVVMCS, u64HostGdtrBase),
        /*     7 */ RT_UOFFSETOF(VMXVVMCS, u64HostIdtrBase),
        /*     8 */ RT_UOFFSETOF(VMXVVMCS, u64HostSysenterEsp),
        /*     9 */ RT_UOFFSETOF(VMXVVMCS, u64HostSysenterEip),
        /*    10 */ RT_UOFFSETOF(VMXVVMCS, u64HostRsp),
        /*    11 */ RT_UOFFSETOF(VMXVVMCS, u64HostRip),
        /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
        /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
    }
};

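/*
 * Illustrative sketch (editorial addition, not part of the original sources):
 * how a VMCS field encoding is decomposed to index the map above.  The
 * VMX_BF_VMCS_ENC_WIDTH and VMX_BF_VMCS_ENC_TYPE bit-field names are assumed
 * here by analogy with VMX_BF_VMCS_ENC_INDEX, which is used later in this file.
 */
#if 0 /* illustration only, never compiled */
static uint16_t iemVmxIllustrateVmcsFieldOffset(uint32_t uFieldEnc)
{
    uint8_t const uWidth     = RT_BF_GET(uFieldEnc, VMX_BF_VMCS_ENC_WIDTH);    /* 16/64/32/natural. */
    uint8_t const uType      = RT_BF_GET(uFieldEnc, VMX_BF_VMCS_ENC_TYPE);     /* ctrl/ro/guest/host. */
    uint8_t const uWidthType = (uWidth << 2) | uType;                          /* First array dimension. */
    uint8_t const uIndex     = RT_BF_GET(uFieldEnc, VMX_BF_VMCS_ENC_INDEX);    /* Second array dimension. */
    Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
    return g_aoffVmcsMap[uWidthType][uIndex];   /* UINT16_MAX marks an unmapped/unsupported field. */
}
#endif
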
/**
 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
 * relative offsets.
 */
# ifdef IEM_WITH_CODE_TLB
#  define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm)         do { } while (0)
#  define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib)               do { } while (0)
#  define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp)         do { } while (0)
#  define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp)   do { } while (0)
#  define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp)         do { } while (0)
#  define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp)   do { } while (0)
#  define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp)  do { } while (0)
#  define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp)   do { } while (0)
#  error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
# else  /* !IEM_WITH_CODE_TLB */
#  define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
    do \
    { \
        Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
        (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
    } while (0)

#  define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib)      IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)

#  define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
    do \
    { \
        Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
        uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
        uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
        (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
    } while (0)

#  define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
    do \
    { \
        Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
        (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
    } while (0)

#  define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
    do \
    { \
        Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
        uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
        uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
        uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
        uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
        (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
    } while (0)

#  define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
    do \
    { \
        Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
        (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
    } while (0)

#  define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
    do \
    { \
        Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
        (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
    } while (0)

#  define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
    do \
    { \
        Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
        uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
        uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
        uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
        uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
        (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
    } while (0)
# endif /* !IEM_WITH_CODE_TLB */

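/*
 * Usage sketch (editorial addition): fetching a 16-bit displacement that
 * immediately follows the ModR/M byte out of the decoded opcode buffer; the
 * two bytes are assembled little-endian via RT_MAKE_U16 as defined above.
 */
#if 0 /* illustration only, never compiled */
{
    uint16_t      u16Disp;
    uint8_t const offDisp = pVCpu->iem.s.offModRm + 1;  /* Displacement follows the ModR/M byte. */
    IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
}
#endif
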
/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
#define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu)        ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)

/** Whether a shadow VMCS is present for the given VCPU. */
#define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu)        RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)

/** Gets the VMXON region pointer. */
#define IEM_VMX_GET_VMXON_PTR(a_pVCpu)          ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)

/** Gets the guest-physical address of the current VMCS for the given VCPU. */
#define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu)       ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)

/** Whether a current VMCS is present for the given VCPU. */
#define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu)       RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)

/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
#define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
    do \
    { \
        Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
        (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
    } while (0)

/** Clears any current VMCS for the given VCPU. */
#define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
    do \
    { \
        (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
    } while (0)

/** Checks for VMX instructions that require the CPU to be in VMX operation,
 *  raising \#UD otherwise.
 * @note Any changes here, check if IEMOP_HLP_IN_VMX_OPERATION needs updating. */
#define IEM_VMX_IN_VMX_OPERATION(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
    do \
    { \
        if (IEM_VMX_IS_ROOT_MODE(a_pVCpu)) \
        { /* likely */ } \
        else \
        { \
            Log((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
            (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
            return iemRaiseUndefinedOpcode(a_pVCpu); \
        } \
    } while (0)

/** Marks a VM-entry failure with a diagnostic reason, logs and returns. */
#define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_VmxDiag) \
    do \
    { \
        Log(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_VmxDiag), \
            HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
        (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
        return VERR_VMX_VMENTRY_FAILED; \
    } while (0)

/** Marks a VM-exit failure with a diagnostic reason, logs and returns. */
#define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
    do \
    { \
        Log(("VM-exit failed! uExitReason=%u enmDiag=%u (%s) -> %s\n", (a_uExitReason), (a_VmxDiag), \
            HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
        (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
        return VERR_VMX_VMEXIT_FAILED; \
    } while (0)


/**
 * Returns whether the given VMCS field is valid and supported by our emulation.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   u64FieldEnc     The VMCS field encoding.
 *
 * @remarks This takes into account the CPU features exposed to the guest.
 */
IEM_STATIC bool iemVmxIsVmcsFieldValid(PVMCPU pVCpu, uint64_t u64FieldEnc)
{
    uint32_t const uFieldEncHi = RT_HI_U32(u64FieldEnc);
    uint32_t const uFieldEncLo = RT_LO_U32(u64FieldEnc);
    if (!uFieldEncHi)
    { /* likely */ }
    else
        return false;

    PCCPUMFEATURES pFeat = IEM_GET_GUEST_CPU_FEATURES(pVCpu);
    switch (uFieldEncLo)
    {
        /*
         * 16-bit fields.
         */
        /* Control fields. */
        case VMX_VMCS16_VPID:                       return pFeat->fVmxVpid;
        case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR:   return pFeat->fVmxPostedInt;
        case VMX_VMCS16_EPTP_INDEX:                 return pFeat->fVmxEptXcptVe;

        /* Guest-state fields. */
        case VMX_VMCS16_GUEST_ES_SEL:
        case VMX_VMCS16_GUEST_CS_SEL:
        case VMX_VMCS16_GUEST_SS_SEL:
        case VMX_VMCS16_GUEST_DS_SEL:
        case VMX_VMCS16_GUEST_FS_SEL:
        case VMX_VMCS16_GUEST_GS_SEL:
        case VMX_VMCS16_GUEST_LDTR_SEL:
        case VMX_VMCS16_GUEST_TR_SEL:               return true;
        case VMX_VMCS16_GUEST_INTR_STATUS:          return pFeat->fVmxVirtIntDelivery;
        case VMX_VMCS16_GUEST_PML_INDEX:            return pFeat->fVmxPml;

        /* Host-state fields. */
        case VMX_VMCS16_HOST_ES_SEL:
        case VMX_VMCS16_HOST_CS_SEL:
        case VMX_VMCS16_HOST_SS_SEL:
        case VMX_VMCS16_HOST_DS_SEL:
        case VMX_VMCS16_HOST_FS_SEL:
        case VMX_VMCS16_HOST_GS_SEL:
        case VMX_VMCS16_HOST_TR_SEL:                return true;

        /*
         * 64-bit fields.
         */
        /* Control fields. */
        case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
        case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
        case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
        case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH:      return pFeat->fVmxUseIoBitmaps;
        case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
        case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH:       return pFeat->fVmxUseMsrBitmaps;
        case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
        case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
        case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
        case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
        case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
        case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
        case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
        case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH:    return true;
        case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
        case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH:    return pFeat->fVmxPml;
        case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
        case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH:       return true;
        case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
        case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
        case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
        case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH:  return pFeat->fVmxVirtApicAccess;
        case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
        case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
        case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
        case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH:     return pFeat->fVmxVmFunc;
        case VMX_VMCS64_CTRL_EPTP_FULL:
        case VMX_VMCS64_CTRL_EPTP_HIGH:             return pFeat->fVmxEpt;
        case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
        case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
        case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
        case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
        case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
        case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
        case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
        case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH:     return pFeat->fVmxVirtIntDelivery;
        case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
        case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
        {
            uint64_t const uVmFuncMsr = CPUMGetGuestIa32VmxVmFunc(pVCpu);
            return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
        }
        case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
        case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
        case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
        case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH:   return pFeat->fVmxVmcsShadowing;
        case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
        case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
        case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
        case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
        case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
        case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
        case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
        case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH:   return pFeat->fVmxUseTscScaling;

        /* Read-only data fields. */
        case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
        case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH:    return pFeat->fVmxEpt;

        /* Guest-state fields. */
        case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
        case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
        case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
        case VMX_VMCS64_GUEST_DEBUGCTL_HIGH:        return true;
        case VMX_VMCS64_GUEST_PAT_FULL:
        case VMX_VMCS64_GUEST_PAT_HIGH:             return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
        case VMX_VMCS64_GUEST_EFER_FULL:
        case VMX_VMCS64_GUEST_EFER_HIGH:            return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
        case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
        case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
        case VMX_VMCS64_GUEST_PDPTE0_FULL:
        case VMX_VMCS64_GUEST_PDPTE0_HIGH:
        case VMX_VMCS64_GUEST_PDPTE1_FULL:
        case VMX_VMCS64_GUEST_PDPTE1_HIGH:
        case VMX_VMCS64_GUEST_PDPTE2_FULL:
        case VMX_VMCS64_GUEST_PDPTE2_HIGH:
        case VMX_VMCS64_GUEST_PDPTE3_FULL:
        case VMX_VMCS64_GUEST_PDPTE3_HIGH:          return pFeat->fVmxEpt;
        case VMX_VMCS64_GUEST_BNDCFGS_FULL:
        case VMX_VMCS64_GUEST_BNDCFGS_HIGH:         return false;

        /* Host-state fields. */
        case VMX_VMCS64_HOST_PAT_FULL:
        case VMX_VMCS64_HOST_PAT_HIGH:              return pFeat->fVmxExitLoadPatMsr;
        case VMX_VMCS64_HOST_EFER_FULL:
        case VMX_VMCS64_HOST_EFER_HIGH:             return pFeat->fVmxExitLoadEferMsr;
        case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
        case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;

        /*
         * 32-bit fields.
         */
        /* Control fields. */
        case VMX_VMCS32_CTRL_PIN_EXEC:
        case VMX_VMCS32_CTRL_PROC_EXEC:
        case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
        case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
        case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
        case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
        case VMX_VMCS32_CTRL_EXIT:
        case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
        case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
        case VMX_VMCS32_CTRL_ENTRY:
        case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
        case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
        case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
        case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH:    return true;
        case VMX_VMCS32_CTRL_TPR_THRESHOLD:         return pFeat->fVmxUseTprShadow;
        case VMX_VMCS32_CTRL_PROC_EXEC2:            return pFeat->fVmxSecondaryExecCtls;
        case VMX_VMCS32_CTRL_PLE_GAP:
        case VMX_VMCS32_CTRL_PLE_WINDOW:            return pFeat->fVmxPauseLoopExit;

        /* Read-only data fields. */
        case VMX_VMCS32_RO_VM_INSTR_ERROR:
        case VMX_VMCS32_RO_EXIT_REASON:
        case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
        case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
        case VMX_VMCS32_RO_IDT_VECTORING_INFO:
        case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
        case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
        case VMX_VMCS32_RO_EXIT_INSTR_INFO:         return true;

        /* Guest-state fields. */
        case VMX_VMCS32_GUEST_ES_LIMIT:
        case VMX_VMCS32_GUEST_CS_LIMIT:
        case VMX_VMCS32_GUEST_SS_LIMIT:
        case VMX_VMCS32_GUEST_DS_LIMIT:
        case VMX_VMCS32_GUEST_FS_LIMIT:
        case VMX_VMCS32_GUEST_GS_LIMIT:
        case VMX_VMCS32_GUEST_LDTR_LIMIT:
        case VMX_VMCS32_GUEST_TR_LIMIT:
        case VMX_VMCS32_GUEST_GDTR_LIMIT:
        case VMX_VMCS32_GUEST_IDTR_LIMIT:
        case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
        case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
        case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
        case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
        case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
        case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
        case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
        case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
        case VMX_VMCS32_GUEST_INT_STATE:
        case VMX_VMCS32_GUEST_ACTIVITY_STATE:
        case VMX_VMCS32_GUEST_SMBASE:
        case VMX_VMCS32_GUEST_SYSENTER_CS:          return true;
        case VMX_VMCS32_PREEMPT_TIMER_VALUE:        return pFeat->fVmxPreemptTimer;

        /* Host-state fields. */
        case VMX_VMCS32_HOST_SYSENTER_CS:           return true;

        /*
         * Natural-width fields.
         */
        /* Control fields. */
        case VMX_VMCS_CTRL_CR0_MASK:
        case VMX_VMCS_CTRL_CR4_MASK:
        case VMX_VMCS_CTRL_CR0_READ_SHADOW:
        case VMX_VMCS_CTRL_CR4_READ_SHADOW:
        case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
        case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
        case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
        case VMX_VMCS_CTRL_CR3_TARGET_VAL3:         return true;

        /* Read-only data fields. */
        case VMX_VMCS_RO_EXIT_QUALIFICATION:
        case VMX_VMCS_RO_IO_RCX:
        case VMX_VMCS_RO_IO_RSI:
        case VMX_VMCS_RO_IO_RDI:
        case VMX_VMCS_RO_IO_RIP:
        case VMX_VMCS_RO_GUEST_LINEAR_ADDR:         return true;

        /* Guest-state fields. */
        case VMX_VMCS_GUEST_CR0:
        case VMX_VMCS_GUEST_CR3:
        case VMX_VMCS_GUEST_CR4:
        case VMX_VMCS_GUEST_ES_BASE:
        case VMX_VMCS_GUEST_CS_BASE:
        case VMX_VMCS_GUEST_SS_BASE:
        case VMX_VMCS_GUEST_DS_BASE:
        case VMX_VMCS_GUEST_FS_BASE:
        case VMX_VMCS_GUEST_GS_BASE:
        case VMX_VMCS_GUEST_LDTR_BASE:
        case VMX_VMCS_GUEST_TR_BASE:
        case VMX_VMCS_GUEST_GDTR_BASE:
        case VMX_VMCS_GUEST_IDTR_BASE:
        case VMX_VMCS_GUEST_DR7:
        case VMX_VMCS_GUEST_RSP:
        case VMX_VMCS_GUEST_RIP:
        case VMX_VMCS_GUEST_RFLAGS:
        case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
        case VMX_VMCS_GUEST_SYSENTER_ESP:
        case VMX_VMCS_GUEST_SYSENTER_EIP:           return true;

        /* Host-state fields. */
        case VMX_VMCS_HOST_CR0:
        case VMX_VMCS_HOST_CR3:
        case VMX_VMCS_HOST_CR4:
        case VMX_VMCS_HOST_FS_BASE:
        case VMX_VMCS_HOST_GS_BASE:
        case VMX_VMCS_HOST_TR_BASE:
        case VMX_VMCS_HOST_GDTR_BASE:
        case VMX_VMCS_HOST_IDTR_BASE:
        case VMX_VMCS_HOST_SYSENTER_ESP:
        case VMX_VMCS_HOST_SYSENTER_EIP:
        case VMX_VMCS_HOST_RSP:
        case VMX_VMCS_HOST_RIP:                     return true;
    }

    return false;
}


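/*
 * Usage sketch (editorial addition): a VMREAD/VMWRITE implementation would
 * reject unsupported field encodings before touching the VMCS.  The error-code
 * enumerator name used here is assumed for illustration; iemVmxVmFailValid is
 * defined later in this file.
 */
#if 0 /* illustration only, never compiled */
if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
{
    Log(("vmread: invalid/unsupported VMCS field %#RX64 -> VMFail\n", u64FieldEnc));
    iemVmxVmFailValid(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT); /* Enumerator name assumed. */
    /* ...advance RIP and return... */
}
#endif
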
/**
 * Gets a host selector from the VMCS.
 *
 * @returns The host selector.
 * @param   pVmcs       Pointer to the virtual VMCS.
 * @param   iSegReg     The index of the segment register (X86_SREG_XXX).
 */
DECLINLINE(RTSEL) iemVmxVmcsGetHostSelReg(PCVMXVVMCS pVmcs, uint8_t iSegReg)
{
    Assert(iSegReg < X86_SREG_COUNT);
    RTSEL HostSel;
    uint8_t  const uWidth     = VMX_VMCS_ENC_WIDTH_16BIT;
    uint8_t  const uType      = VMX_VMCS_ENC_TYPE_HOST_STATE;
    uint8_t  const uWidthType = (uWidth << 2) | uType;
    uint8_t  const uIndex     = iSegReg + RT_BF_GET(VMX_VMCS16_HOST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
    Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
    uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
    uint8_t const *pbVmcs     = (uint8_t *)pVmcs;
    uint8_t const *pbField    = pbVmcs + offField;
    HostSel = *(uint16_t *)pbField;
    return HostSel;
}


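/*
 * Worked example (editorial addition): for iSegReg = X86_SREG_SS (2) the
 * lookup above selects the 16-bit/host-state row of g_aoffVmcsMap, and with a
 * base index of 0 for VMX_VMCS16_HOST_ES_SEL, uIndex = 2 + 0 picks the offset
 * of the HostSs member, so the selector is read straight out of the
 * virtual-VMCS structure.
 */
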
/**
 * Sets a guest segment register in the VMCS.
 *
 * @param   pVmcs       Pointer to the virtual VMCS.
 * @param   iSegReg     The index of the segment register (X86_SREG_XXX).
 * @param   pSelReg     Pointer to the segment register.
 */
IEM_STATIC void iemVmxVmcsSetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCCPUMSELREG pSelReg)
{
    Assert(pSelReg);
    Assert(iSegReg < X86_SREG_COUNT);

    /* Selector. */
    {
        uint8_t  const uWidth     = VMX_VMCS_ENC_WIDTH_16BIT;
        uint8_t  const uType      = VMX_VMCS_ENC_TYPE_GUEST_STATE;
        uint8_t  const uWidthType = (uWidth << 2) | uType;
        uint8_t  const uIndex     = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
        Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
        uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
        uint8_t       *pbVmcs     = (uint8_t *)pVmcs;
        uint8_t       *pbField    = pbVmcs + offField;
        *(uint16_t *)pbField      = pSelReg->Sel;
    }

    /* Limit. */
    {
        uint8_t  const uWidth     = VMX_VMCS_ENC_WIDTH_32BIT;
        uint8_t  const uType      = VMX_VMCS_ENC_TYPE_GUEST_STATE;
        uint8_t  const uWidthType = (uWidth << 2) | uType;
        uint8_t  const uIndex     = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
        Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
        uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
        uint8_t       *pbVmcs     = (uint8_t *)pVmcs;
        uint8_t       *pbField    = pbVmcs + offField;
        *(uint32_t *)pbField      = pSelReg->u32Limit;
    }

    /* Base. */
    {
        uint8_t  const uWidth     = VMX_VMCS_ENC_WIDTH_NATURAL;
        uint8_t  const uType      = VMX_VMCS_ENC_TYPE_GUEST_STATE;
        uint8_t  const uWidthType = (uWidth << 2) | uType;
        uint8_t  const uIndex     = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
        Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
        uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
        uint8_t       *pbVmcs     = (uint8_t *)pVmcs;
        uint8_t       *pbField    = pbVmcs + offField;
        *(uint64_t *)pbField      = pSelReg->u64Base;
    }

    /* Attributes. */
    {
        uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT  | X86DESCATTR_DPL | X86DESCATTR_P
                                      | X86DESCATTR_AVL  | X86DESCATTR_L   | X86DESCATTR_D   | X86DESCATTR_G
                                      | X86DESCATTR_UNUSABLE;
        uint8_t  const uWidth     = VMX_VMCS_ENC_WIDTH_32BIT;
        uint8_t  const uType      = VMX_VMCS_ENC_TYPE_GUEST_STATE;
        uint8_t  const uWidthType = (uWidth << 2) | uType;
        uint8_t  const uIndex     = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
        Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
        uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
        uint8_t       *pbVmcs     = (uint8_t *)pVmcs;
        uint8_t       *pbField    = pbVmcs + offField;
        *(uint32_t *)pbField      = pSelReg->Attr.u & fValidAttrMask;
    }
}


/**
 * Gets a guest segment register from the VMCS.
 *
 * @returns VBox status code.
 * @param   pVmcs       Pointer to the virtual VMCS.
 * @param   iSegReg     The index of the segment register (X86_SREG_XXX).
 * @param   pSelReg     Where to store the segment register (only updated when
 *                      VINF_SUCCESS is returned).
 *
 * @remarks Warning! This does not validate the contents of the retrieved segment
 *          register.
 */
IEM_STATIC int iemVmxVmcsGetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCPUMSELREG pSelReg)
{
    Assert(pSelReg);
    Assert(iSegReg < X86_SREG_COUNT);

    /* Selector. */
    uint16_t u16Sel;
    {
        uint8_t  const uWidth     = VMX_VMCS_ENC_WIDTH_16BIT;
        uint8_t  const uType      = VMX_VMCS_ENC_TYPE_GUEST_STATE;
        uint8_t  const uWidthType = (uWidth << 2) | uType;
        uint8_t  const uIndex     = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
        AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
        uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
        uint8_t const *pbVmcs     = (uint8_t *)pVmcs;
        uint8_t const *pbField    = pbVmcs + offField;
        u16Sel = *(uint16_t *)pbField;
    }

    /* Limit. */
    uint32_t u32Limit;
    {
        uint8_t  const uWidth     = VMX_VMCS_ENC_WIDTH_32BIT;
        uint8_t  const uType      = VMX_VMCS_ENC_TYPE_GUEST_STATE;
        uint8_t  const uWidthType = (uWidth << 2) | uType;
        uint8_t  const uIndex     = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
        AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
        uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
        uint8_t const *pbVmcs     = (uint8_t *)pVmcs;
        uint8_t const *pbField    = pbVmcs + offField;
        u32Limit = *(uint32_t *)pbField;
    }

    /* Base. */
    uint64_t u64Base;
    {
        uint8_t  const uWidth     = VMX_VMCS_ENC_WIDTH_NATURAL;
        uint8_t  const uType      = VMX_VMCS_ENC_TYPE_GUEST_STATE;
        uint8_t  const uWidthType = (uWidth << 2) | uType;
        uint8_t  const uIndex     = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
        AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
        uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
        uint8_t const *pbVmcs     = (uint8_t *)pVmcs;
        uint8_t const *pbField    = pbVmcs + offField;
        u64Base = *(uint64_t *)pbField;
        /** @todo NSTVMX: Should we zero out high bits here for 32-bit virtual CPUs? */
    }

    /* Attributes. */
    uint32_t u32Attr;
    {
        uint8_t  const uWidth     = VMX_VMCS_ENC_WIDTH_32BIT;
        uint8_t  const uType      = VMX_VMCS_ENC_TYPE_GUEST_STATE;
        uint8_t  const uWidthType = (uWidth << 2) | uType;
        uint8_t  const uIndex     = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
        AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
        uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
        uint8_t const *pbVmcs     = (uint8_t *)pVmcs;
        uint8_t const *pbField    = pbVmcs + offField;
        u32Attr = *(uint32_t *)pbField;
    }

    pSelReg->Sel      = u16Sel;
    pSelReg->ValidSel = u16Sel;
    pSelReg->fFlags   = CPUMSELREG_FLAGS_VALID;
    pSelReg->u32Limit = u32Limit;
    pSelReg->u64Base  = u64Base;
    pSelReg->Attr.u   = u32Attr;
    return VINF_SUCCESS;
}


/**
 * Gets a CR3-target value from the VMCS.
 *
 * @returns The CR3-target value.
 * @param   pVmcs           Pointer to the virtual VMCS.
 * @param   idxCr3Target    The index of the CR3-target value to retrieve.
 */
DECLINLINE(uint64_t) iemVmxVmcsGetCr3TargetValue(PCVMXVVMCS pVmcs, uint8_t idxCr3Target)
{
    Assert(idxCr3Target < VMX_V_CR3_TARGET_COUNT);
    uint8_t  const uWidth          = VMX_VMCS_ENC_WIDTH_NATURAL;
    uint8_t  const uType           = VMX_VMCS_ENC_TYPE_CONTROL;
    uint8_t  const uWidthType      = (uWidth << 2) | uType;
    uint8_t  const uIndex          = idxCr3Target + RT_BF_GET(VMX_VMCS_CTRL_CR3_TARGET_VAL0, VMX_BF_VMCS_ENC_INDEX);
    Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
    uint16_t const offField        = g_aoffVmcsMap[uWidthType][uIndex];
    uint8_t const *pbVmcs          = (uint8_t *)pVmcs;
    uint8_t const *pbField         = pbVmcs + offField;
    uint64_t const uCr3TargetValue = *(uint64_t *)pbField;

    return uCr3TargetValue;
}


/**
 * Applies the guest/host mask and the read-shadow to a guest CR0/CR4 value,
 * yielding the value a nested-guest sees on a CR0/CR4 read.
 *
 * @returns The masked CR0/CR4.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   iCrReg      The control register (either CR0 or CR4).
 * @param   uGuestCrX   The current guest CR0 or guest CR4.
 */
IEM_STATIC uint64_t iemVmxMaskCr0CR4(PVMCPU pVCpu, uint8_t iCrReg, uint64_t uGuestCrX)
{
    Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
    Assert(iCrReg == 0 || iCrReg == 4);

    PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    Assert(pVmcs);

    /*
     * For each CR0 or CR4 bit owned by the host, the corresponding bit is loaded from the
     * CR0 read shadow or CR4 read shadow. For each CR0 or CR4 bit that is not owned by the
     * host, the corresponding bit from the guest CR0 or guest CR4 is loaded.
     *
     * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
     */
    uint64_t fGstHostMask;
    uint64_t fReadShadow;
    if (iCrReg == 0)
    {
        fGstHostMask = pVmcs->u64Cr0Mask.u;
        fReadShadow  = pVmcs->u64Cr0ReadShadow.u;
    }
    else
    {
        fGstHostMask = pVmcs->u64Cr4Mask.u;
        fReadShadow  = pVmcs->u64Cr4ReadShadow.u;
    }

    uint64_t const fMaskedCrX = (fReadShadow & fGstHostMask) | (uGuestCrX & ~fGstHostMask);
    return fMaskedCrX;
}


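/*
 * Worked example (editorial addition): assume the nested hypervisor owns
 * CR0.MP (guest/host mask = X86_CR0_MP), its read shadow has MP set, and the
 * actual guest CR0 is just X86_CR0_PE:
 *
 *     fGstHostMask = X86_CR0_MP;  fReadShadow = X86_CR0_MP;  uGuestCrX = X86_CR0_PE;
 *     fMaskedCrX   = (X86_CR0_MP & X86_CR0_MP) | (X86_CR0_PE & ~X86_CR0_MP)
 *                  =  X86_CR0_MP | X86_CR0_PE;
 *
 * Host-owned bits thus come from the read shadow, all other bits from the guest.
 */
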
/**
 * Gets VM-exit instruction information along with any displacement for an
 * instruction VM-exit.
 *
 * @returns The VM-exit instruction information.
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uExitReason     The VM-exit reason.
 * @param   uInstrId        The VM-exit instruction identity (VMXINSTRID_XXX).
 * @param   pGCPtrDisp      Where to store the displacement field. Optional, can be
 *                          NULL.
 */
IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, PRTGCPTR pGCPtrDisp)
{
    RTGCPTR          GCPtrDisp;
    VMXEXITINSTRINFO ExitInstrInfo;
    ExitInstrInfo.u = 0;

    /*
     * Get and parse the ModR/M byte from our decoded opcodes.
     */
    uint8_t bRm;
    uint8_t const offModRm = pVCpu->iem.s.offModRm;
    IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /*
         * ModR/M indicates register addressing.
         *
         * The primary/secondary register operands are reported in the iReg1 or iReg2
         * fields depending on whether it is a read/write form.
         */
        uint8_t idxReg1;
        uint8_t idxReg2;
        if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
        {
            idxReg1 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
            idxReg2 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
        }
        else
        {
            idxReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
            idxReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
        }
        ExitInstrInfo.All.u2Scaling       = 0;
        ExitInstrInfo.All.iReg1           = idxReg1;
        ExitInstrInfo.All.u3AddrSize      = pVCpu->iem.s.enmEffAddrMode;
        ExitInstrInfo.All.fIsRegOperand   = 1;
        ExitInstrInfo.All.uOperandSize    = pVCpu->iem.s.enmEffOpSize;
        ExitInstrInfo.All.iSegReg         = 0;
        ExitInstrInfo.All.iIdxReg         = 0;
        ExitInstrInfo.All.fIdxRegInvalid  = 1;
        ExitInstrInfo.All.iBaseReg        = 0;
        ExitInstrInfo.All.fBaseRegInvalid = 1;
        ExitInstrInfo.All.iReg2           = idxReg2;

        /* Displacement not applicable for register addressing. */
        GCPtrDisp = 0;
    }
    else
    {
        /*
         * ModR/M indicates memory addressing.
         */
        uint8_t uScale        = 0;
        bool    fBaseRegValid = false;
        bool    fIdxRegValid  = false;
        uint8_t iBaseReg      = 0;
        uint8_t iIdxReg       = 0;
        if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
        {
            /*
             * Parse the ModR/M, displacement for 16-bit addressing mode.
             * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
             */
            uint16_t u16Disp = 0;
            uint8_t const offDisp = offModRm + sizeof(bRm);
            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
            {
                /* Displacement without any registers. */
                IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
            }
            else
            {
                /* Register (index and base). */
                switch (bRm & X86_MODRM_RM_MASK)
                {
                    case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
                    case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
                    case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
                    case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
                    case 4: fIdxRegValid  = true; iIdxReg  = X86_GREG_xSI; break;
                    case 5: fIdxRegValid  = true; iIdxReg  = X86_GREG_xDI; break;
                    case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
                    case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
                }

                /* Register + displacement. */
                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
                {
                    case 0:                                                  break;
                    case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
                    case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);       break;
                    default:
                    {
                        /* Register addressing, handled at the beginning. */
                        AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
                        break;
                    }
                }
            }

            Assert(!uScale);              /* There's no scaling/SIB byte for 16-bit addressing. */
            GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
        }
        else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
        {
            /*
             * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
             * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
             */
            uint32_t u32Disp = 0;
            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
            {
                /* Displacement without any registers. */
                uint8_t const offDisp = offModRm + sizeof(bRm);
                IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
            }
            else
            {
                /* Register (and perhaps scale, index and base). */
                uint8_t offDisp = offModRm + sizeof(bRm);
                iBaseReg = (bRm & X86_MODRM_RM_MASK);
                if (iBaseReg == 4)
                {
                    /* An SIB byte follows the ModR/M byte, parse it. */
                    uint8_t bSib;
                    uint8_t const offSib = offModRm + sizeof(bRm);
                    IEM_SIB_GET_U8(pVCpu, bSib, offSib);

                    /* A displacement may follow SIB, update its offset. */
                    offDisp += sizeof(bSib);

                    /* Get the scale. */
                    uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                    /* Get the index register. */
                    iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
                    fIdxRegValid = RT_BOOL(iIdxReg != 4);

                    /* Get the base register. */
                    iBaseReg = bSib & X86_SIB_BASE_MASK;
                    fBaseRegValid = true;
                    if (iBaseReg == 5)
                    {
                        if ((bRm & X86_MODRM_MOD_MASK) == 0)
                        {
                            /* Mod is 0 implies a 32-bit displacement with no base. */
                            fBaseRegValid = false;
                            IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
                        }
                        else
                        {
                            /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
                            iBaseReg = X86_GREG_xBP;
                        }
                    }
                }

                /* Register + displacement. */
                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
                {
                    case 0: /* Handled above */                              break;
                    case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
                    case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);       break;
                    default:
                    {
                        /* Register addressing, handled at the beginning. */
                        AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
                        break;
                    }
                }
            }

            GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
        }
        else
        {
            Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);

            /*
             * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
             * See Intel instruction spec. 2.2 "IA-32e Mode".
             */
            uint64_t u64Disp = 0;
            bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
            if (fRipRelativeAddr)
            {
                /*
                 * RIP-relative addressing mode.
                 *
                 * The displacement is 32-bit signed implying an offset range of +/-2G.
                 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
                 */
                uint8_t const offDisp = offModRm + sizeof(bRm);
                IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
            }
            else
            {
                uint8_t offDisp = offModRm + sizeof(bRm);

                /*
                 * Register (and perhaps scale, index and base).
                 *
                 * REX.B extends the most-significant bit of the base register. However, REX.B
                 * is ignored while determining whether an SIB follows the opcode. Hence, we
                 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
                 *
                 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
                 */
                iBaseReg = (bRm & X86_MODRM_RM_MASK);
                if (iBaseReg == 4)
                {
                    /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
                    uint8_t bSib;
                    uint8_t const offSib = offModRm + sizeof(bRm);
                    IEM_SIB_GET_U8(pVCpu, bSib, offSib);

                    /* Displacement may follow SIB, update its offset. */
                    offDisp += sizeof(bSib);

                    /* Get the scale. */
                    uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                    /* Get the index. */
                    iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
                    fIdxRegValid = RT_BOOL(iIdxReg != 4);   /* R12 -can- be used as an index register. */

                    /* Get the base. */
                    iBaseReg = (bSib & X86_SIB_BASE_MASK);
                    fBaseRegValid = true;
                    if (iBaseReg == 5)
                    {
                        if ((bRm & X86_MODRM_MOD_MASK) == 0)
                        {
                            /* Mod is 0 implies a signed 32-bit displacement with no base. */
                            IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
                        }
                        else
                        {
                            /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
                            iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
                        }
                    }
                }
                iBaseReg |= pVCpu->iem.s.uRexB;

                /* Register + displacement. */
                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
                {
                    case 0: /* Handled above */                               break;
                    case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp);  break;
                    case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
                    default:
                    {
                        /* Register addressing, handled at the beginning. */
                        AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
                        break;
                    }
                }
            }

            GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
        }

        /*
         * The primary or secondary register operand is reported in iReg2 depending
         * on whether the primary operand is in read/write form.
         */
        uint8_t idxReg2;
        if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
        {
            idxReg2 = bRm & X86_MODRM_RM_MASK;
            if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
                idxReg2 |= pVCpu->iem.s.uRexB;
        }
        else
        {
            idxReg2 = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
            if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
                idxReg2 |= pVCpu->iem.s.uRexReg;
        }
        ExitInstrInfo.All.u2Scaling      = uScale;
        ExitInstrInfo.All.iReg1          = 0;   /* Not applicable for memory addressing. */
        ExitInstrInfo.All.u3AddrSize     = pVCpu->iem.s.enmEffAddrMode;
        ExitInstrInfo.All.fIsRegOperand  = 0;
        ExitInstrInfo.All.uOperandSize   = pVCpu->iem.s.enmEffOpSize;
        ExitInstrInfo.All.iSegReg        = pVCpu->iem.s.iEffSeg;
        ExitInstrInfo.All.iIdxReg        = iIdxReg;
        ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
        ExitInstrInfo.All.iBaseReg       = iBaseReg;
        ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
        ExitInstrInfo.All.iReg2           = idxReg2;
    }

    /*
     * Handle exceptions to the norm for certain instructions.
     * (e.g. some instructions convey an instruction identity in place of iReg2).
     */
    switch (uExitReason)
    {
        case VMX_EXIT_GDTR_IDTR_ACCESS:
        {
            Assert(VMXINSTRID_IS_VALID(uInstrId));
            Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
            ExitInstrInfo.GdtIdt.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
            ExitInstrInfo.GdtIdt.u2Undef0  = 0;
            break;
        }

        case VMX_EXIT_LDTR_TR_ACCESS:
        {
            Assert(VMXINSTRID_IS_VALID(uInstrId));
            Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
            ExitInstrInfo.LdtTr.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
            ExitInstrInfo.LdtTr.u2Undef0  = 0;
            break;
        }

        case VMX_EXIT_RDRAND:
        case VMX_EXIT_RDSEED:
        {
            Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
            break;
        }
    }

    /* Update displacement and return the constructed VM-exit instruction information field. */
    if (pGCPtrDisp)
        *pGCPtrDisp = GCPtrDisp;

    return ExitInstrInfo.u;
}


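/*
 * Usage sketch (editorial addition): an instruction intercept would combine
 * the helper above with the VMCS setters defined further below.  The exit
 * reason shown is real; the VMXINSTRID_NONE identifier is assumed here for
 * illustration.
 */
#if 0 /* illustration only, never compiled */
{
    RTGCPTR        GCPtrDisp;
    uint32_t const uInstrInfo = iemVmxGetExitInstrInfo(pVCpu, VMX_EXIT_VMREAD,
                                                       VMXINSTRID_NONE /* assumed id */, &GCPtrDisp);
    iemVmxVmcsSetExitInstrInfo(pVCpu, uInstrInfo);
}
#endif
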
/**
 * Converts an IEM exception event type to a VMX event type.
 *
 * @returns The VMX event type.
 * @param   uVector     The interrupt / exception vector.
 * @param   fFlags      The IEM event flag (see IEM_XCPT_FLAGS_XXX).
 */
DECLINLINE(uint8_t) iemVmxGetEventType(uint32_t uVector, uint32_t fFlags)
{
    /* Paranoia (callers may use these interchangeably). */
    AssertCompile(VMX_EXIT_INT_INFO_TYPE_NMI          == VMX_IDT_VECTORING_INFO_TYPE_NMI);
    AssertCompile(VMX_EXIT_INT_INFO_TYPE_HW_XCPT      == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT);
    AssertCompile(VMX_EXIT_INT_INFO_TYPE_EXT_INT      == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
    AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_XCPT      == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT);
    AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_INT       == VMX_IDT_VECTORING_INFO_TYPE_SW_INT);
    AssertCompile(VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
    AssertCompile(VMX_EXIT_INT_INFO_TYPE_NMI          == VMX_ENTRY_INT_INFO_TYPE_NMI);
    AssertCompile(VMX_EXIT_INT_INFO_TYPE_HW_XCPT      == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT);
    AssertCompile(VMX_EXIT_INT_INFO_TYPE_EXT_INT      == VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
    AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_XCPT      == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT);
    AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_INT       == VMX_ENTRY_INT_INFO_TYPE_SW_INT);
    AssertCompile(VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT);

    if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
    {
        if (uVector == X86_XCPT_NMI)
            return VMX_EXIT_INT_INFO_TYPE_NMI;
        return VMX_EXIT_INT_INFO_TYPE_HW_XCPT;
    }

    if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
    {
        if (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR))
            return VMX_EXIT_INT_INFO_TYPE_SW_XCPT;
        if (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
            return VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT;
        return VMX_EXIT_INT_INFO_TYPE_SW_INT;
    }

    Assert(fFlags & IEM_XCPT_FLAGS_T_EXT_INT);
    return VMX_EXIT_INT_INFO_TYPE_EXT_INT;
}


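/*
 * Quick reference (editorial addition) for the mapping implemented above:
 *     IEM_XCPT_FLAGS_T_CPU_XCPT, vector == NMI    -> VMX_EXIT_INT_INFO_TYPE_NMI
 *     IEM_XCPT_FLAGS_T_CPU_XCPT, other vectors    -> VMX_EXIT_INT_INFO_TYPE_HW_XCPT
 *     IEM_XCPT_FLAGS_T_SOFT_INT + #BP/#OF instr.  -> VMX_EXIT_INT_INFO_TYPE_SW_XCPT
 *     IEM_XCPT_FLAGS_T_SOFT_INT + ICEBP instr.    -> VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT
 *     IEM_XCPT_FLAGS_T_SOFT_INT, plain INT n      -> VMX_EXIT_INT_INFO_TYPE_SW_INT
 *     IEM_XCPT_FLAGS_T_EXT_INT                    -> VMX_EXIT_INT_INFO_TYPE_EXT_INT
 */
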
/**
 * Sets the VM-instruction error VMCS field.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   enmInsErr   The VM-instruction error.
 */
DECL_FORCE_INLINE(void) iemVmxVmcsSetVmInstrErr(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
{
    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    pVmcs->u32RoVmInstrError = enmInsErr;
}


/**
 * Sets the VM-exit qualification VMCS field.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uExitQual   The VM-exit qualification.
 */
DECL_FORCE_INLINE(void) iemVmxVmcsSetExitQual(PVMCPU pVCpu, uint64_t uExitQual)
{
    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    pVmcs->u64RoExitQual.u = uExitQual;
}


/**
 * Sets the VM-exit interruption information field.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uExitIntInfo    The VM-exit interruption information.
 */
DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntInfo(PVMCPU pVCpu, uint32_t uExitIntInfo)
{
    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    pVmcs->u32RoExitIntInfo = uExitIntInfo;
}


/**
 * Sets the VM-exit interruption error code.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uErrCode    The error code.
 */
DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntErrCode(PVMCPU pVCpu, uint32_t uErrCode)
{
    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    pVmcs->u32RoExitIntErrCode = uErrCode;
}


/**
 * Sets the IDT-vectoring information field.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uIdtVectorInfo  The IDT-vectoring information.
 */
DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringInfo(PVMCPU pVCpu, uint32_t uIdtVectorInfo)
{
    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    pVmcs->u32RoIdtVectoringInfo = uIdtVectorInfo;
}


/**
 * Sets the IDT-vectoring error code field.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uErrCode    The error code.
 */
DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringErrCode(PVMCPU pVCpu, uint32_t uErrCode)
{
    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    pVmcs->u32RoIdtVectoringErrCode = uErrCode;
}


/**
 * Sets the VM-exit guest-linear address VMCS field.
 *
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   uGuestLinearAddr    The VM-exit guest-linear address.
 */
DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestLinearAddr(PVMCPU pVCpu, uint64_t uGuestLinearAddr)
{
    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    pVmcs->u64RoGuestLinearAddr.u = uGuestLinearAddr;
}


/**
 * Sets the VM-exit guest-physical address VMCS field.
 *
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   uGuestPhysAddr      The VM-exit guest-physical address.
 */
DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestPhysAddr(PVMCPU pVCpu, uint64_t uGuestPhysAddr)
{
    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    pVmcs->u64RoGuestPhysAddr.u = uGuestPhysAddr;
}


/**
 * Sets the VM-exit instruction length VMCS field.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The VM-exit instruction length in bytes.
 *
 * @remarks Callers may clear this field to 0. Hence, this function does not check
 *          the validity of the instruction length.
 */
DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrLen(PVMCPU pVCpu, uint32_t cbInstr)
{
    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    pVmcs->u32RoExitInstrLen = cbInstr;
}


/**
 * Sets the VM-exit instruction info. VMCS field.
 *
 * @param   pVCpu           The cross context virtual CPU structure.
 * @param   uExitInstrInfo  The VM-exit instruction information.
 */
DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitInstrInfo)
{
    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    pVmcs->u32RoExitInstrInfo = uExitInstrInfo;
}


/**
 * Implements VMSucceed for VMX instruction success.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECL_FORCE_INLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
{
    pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
}


/**
 * Implements VMFailInvalid for VMX instruction failure.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECL_FORCE_INLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
{
    pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
    pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_CF;
}


1498/**
1499 * Implements VMFailValid for VMX instruction failure.
1500 *
1501 * @param pVCpu The cross context virtual CPU structure.
1502 * @param enmInsErr The VM instruction error.
1503 */
1504DECL_FORCE_INLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1505{
1506 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1507 {
1508 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1509 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_ZF;
1510 iemVmxVmcsSetVmInstrErr(pVCpu, enmInsErr);
1511 }
1512}
1513
1514
1515/**
1516 * Implements VMFail for VMX instruction failure.
1517 *
1518 * @param pVCpu The cross context virtual CPU structure.
1519 * @param enmInsErr The VM instruction error.
1520 */
1521DECL_FORCE_INLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1522{
1523 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1524 iemVmxVmFailValid(pVCpu, enmInsErr);
1525 else
1526 iemVmxVmFailInvalid(pVCpu);
1527}
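
/*
 * Illustrative usage sketch (not part of the original source): how an emulation
 * routine is expected to report its outcome with the helpers above, following
 * the VMsucceed/VMfail conventions of Intel spec. 30.2 "Conventions". The names
 * 'rcOutcome', 'enmInsErr' and 'cbInstr' are placeholders.
 */
#if 0 /* illustration only */
    if (rcOutcome == VINF_SUCCESS)
        iemVmxVmSucceed(pVCpu);         /* CF, PF, AF, ZF, SF and OF all cleared. */
    else
        iemVmxVmFail(pVCpu, enmInsErr); /* VMfailValid (ZF=1) with a current VMCS, VMfailInvalid (CF=1) otherwise. */
    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
#endif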
1528
1529
1530/**
1531 * Checks if the given auto-load/store MSR area count is valid for the
1532 * implementation.
1533 *
1534 * @returns @c true if it's within the valid limit, @c false otherwise.
1535 * @param pVCpu The cross context virtual CPU structure.
1536 * @param uMsrCount The MSR area count to check.
1537 */
1538DECL_FORCE_INLINE(bool) iemVmxIsAutoMsrCountValid(PVMCPU pVCpu, uint32_t uMsrCount)
1539{
1540 uint64_t const u64VmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
1541 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(u64VmxMiscMsr);
1542 Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
1543 if (uMsrCount <= cMaxSupportedMsrs)
1544 return true;
1545 return false;
1546}
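
/*
 * Illustrative sketch (an assumption, not the actual VMX_MISC_MAX_MSRS
 * definition): per Intel spec. A.6 "Miscellaneous Data", bits 27:25 of the
 * IA32_VMX_MISC MSR encode a value N such that the recommended maximum number
 * of MSRs in each auto-load/store list is 512 * (N + 1).
 */
#if 0 /* illustration only */
static uint32_t iemVmxSketchMaxAutoMsrs(uint64_t uVmxMiscMsr)
{
    uint32_t const uEncoded = (uint32_t)(uVmxMiscMsr >> 25) & 0x7; /* Bits 27:25. */
    return 512 * (uEncoded + 1);
}
#endif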
1547
1548
1549/**
1550 * Flushes the current VMCS contents back to guest memory.
1551 *
1552 * @returns VBox status code.
1553 * @param pVCpu The cross context virtual CPU structure.
1554 */
1555DECL_FORCE_INLINE(int) iemVmxCommitCurrentVmcsToMemory(PVMCPU pVCpu)
1556{
1557 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1558 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
1559 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), sizeof(VMXVVMCS));
1560 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
1561 return rc;
1562}
1563
1564
1565/**
1566 * Implements VMSucceed for the VMREAD instruction and increments the guest RIP.
1567 *
1568 * @param pVCpu The cross context virtual CPU structure.
     * @param cbInstr The instruction length in bytes.
1569 */
1570DECL_FORCE_INLINE(void) iemVmxVmreadSuccess(PVMCPU pVCpu, uint8_t cbInstr)
1571{
1572 iemVmxVmSucceed(pVCpu);
1573 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1574}
1575
1576
1577/**
1578 * Gets the instruction diagnostic for segment base checks during VM-entry of a
1579 * nested-guest.
1580 *
1581 * @param iSegReg The segment index (X86_SREG_XXX).
1582 */
1583IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBase(unsigned iSegReg)
1584{
1585 switch (iSegReg)
1586 {
1587 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseCs;
1588 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseDs;
1589 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseEs;
1590 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseFs;
1591 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseGs;
1592 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseSs;
1593 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_1);
1594 }
1595}
1596
1597
1598/**
1599 * Gets the instruction diagnostic for segment base checks during VM-entry of a
1600 * nested-guest that is in Virtual-8086 mode.
1601 *
1602 * @param iSegReg The segment index (X86_SREG_XXX).
1603 */
1604IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBaseV86(unsigned iSegReg)
1605{
1606 switch (iSegReg)
1607 {
1608 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseV86Cs;
1609 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ds;
1610 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseV86Es;
1611 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseV86Fs;
1612 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseV86Gs;
1613 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ss;
1614 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
1615 }
1616}
1617
1618
1619/**
1620 * Gets the instruction diagnostic for segment limit checks during VM-entry of a
1621 * nested-guest that is in Virtual-8086 mode.
1622 *
1623 * @param iSegReg The segment index (X86_SREG_XXX).
1624 */
1625IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegLimitV86(unsigned iSegReg)
1626{
1627 switch (iSegReg)
1628 {
1629 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegLimitV86Cs;
1630 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ds;
1631 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegLimitV86Es;
1632 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegLimitV86Fs;
1633 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegLimitV86Gs;
1634 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ss;
1635 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_3);
1636 }
1637}
1638
1639
1640/**
1641 * Gets the instruction diagnostic for segment attribute checks during VM-entry of a
1642 * nested-guest that is in Virtual-8086 mode.
1643 *
1644 * @param iSegReg The segment index (X86_SREG_XXX).
1645 */
1646IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrV86(unsigned iSegReg)
1647{
1648 switch (iSegReg)
1649 {
1650 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrV86Cs;
1651 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ds;
1652 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrV86Es;
1653 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrV86Fs;
1654 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrV86Gs;
1655 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ss;
1656 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_4);
1657 }
1658}
1659
1660
1661/**
1662 * Gets the instruction diagnostic for segment attributes reserved bits failure
1663 * during VM-entry of a nested-guest.
1664 *
1665 * @param iSegReg The segment index (X86_SREG_XXX).
1666 */
1667IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrRsvd(unsigned iSegReg)
1668{
1669 switch (iSegReg)
1670 {
1671 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdCs;
1672 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdDs;
1673 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrRsvdEs;
1674 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdFs;
1675 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdGs;
1676 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdSs;
1677 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_5);
1678 }
1679}
1680
1681
1682/**
1683 * Gets the instruction diagnostic for segment attributes descriptor-type
1684 * (code/segment or system) failure during VM-entry of a nested-guest.
1685 *
1686 * @param iSegReg The segment index (X86_SREG_XXX).
1687 */
1688IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDescType(unsigned iSegReg)
1689{
1690 switch (iSegReg)
1691 {
1692 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeCs;
1693 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeDs;
1694 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeEs;
1695 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeFs;
1696 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeGs;
1697 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeSs;
1698 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_6);
1699 }
1700}
1701
1702
1703/**
1704 * Gets the instruction diagnostic for segment attribute present-bit failure
1705 * during VM-entry of a nested-guest.
1706 *
1707 * @param iSegReg The segment index (X86_SREG_XXX).
1708 */
1709IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrPresent(unsigned iSegReg)
1710{
1711 switch (iSegReg)
1712 {
1713 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrPresentCs;
1714 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrPresentDs;
1715 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrPresentEs;
1716 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrPresentFs;
1717 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrPresentGs;
1718 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrPresentSs;
1719 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_7);
1720 }
1721}
1722
1723
1724/**
1725 * Gets the instruction diagnostic for segment attribute granularity failure during
1726 * VM-entry of a nested-guest.
1727 *
1728 * @param iSegReg The segment index (X86_SREG_XXX).
1729 */
1730IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrGran(unsigned iSegReg)
1731{
1732 switch (iSegReg)
1733 {
1734 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrGranCs;
1735 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrGranDs;
1736 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrGranEs;
1737 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrGranFs;
1738 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrGranGs;
1739 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrGranSs;
1740 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_8);
1741 }
1742}
1743
1744/**
1745 * Gets the instruction diagnostic for segment attribute DPL/RPL failure during
1746 * VM-entry of a nested-guest.
1747 *
1748 * @param iSegReg The segment index (X86_SREG_XXX).
1749 */
1750IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDplRpl(unsigned iSegReg)
1751{
1752 switch (iSegReg)
1753 {
1754 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplCs;
1755 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplDs;
1756 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDplRplEs;
1757 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplFs;
1758 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplGs;
1759 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplSs;
1760 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_9);
1761 }
1762}
1763
1764
1765/**
1766 * Gets the instruction diagnostic for segment attribute type accessed failure
1767 * during VM-entry of a nested-guest.
1768 *
1769 * @param iSegReg The segment index (X86_SREG_XXX).
1770 */
1771IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrTypeAcc(unsigned iSegReg)
1772{
1773 switch (iSegReg)
1774 {
1775 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccCs;
1776 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccDs;
1777 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccEs;
1778 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccFs;
1779 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccGs;
1780 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccSs;
1781 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_10);
1782 }
1783}
1784
1785
1786/**
1787 * Gets the instruction diagnostic for guest CR3 referenced PDPTE reserved bits
1788 * failure during VM-entry of a nested-guest.
1789 *
1790 * @param iPdpte The PDPTE entry index.
1791 */
1792IEM_STATIC VMXVDIAG iemVmxGetDiagVmentryPdpteRsvd(unsigned iPdpte)
1793{
1794 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
1795 switch (iPdpte)
1796 {
1797 case 0: return kVmxVDiag_Vmentry_GuestPdpte0Rsvd;
1798 case 1: return kVmxVDiag_Vmentry_GuestPdpte1Rsvd;
1799 case 2: return kVmxVDiag_Vmentry_GuestPdpte2Rsvd;
1800 case 3: return kVmxVDiag_Vmentry_GuestPdpte3Rsvd;
1801 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_11);
1802 }
1803}
1804
1805
1806/**
1807 * Gets the instruction diagnostic for host CR3 referenced PDPTE reserved bits
1808 * failure during VM-exit of a nested-guest.
1809 *
1810 * @param iPdpte The PDPTE entry index.
1811 */
1812IEM_STATIC VMXVDIAG iemVmxGetDiagVmexitPdpteRsvd(unsigned iPdpte)
1813{
1814 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
1815 switch (iPdpte)
1816 {
1817 case 0: return kVmxVDiag_Vmexit_HostPdpte0Rsvd;
1818 case 1: return kVmxVDiag_Vmexit_HostPdpte1Rsvd;
1819 case 2: return kVmxVDiag_Vmexit_HostPdpte2Rsvd;
1820 case 3: return kVmxVDiag_Vmexit_HostPdpte3Rsvd;
1821 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_12);
1822 }
1823}
1824
1825
1826/**
1827 * Saves the guest control registers, debug registers and some MSRs as part of
1828 * VM-exit.
1829 *
1830 * @param pVCpu The cross context virtual CPU structure.
1831 */
1832IEM_STATIC void iemVmxVmexitSaveGuestControlRegsMsrs(PVMCPU pVCpu)
1833{
1834 /*
1835 * Saves the guest control registers, debug registers and some MSRs.
1836 * See Intel spec. 27.3.1 "Saving Control Registers, Debug Registers and MSRs".
1837 */
1838 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1839
1840 /* Save control registers. */
1841 pVmcs->u64GuestCr0.u = pVCpu->cpum.GstCtx.cr0;
1842 pVmcs->u64GuestCr3.u = pVCpu->cpum.GstCtx.cr3;
1843 pVmcs->u64GuestCr4.u = pVCpu->cpum.GstCtx.cr4;
1844
1845 /* Save SYSENTER CS, ESP, EIP. */
1846 pVmcs->u32GuestSysenterCS = pVCpu->cpum.GstCtx.SysEnter.cs;
1847 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1848 {
1849 pVmcs->u64GuestSysenterEsp.u = pVCpu->cpum.GstCtx.SysEnter.esp;
1850 pVmcs->u64GuestSysenterEip.u = pVCpu->cpum.GstCtx.SysEnter.eip;
1851 }
1852 else
1853 {
1854 pVmcs->u64GuestSysenterEsp.s.Lo = pVCpu->cpum.GstCtx.SysEnter.esp;
1855 pVmcs->u64GuestSysenterEip.s.Lo = pVCpu->cpum.GstCtx.SysEnter.eip;
1856 }
1857
1858 /* Save debug registers (DR7 and IA32_DEBUGCTL MSR). */
1859 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_DEBUG)
1860 {
1861 pVmcs->u64GuestDr7.u = pVCpu->cpum.GstCtx.dr[7];
1862 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
1863 }
1864
1865 /* Save PAT MSR. */
1866 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PAT_MSR)
1867 pVmcs->u64GuestPatMsr.u = pVCpu->cpum.GstCtx.msrPAT;
1868
1869 /* Save EFER MSR. */
1870 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_EFER_MSR)
1871 pVmcs->u64GuestEferMsr.u = pVCpu->cpum.GstCtx.msrEFER;
1872
1873 /* We don't support clearing IA32_BNDCFGS MSR yet. */
1874 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR));
1875
1876 /* Nothing to do for SMBASE register - We don't support SMM yet. */
1877}
1878
1879
1880/**
1881 * Saves the guest force-flags in preparation for entering the nested-guest.
1882 *
1883 * @param pVCpu The cross context virtual CPU structure.
1884 */
1885IEM_STATIC void iemVmxVmentrySaveForceFlags(PVMCPU pVCpu)
1886{
1887 /* We shouldn't be called multiple times during VM-entry. */
1888 Assert(pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions == 0);
1889
1890 /* MTF should not be set outside VMX non-root mode. */
1891 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
1892
1893 /*
1894 * Preserve the required force-flags.
1895 *
1896 * We cache and clear force-flags that would affect the execution of the
1897 * nested-guest. Cached flags are then restored while returning to the guest
1898 * if necessary.
1899 *
1900 * - VMCPU_FF_INHIBIT_INTERRUPTS need not be cached as it only affects
1901 * interrupts until the completion of the current VMLAUNCH/VMRESUME
1902 * instruction. Interrupt inhibition for any nested-guest instruction
1903 * will be set later while loading the guest-interruptibility state.
1904 *
1905 * - VMCPU_FF_BLOCK_NMIS needs to be cached as VM-exits caused before
1906 * successful VM-entry needs to continue blocking NMIs if it was in effect
1907 * during VM-entry.
1908 *
1909 * - MTF need not be preserved as it's used only in VMX non-root mode and
1910 * is supplied on VM-entry through the VM-execution controls.
1911 *
1912 * The remaining FFs (e.g. timers, APIC updates) must stay in place so that
1913 * we will be able to generate interrupts that may cause VM-exits for
1914 * the nested-guest.
1915 */
1916 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
1917
1918 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_BLOCK_NMIS))
1919 VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_BLOCK_NMIS);
1920}
1921
1922
1923/**
1924 * Restores the guest force-flags in preparation for exiting the nested-guest.
1925 *
1926 * @param pVCpu The cross context virtual CPU structure.
1927 */
1928IEM_STATIC void iemVmxVmexitRestoreForceFlags(PVMCPU pVCpu)
1929{
1930 if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
1931 {
1932 VMCPU_FF_SET_MASK(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions);
1933 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0;
1934 }
1935}
1936
1937
1938/**
1939 * Performs a VMX transition, updating PGM, IEM and CPUM.
1940 *
     * @returns VBox status code.
1941 * @param pVCpu The cross context virtual CPU structure.
1942 */
1943IEM_STATIC int iemVmxWorldSwitch(PVMCPU pVCpu)
1944{
1945 /*
1946 * Inform PGM about paging mode changes.
1947 * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet,
1948 * see comment in iemMemPageTranslateAndCheckAccess().
1949 */
1950 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
1951# ifdef IN_RING3
1952 Assert(rc != VINF_PGM_CHANGE_MODE);
1953# endif
1954 AssertRCReturn(rc, rc);
1955
1956 /* Inform CPUM (recompiler), can later be removed. */
1957 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1958
1959 /*
1960 * Flush the TLB with new CR3. This is required in case the PGM mode change
1961 * above doesn't actually change anything.
1962 */
1963 if (rc == VINF_SUCCESS)
1964 {
1965 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true);
1966 AssertRCReturn(rc, rc);
1967 }
1968
1969 /* Re-initialize IEM cache/state after the drastic mode switch. */
1970 iemReInitExec(pVCpu);
1971 return rc;
1972}
1973
1974
1975/**
1976 * Saves guest segment registers, GDTR, IDTR, LDTR, TR as part of VM-exit.
1977 *
1978 * @param pVCpu The cross context virtual CPU structure.
1979 */
1980IEM_STATIC void iemVmxVmexitSaveGuestSegRegs(PVMCPU pVCpu)
1981{
1982 /*
1983 * Save guest segment registers, GDTR, IDTR, LDTR, TR.
1984 * See Intel spec 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
1985 */
1986 /* CS, SS, ES, DS, FS, GS. */
1987 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1988 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
1989 {
1990 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1991 if (!pSelReg->Attr.n.u1Unusable)
1992 iemVmxVmcsSetGuestSegReg(pVmcs, iSegReg, pSelReg);
1993 else
1994 {
1995 /*
1996 * For unusable segments the attributes are undefined except for CS and SS.
1997 * For the rest we don't bother preserving anything but the unusable bit.
1998 */
1999 switch (iSegReg)
2000 {
2001 case X86_SREG_CS:
2002 pVmcs->GuestCs = pSelReg->Sel;
2003 pVmcs->u64GuestCsBase.u = pSelReg->u64Base;
2004 pVmcs->u32GuestCsLimit = pSelReg->u32Limit;
2005 pVmcs->u32GuestCsAttr = pSelReg->Attr.u & ( X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
2006 | X86DESCATTR_UNUSABLE);
2007 break;
2008
2009 case X86_SREG_SS:
2010 pVmcs->GuestSs = pSelReg->Sel;
2011 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2012 pVmcs->u64GuestSsBase.u &= UINT32_C(0xffffffff);
2013 pVmcs->u32GuestSsAttr = pSelReg->Attr.u & (X86DESCATTR_DPL | X86DESCATTR_UNUSABLE);
2014 break;
2015
2016 case X86_SREG_DS:
2017 pVmcs->GuestDs = pSelReg->Sel;
2018 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2019 pVmcs->u64GuestDsBase.u &= UINT32_C(0xffffffff);
2020 pVmcs->u32GuestDsAttr = X86DESCATTR_UNUSABLE;
2021 break;
2022
2023 case X86_SREG_ES:
2024 pVmcs->GuestEs = pSelReg->Sel;
2025 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2026 pVmcs->u64GuestEsBase.u &= UINT32_C(0xffffffff);
2027 pVmcs->u32GuestEsAttr = X86DESCATTR_UNUSABLE;
2028 break;
2029
2030 case X86_SREG_FS:
2031 pVmcs->GuestFs = pSelReg->Sel;
2032 pVmcs->u64GuestFsBase.u = pSelReg->u64Base;
2033 pVmcs->u32GuestFsAttr = X86DESCATTR_UNUSABLE;
2034 break;
2035
2036 case X86_SREG_GS:
2037 pVmcs->GuestGs = pSelReg->Sel;
2038 pVmcs->u64GuestGsBase.u = pSelReg->u64Base;
2039 pVmcs->u32GuestGsAttr = X86DESCATTR_UNUSABLE;
2040 break;
2041 }
2042 }
2043 }
2044
2045    /* Segment attribute bits 31:17 and 11:8 MBZ. */
2046 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
2047 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_UNUSABLE;
2048 /* LDTR. */
2049 {
2050 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.ldtr;
2051 pVmcs->GuestLdtr = pSelReg->Sel;
2052 pVmcs->u64GuestLdtrBase.u = pSelReg->u64Base;
2053 Assert(X86_IS_CANONICAL(pSelReg->u64Base));
2054 pVmcs->u32GuestLdtrLimit = pSelReg->u32Limit;
2055 pVmcs->u32GuestLdtrAttr = pSelReg->Attr.u & fValidAttrMask;
2056 }
2057
2058 /* TR. */
2059 {
2060 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.tr;
2061 pVmcs->GuestTr = pSelReg->Sel;
2062 pVmcs->u64GuestTrBase.u = pSelReg->u64Base;
2063 pVmcs->u32GuestTrLimit = pSelReg->u32Limit;
2064 pVmcs->u32GuestTrAttr = pSelReg->Attr.u & fValidAttrMask;
2065 }
2066
2067 /* GDTR. */
2068 pVmcs->u64GuestGdtrBase.u = pVCpu->cpum.GstCtx.gdtr.pGdt;
2069 pVmcs->u32GuestGdtrLimit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
2070
2071 /* IDTR. */
2072 pVmcs->u64GuestIdtrBase.u = pVCpu->cpum.GstCtx.idtr.pIdt;
2073 pVmcs->u32GuestIdtrLimit = pVCpu->cpum.GstCtx.idtr.cbIdt;
2074}
2075
2076
2077/**
2078 * Saves guest non-register state as part of VM-exit.
2079 *
2080 * @param pVCpu The cross context virtual CPU structure.
2081 * @param uExitReason The VM-exit reason.
2082 */
2083IEM_STATIC void iemVmxVmexitSaveGuestNonRegState(PVMCPU pVCpu, uint32_t uExitReason)
2084{
2085 /*
2086 * Save guest non-register state.
2087 * See Intel spec. 27.3.4 "Saving Non-Register State".
2088 */
2089 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2090
2091    /* Activity-state: VM-exits occur before changing the activity state, nothing further to do. */
2092
2093 /* Interruptibility-state. */
2094 pVmcs->u32GuestIntrState = 0;
2095 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
2096 { /** @todo NSTVMX: Virtual-NMI blocking. */ }
2097 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2098 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
2099
2100 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2101 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
2102 {
2103 /** @todo NSTVMX: We can't distinguish between blocking-by-MovSS and blocking-by-STI
2104 * currently. */
2105 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
2106 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2107 }
2108 /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */
2109
2110 /* Pending debug exceptions. */
2111 if ( uExitReason != VMX_EXIT_INIT_SIGNAL
2112 && uExitReason != VMX_EXIT_SMI
2113 && uExitReason != VMX_EXIT_ERR_MACHINE_CHECK
2114 && !HMVmxIsTrapLikeVmexit(uExitReason))
2115 {
2116 /** @todo NSTVMX: also must exclude VM-exits caused by debug exceptions when
2117 * block-by-MovSS is in effect. */
2118 pVmcs->u64GuestPendingDbgXcpt.u = 0;
2119 }
2120
2121 /* Save VMX-preemption timer value. */
2122 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER)
2123 {
2124 uint32_t uPreemptTimer;
2125 if (uExitReason == VMX_EXIT_PREEMPT_TIMER)
2126 uPreemptTimer = 0;
2127 else
2128 {
2129 /*
2130 * Assume the following:
2131 * PreemptTimerShift = 5
2132 * VmcsPreemptTimer = 2 (i.e. decrement by 1 every VmcsPreemptTimer * RT_BIT(PreemptTimerShift) ticks,
     * assumed to be 20000 TSC ticks in this example)
2133 * VmentryTick = 50000 (TSC at time of VM-entry)
2134 *
2135 * CurTick Delta PreemptTimerVal
2136 * ----------------------------------
2137 * 60000 10000 2
2138 * 80000 30000 1
2139 * 90000 40000 0 -> VM-exit.
2140 *
2141 * If Delta >= VmcsPreemptTimer * RT_BIT(PreemptTimerShift), a VMX-preemption timer VM-exit occurs.
2142 *
2143 * The saved VMX-preemption timer value is calculated as follows:
2144 * PreemptTimerVal = VmcsPreemptTimer - (Delta / (VmcsPreemptTimer * RT_BIT(PreemptTimerShift)))
2145 * E.g.:
2146 * Delta = 10000
2147 * Tmp = 10000 / 20000 = 0 (integer division truncates)
2148 * NewPt = 2 - 0 = 2
2149 * Delta = 30000
2150 * Tmp = 30000 / 20000 = 1
2151 * NewPt = 2 - 1 = 1
2152 * Delta = 40000
2153 * Tmp = 40000 / 20000 = 2
2154 * NewPt = 2 - 2 = 0
2155 */
2156 uint64_t const uCurTick = TMCpuTickGetNoCheck(pVCpu);
2157 uint64_t const uVmentryTick = pVCpu->cpum.GstCtx.hwvirt.vmx.uVmentryTick;
2158 uint64_t const uDelta = uCurTick - uVmentryTick;
2159 uint32_t const uVmcsPreemptVal = pVmcs->u32PreemptTimer;
2160 uPreemptTimer = uVmcsPreemptVal - ASMDivU64ByU32RetU32(uDelta, uVmcsPreemptVal * RT_BIT(VMX_V_PREEMPT_TIMER_SHIFT));
2161 }
2162
2163 pVmcs->u32PreemptTimer = uPreemptTimer;
2164 }
2165
2167 /* PDPTEs. */
2168 /* We don't support EPT yet. */
2169 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
2170 pVmcs->u64GuestPdpte0.u = 0;
2171 pVmcs->u64GuestPdpte1.u = 0;
2172 pVmcs->u64GuestPdpte2.u = 0;
2173 pVmcs->u64GuestPdpte3.u = 0;
2174}
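
/*
 * Illustrative helper (an assumption, not part of the original source): the
 * saved preemption-timer computation above, factored out. uDelta is the number
 * of TSC ticks elapsed since VM-entry; the divisor mirrors the in-line
 * ASMDivU64ByU32RetU32 expression.
 */
#if 0 /* illustration only */
static uint32_t iemVmxSketchSavedPreemptTimer(uint32_t uVmcsPreemptVal, uint64_t uDelta)
{
    return uVmcsPreemptVal
         - (uint32_t)(uDelta / ((uint64_t)uVmcsPreemptVal << VMX_V_PREEMPT_TIMER_SHIFT));
}
#endif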
2175
2176
2177/**
2178 * Saves the guest-state as part of VM-exit.
2179 *
2181 * @param pVCpu The cross context virtual CPU structure.
2182 * @param uExitReason The VM-exit reason.
2183 */
2184IEM_STATIC void iemVmxVmexitSaveGuestState(PVMCPU pVCpu, uint32_t uExitReason)
2185{
2186 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2187 Assert(pVmcs);
2188
2189 iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu);
2190 iemVmxVmexitSaveGuestSegRegs(pVCpu);
2191
2192 /*
2193 * Save guest RIP, RSP and RFLAGS.
2194 * See Intel spec. 27.3.3 "Saving RIP, RSP and RFLAGS".
2195 *
2196 * For trap-like VM-exits we must advance the RIP by the length of the instruction.
2197 * Callers must pass the instruction length in the VM-exit instruction length
2198 * field though it is undefined for such VM-exits. After updating RIP here, we clear
2199 * the VM-exit instruction length field.
2200 *
2201 * See Intel spec. 27.1 "Architectural State Before A VM Exit"
2202 */
2203 if (HMVmxIsTrapLikeVmexit(uExitReason))
2204 {
2205 uint8_t const cbInstr = pVmcs->u32RoExitInstrLen;
2206 AssertMsg(cbInstr >= 1 && cbInstr <= 15, ("uReason=%u cbInstr=%u\n", uExitReason, cbInstr));
2207 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2208 iemVmxVmcsSetExitInstrLen(pVCpu, 0 /* cbInstr */);
2209 }
2210
2211 /* We don't support enclave mode yet. */
2212 pVmcs->u64GuestRip.u = pVCpu->cpum.GstCtx.rip;
2213 pVmcs->u64GuestRsp.u = pVCpu->cpum.GstCtx.rsp;
2214 pVmcs->u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u; /** @todo NSTVMX: Check RFLAGS.RF handling. */
2215
2216 iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason);
2217}
2218
2219
2220/**
2221 * Saves the guest MSRs into the VM-exit auto-store MSRs area as part of VM-exit.
2222 *
2223 * @returns VBox status code.
2224 * @param pVCpu The cross context virtual CPU structure.
2225 * @param uExitReason The VM-exit reason (for diagnostic purposes).
2226 */
2227IEM_STATIC int iemVmxVmexitSaveGuestAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
2228{
2229 /*
2230 * Save guest MSRs.
2231 * See Intel spec. 27.4 "Saving MSRs".
2232 */
2233 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2234 const char *const pszFailure = "VMX-abort";
2235
2236 /*
2237 * The VM-exit MSR-store area address need not be a valid guest-physical address if the
2238 * VM-exit MSR-store count is 0. If this is the case, bail early without reading it.
2239 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
2240 */
2241 uint32_t const cMsrs = pVmcs->u32ExitMsrStoreCount;
2242 if (!cMsrs)
2243 return VINF_SUCCESS;
2244
2245 /*
2246 * Verify the MSR auto-store count. Physical CPUs can behave unpredictably if the count
2247 * is exceeded, including possibly raising #MC exceptions during VMX transition. Our
2248 * implementation causes a VMX-abort followed by a triple-fault.
2249 */
2250 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
2251 if (fIsMsrCountValid)
2252 { /* likely */ }
2253 else
2254 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount);
2255
2256 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
2257 Assert(pMsr);
2258 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
2259 {
2260 if ( !pMsr->u32Reserved
2261 && pMsr->u32Msr != MSR_IA32_SMBASE
2262            && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8) /* Excludes the x2APIC MSR range. */
2263 {
2264 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pMsr->u32Msr, &pMsr->u64Value);
2265 if (rcStrict == VINF_SUCCESS)
2266 continue;
2267
2268 /*
2269 * If we're in ring-0, we cannot handle returns to ring-3 at this point and still
2270 * continue the VM-exit. If any guest hypervisor stores MSRs that require ring-3
2271 * handling, we cause a VMX-abort, recording the MSR index in the auxiliary info.
2272 * field and indicating it further with our own, specific diagnostic code. Later, we
2273 * can try to handle the MSR in ring-0, or come up with a better, generic solution.
2274 */
2275 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2276 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_READ
2277 ? kVmxVDiag_Vmexit_MsrStoreRing3
2278 : kVmxVDiag_Vmexit_MsrStore;
2279 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2280 }
2281 else
2282 {
2283 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2284 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreRsvd);
2285 }
2286 }
2287
2288 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrStore.u;
2289 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysAutoMsrArea,
2290 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea), VMX_V_AUTOMSR_AREA_SIZE);
2291 if (RT_SUCCESS(rc))
2292 { /* likely */ }
2293 else
2294 {
2295 AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
2296 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrWritePhys);
2297 }
2298
2299 NOREF(uExitReason);
2300 NOREF(pszFailure);
2301 return VINF_SUCCESS;
2302}
2303
2304
2305/**
2306 * Performs a VMX abort (due to a fatal error during VM-exit).
2307 *
2308 * @returns Strict VBox status code.
2309 * @param pVCpu The cross context virtual CPU structure.
2310 * @param enmAbort The VMX abort reason.
2311 */
2312IEM_STATIC VBOXSTRICTRC iemVmxAbort(PVMCPU pVCpu, VMXABORT enmAbort)
2313{
2314 /*
2315 * Perform the VMX abort.
2316 * See Intel spec. 27.7 "VMX Aborts".
2317 */
2318 LogFunc(("enmAbort=%u (%s) -> RESET\n", enmAbort, HMVmxGetAbortDesc(enmAbort)));
2319
2320 /* We don't support SMX yet. */
2321 pVCpu->cpum.GstCtx.hwvirt.vmx.enmAbort = enmAbort;
2322 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
2323 {
2324 RTGCPHYS const GCPhysVmcs = IEM_VMX_GET_CURRENT_VMCS(pVCpu);
2325 uint32_t const offVmxAbort = RT_UOFFSETOF(VMXVVMCS, u32VmxAbortId);
2326 PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + offVmxAbort, &enmAbort, sizeof(enmAbort));
2327 }
2328
2329 return VINF_EM_TRIPLE_FAULT;
2330}
2331
2332
2333/**
2334 * Loads host control registers, debug registers and MSRs as part of VM-exit.
2335 *
2336 * @param pVCpu The cross context virtual CPU structure.
2337 */
2338IEM_STATIC void iemVmxVmexitLoadHostControlRegsMsrs(PVMCPU pVCpu)
2339{
2340 /*
2341 * Load host control registers, debug registers and MSRs.
2342 * See Intel spec. 27.5.1 "Loading Host Control Registers, Debug Registers, MSRs".
2343 */
2344 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2345 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2346
2347 /* CR0. */
2348 {
2349 /* Bits 63:32, 28:19, 17, 15:6, ET, CD, NW and CR0 MB1 bits are not modified. */
2350 uint64_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
2351 uint64_t const fCr0IgnMask = UINT64_C(0xffffffff1ff8ffc0) | X86_CR0_ET | X86_CR0_CD | X86_CR0_NW | uCr0Fixed0;
2352 uint64_t const uHostCr0 = pVmcs->u64HostCr0.u;
2353 uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
2354 uint64_t const uValidCr0 = (uHostCr0 & ~fCr0IgnMask) | (uGuestCr0 & fCr0IgnMask);
2355 CPUMSetGuestCR0(pVCpu, uValidCr0);
2356 }
2357
2358 /* CR4. */
2359 {
2360 /* CR4 MB1 bits are not modified. */
2361 uint64_t const fCr4IgnMask = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
2362 uint64_t const uHostCr4 = pVmcs->u64HostCr4.u;
2363 uint64_t const uGuestCr4 = pVCpu->cpum.GstCtx.cr4;
2364 uint64_t uValidCr4 = (uHostCr4 & ~fCr4IgnMask) | (uGuestCr4 & fCr4IgnMask);
2365 if (fHostInLongMode)
2366 uValidCr4 |= X86_CR4_PAE;
2367 else
2368 uValidCr4 &= ~X86_CR4_PCIDE;
2369 CPUMSetGuestCR4(pVCpu, uValidCr4);
2370 }
2371
2372 /* CR3 (host value validated while checking host-state during VM-entry). */
2373 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64HostCr3.u;
2374
2375 /* DR7. */
2376 pVCpu->cpum.GstCtx.dr[7] = X86_DR7_INIT_VAL;
2377
2378 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
2379
2380    /* Load SYSENTER CS, ESP, EIP (host values validated while checking host-state during VM-entry). */
2381 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64HostSysenterEip.u;
2382 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64HostSysenterEsp.u;
2383 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32HostSysenterCs;
2384
2385 /* FS, GS bases are loaded later while we load host segment registers. */
2386
2387 /* EFER MSR (host value validated while checking host-state during VM-entry). */
2388 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
2389 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64HostEferMsr.u;
2390 else if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2391 {
2392 if (fHostInLongMode)
2393 pVCpu->cpum.GstCtx.msrEFER |= (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
2394 else
2395 pVCpu->cpum.GstCtx.msrEFER &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
2396 }
2397
2398 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
2399
2400 /* PAT MSR (host value is validated while checking host-state during VM-entry). */
2401 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
2402 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64HostPatMsr.u;
2403
2404 /* We don't support IA32_BNDCFGS MSR yet. */
2405}
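
/*
 * Hedged sketch of the CR0/CR4 merge rule used above (illustration only, not
 * part of the original source): bits covered by the ignore-mask keep their
 * current guest value, everything else is taken from the host-state area of
 * the VMCS.
 */
#if 0 /* illustration only */
static uint64_t iemVmxSketchMergeHostCtrlReg(uint64_t uHostVal, uint64_t uGuestVal, uint64_t fIgnMask)
{
    return (uHostVal & ~fIgnMask) | (uGuestVal & fIgnMask);
}
#endif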
2406
2407
2408/**
2409 * Loads host segment registers, GDTR, IDTR, LDTR and TR as part of VM-exit.
2410 *
2411 * @param pVCpu The cross context virtual CPU structure.
2412 */
2413IEM_STATIC void iemVmxVmexitLoadHostSegRegs(PVMCPU pVCpu)
2414{
2415 /*
2416 * Load host segment registers, GDTR, IDTR, LDTR and TR.
2417 * See Intel spec. 27.5.2 "Loading Host Segment and Descriptor-Table Registers".
2418 *
2419 * Warning! Be careful to not touch fields that are reserved by VT-x,
2420 * e.g. segment limit high bits stored in segment attributes (in bits 11:8).
2421 */
2422 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2423 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2424
2425 /* CS, SS, ES, DS, FS, GS. */
2426 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
2427 {
2428 RTSEL const HostSel = iemVmxVmcsGetHostSelReg(pVmcs, iSegReg);
2429 bool const fUnusable = RT_BOOL(HostSel == 0);
2430
2431 /* Selector. */
2432 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel = HostSel;
2433 pVCpu->cpum.GstCtx.aSRegs[iSegReg].ValidSel = HostSel;
2434 pVCpu->cpum.GstCtx.aSRegs[iSegReg].fFlags = CPUMSELREG_FLAGS_VALID;
2435
2436 /* Limit. */
2437 pVCpu->cpum.GstCtx.aSRegs[iSegReg].u32Limit = 0xffffffff;
2438
2439 /* Base and Attributes. */
2440 switch (iSegReg)
2441 {
2442 case X86_SREG_CS:
2443 {
2444 pVCpu->cpum.GstCtx.cs.u64Base = 0;
2445 pVCpu->cpum.GstCtx.cs.Attr.n.u4Type = X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED;
2446                pVCpu->cpum.GstCtx.cs.Attr.n.u1DescType   = 1;
2447 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = 0;
2448 pVCpu->cpum.GstCtx.cs.Attr.n.u1Present = 1;
2449 pVCpu->cpum.GstCtx.cs.Attr.n.u1Long = fHostInLongMode;
2450 pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig = !fHostInLongMode;
2451 pVCpu->cpum.GstCtx.cs.Attr.n.u1Granularity = 1;
2452 Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Unusable);
2453 Assert(!fUnusable);
2454 break;
2455 }
2456
2457 case X86_SREG_SS:
2458 case X86_SREG_ES:
2459 case X86_SREG_DS:
2460 {
2461 pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base = 0;
2462 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2463 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DescType = 1;
2464 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u2Dpl = 0;
2465 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Present = 1;
2466 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DefBig = 1;
2467 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Granularity = 1;
2468 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable = fUnusable;
2469 break;
2470 }
2471
2472 case X86_SREG_FS:
2473 {
2474 Assert(X86_IS_CANONICAL(pVmcs->u64HostFsBase.u));
2475 pVCpu->cpum.GstCtx.fs.u64Base = !fUnusable ? pVmcs->u64HostFsBase.u : 0;
2476 pVCpu->cpum.GstCtx.fs.Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2477 pVCpu->cpum.GstCtx.fs.Attr.n.u1DescType = 1;
2478 pVCpu->cpum.GstCtx.fs.Attr.n.u2Dpl = 0;
2479 pVCpu->cpum.GstCtx.fs.Attr.n.u1Present = 1;
2480 pVCpu->cpum.GstCtx.fs.Attr.n.u1DefBig = 1;
2481 pVCpu->cpum.GstCtx.fs.Attr.n.u1Granularity = 1;
2482 pVCpu->cpum.GstCtx.fs.Attr.n.u1Unusable = fUnusable;
2483 break;
2484 }
2485
2486 case X86_SREG_GS:
2487 {
2488 Assert(X86_IS_CANONICAL(pVmcs->u64HostGsBase.u));
2489 pVCpu->cpum.GstCtx.gs.u64Base = !fUnusable ? pVmcs->u64HostGsBase.u : 0;
2490 pVCpu->cpum.GstCtx.gs.Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2491 pVCpu->cpum.GstCtx.gs.Attr.n.u1DescType = 1;
2492 pVCpu->cpum.GstCtx.gs.Attr.n.u2Dpl = 0;
2493 pVCpu->cpum.GstCtx.gs.Attr.n.u1Present = 1;
2494 pVCpu->cpum.GstCtx.gs.Attr.n.u1DefBig = 1;
2495 pVCpu->cpum.GstCtx.gs.Attr.n.u1Granularity = 1;
2496 pVCpu->cpum.GstCtx.gs.Attr.n.u1Unusable = fUnusable;
2497 break;
2498 }
2499 }
2500 }
2501
2502 /* TR. */
2503 Assert(X86_IS_CANONICAL(pVmcs->u64HostTrBase.u));
2504 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1Unusable);
2505 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->HostTr;
2506 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->HostTr;
2507 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2508 pVCpu->cpum.GstCtx.tr.u32Limit = X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN;
2509 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64HostTrBase.u;
2510 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2511 pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType = 0;
2512 pVCpu->cpum.GstCtx.tr.Attr.n.u2Dpl = 0;
2513 pVCpu->cpum.GstCtx.tr.Attr.n.u1Present = 1;
2514 pVCpu->cpum.GstCtx.tr.Attr.n.u1DefBig = 0;
2515 pVCpu->cpum.GstCtx.tr.Attr.n.u1Granularity = 0;
2516
2517 /* LDTR. */
2518 pVCpu->cpum.GstCtx.ldtr.Sel = 0;
2519 pVCpu->cpum.GstCtx.ldtr.ValidSel = 0;
2520 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2521 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
2522 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
2523 pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Unusable = 1;
2524
2525 /* GDTR. */
2526 Assert(X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u));
2527 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64HostGdtrBase.u;
2528    pVCpu->cpum.GstCtx.gdtr.cbGdt = 0xffff; /* Set to FFFFH on VM-exit, see Intel spec. 27.5.2. */
2529
2530    /* IDTR. */
2531    Assert(X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u));
2532    pVCpu->cpum.GstCtx.idtr.pIdt  = pVmcs->u64HostIdtrBase.u;
2533    pVCpu->cpum.GstCtx.idtr.cbIdt = 0xffff; /* Set to FFFFH on VM-exit, see Intel spec. 27.5.2. */
2534}
2535
2536
2537/**
2538 * Checks host PDPTEs as part of VM-exit.
2539 *
2540 * @param pVCpu The cross context virtual CPU structure.
2541 * @param uExitReason The VM-exit reason (for logging purposes).
2542 */
2543IEM_STATIC int iemVmxVmexitCheckHostPdptes(PVMCPU pVCpu, uint32_t uExitReason)
2544{
2545 /*
2546 * Check host PDPTEs.
2547 * See Intel spec. 27.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
2548 */
2549 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2550 const char *const pszFailure = "VMX-abort";
2551 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2552
2553 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
2554 && !fHostInLongMode)
2555 {
2556 uint64_t const uHostCr3 = pVCpu->cpum.GstCtx.cr3 & X86_CR3_PAE_PAGE_MASK;
2557 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
2558 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uHostCr3, sizeof(aPdptes));
2559 if (RT_SUCCESS(rc))
2560 {
2561 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
2562 {
2563 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
2564 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
2565 { /* likely */ }
2566 else
2567 {
2568 VMXVDIAG const enmDiag = iemVmxGetDiagVmexitPdpteRsvd(iPdpte);
2569 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2570 }
2571 }
2572 }
2573 else
2574 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_HostPdpteCr3ReadPhys);
2575 }
2576
2577 NOREF(pszFailure);
2578 NOREF(uExitReason);
2579 return VINF_SUCCESS;
2580}
2581
2582
2583/**
2584 * Loads the host MSRs from the VM-exit auto-load MSRs area as part of VM-exit.
2585 *
2586 * @returns VBox status code.
2587 * @param pVCpu The cross context virtual CPU structure.
2588 * @param pszInstr The VMX instruction name (for logging purposes).
2589 */
2590IEM_STATIC int iemVmxVmexitLoadHostAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
2591{
2592 /*
2593 * Load host MSRs.
2594 * See Intel spec. 27.6 "Loading MSRs".
2595 */
2596 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2597 const char *const pszFailure = "VMX-abort";
2598
2599 /*
2600 * The VM-exit MSR-load area address need not be a valid guest-physical address if the
2601 * VM-exit MSR load count is 0. If this is the case, bail early without reading it.
2602 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
2603 */
2604 uint32_t const cMsrs = pVmcs->u32ExitMsrLoadCount;
2605 if (!cMsrs)
2606 return VINF_SUCCESS;
2607
2608 /*
2609 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count
2610 * is exceeded, including possibly raising #MC exceptions during VMX transition. Our
2611 * implementation causes a VMX-abort followed by a triple-fault.
2612 */
2613 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
2614 if (fIsMsrCountValid)
2615 { /* likely */ }
2616 else
2617 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadCount);
2618
2619 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrLoad.u;
2620    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
2621                                     GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
2622 if (RT_SUCCESS(rc))
2623 {
2624 PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
2625 Assert(pMsr);
2626 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
2627 {
2628 if ( !pMsr->u32Reserved
2629 && pMsr->u32Msr != MSR_K8_FS_BASE
2630 && pMsr->u32Msr != MSR_K8_GS_BASE
2631 && pMsr->u32Msr != MSR_K6_EFER
2632 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
2633 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
2634 {
2635 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
2636 if (rcStrict == VINF_SUCCESS)
2637 continue;
2638
2639 /*
2640 * If we're in ring-0, we cannot handle returns to ring-3 at this point and still
2641 * continue the VM-exit. If any guest hypervisor loads MSRs that require ring-3
2642 * handling, we cause a VMX-abort, recording the MSR index in the auxiliary info.
2643 * field and indicating it further with our own, specific diagnostic code. Later, we
2644 * can try to handle the MSR in ring-0, or come up with a better, generic solution.
2645 */
2646 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2647 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
2648 ? kVmxVDiag_Vmexit_MsrLoadRing3
2649 : kVmxVDiag_Vmexit_MsrLoad;
2650 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2651 }
2652 else
2653 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadRsvd);
2654 }
2655 }
2656 else
2657 {
2658 AssertMsgFailed(("VM-exit: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
2659 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadPtrReadPhys);
2660 }
2661
2662 NOREF(uExitReason);
2663 NOREF(pszFailure);
2664 return VINF_SUCCESS;
2665}
2666
2667
2668/**
2669 * Loads the host state as part of VM-exit.
2670 *
2671 * @returns Strict VBox status code.
2672 * @param pVCpu The cross context virtual CPU structure.
2673 * @param uExitReason The VM-exit reason (for logging purposes).
2674 */
2675IEM_STATIC VBOXSTRICTRC iemVmxVmexitLoadHostState(PVMCPU pVCpu, uint32_t uExitReason)
2676{
2677 /*
2678 * Load host state.
2679 * See Intel spec. 27.5 "Loading Host State".
2680 */
2681 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2682 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2683
2684 /* We cannot return from a long-mode guest to a host that is not in long mode. */
2685 if ( CPUMIsGuestInLongMode(pVCpu)
2686 && !fHostInLongMode)
2687 {
2688 Log(("VM-exit from long-mode guest to host not in long-mode -> VMX-Abort\n"));
2689 return iemVmxAbort(pVCpu, VMXABORT_HOST_NOT_IN_LONG_MODE);
2690 }
2691
2692 iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);
2693 iemVmxVmexitLoadHostSegRegs(pVCpu);
2694
2695 /*
2696 * Load host RIP, RSP and RFLAGS.
2697 * See Intel spec. 27.5.3 "Loading Host RIP, RSP and RFLAGS"
2698 */
2699 pVCpu->cpum.GstCtx.rip = pVmcs->u64HostRip.u;
2700 pVCpu->cpum.GstCtx.rsp = pVmcs->u64HostRsp.u;
2701 pVCpu->cpum.GstCtx.rflags.u = X86_EFL_1;
2702
2703 /* Update non-register state. */
2704 iemVmxVmexitRestoreForceFlags(pVCpu);
2705
2706 /* Clear address range monitoring. */
2707 EMMonitorWaitClear(pVCpu);
2708
2709 /* Perform the VMX transition (PGM updates). */
2710 VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
2711 if (rcStrict == VINF_SUCCESS)
2712 {
2713        /* Check host PDPTEs (only when we've fully switched page tables). */
2714 /** @todo r=ramshankar: I don't know if PGM does this for us already or not... */
2715 int rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);
2716 if (RT_FAILURE(rc))
2717 {
2718 Log(("VM-exit failed while restoring host PDPTEs -> VMX-Abort\n"));
2719 return iemVmxAbort(pVCpu, VMXBOART_HOST_PDPTE);
2720 }
2721 }
2722 else if (RT_SUCCESS(rcStrict))
2723 {
2724 Log3(("VM-exit: iemVmxWorldSwitch returns %Rrc (uExitReason=%u) -> Setting passup status\n", VBOXSTRICTRC_VAL(rcStrict),
2725 uExitReason));
2726 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2727 }
2728 else
2729 {
2730 Log3(("VM-exit: iemVmxWorldSwitch failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
2731 return VBOXSTRICTRC_VAL(rcStrict);
2732 }
2733
2734 Assert(rcStrict == VINF_SUCCESS);
2735
2736 /* Load MSRs from the VM-exit auto-load MSR area. */
2737 int rc = iemVmxVmexitLoadHostAutoMsrs(pVCpu, uExitReason);
2738 if (RT_FAILURE(rc))
2739 {
2740 Log(("VM-exit failed while loading host MSRs -> VMX-Abort\n"));
2741 return iemVmxAbort(pVCpu, VMXABORT_LOAD_HOST_MSR);
2742 }
2743
2744 return rcStrict;
2745}
2746
2747
2748/**
2749 * VMX VM-exit handler.
2750 *
2751 * @returns Strict VBox status code.
2752 * @retval VINF_VMX_VMEXIT when the VM-exit is successful.
2753 * @retval VINF_EM_TRIPLE_FAULT when VM-exit is unsuccessful and leads to a
2754 * triple-fault.
2755 *
2756 * @param pVCpu The cross context virtual CPU structure.
2757 * @param uExitReason The VM-exit reason.
2758 *
2759 * @remarks Make sure VM-exit qualification is updated before calling this
2760 * function!
2761 */
2762IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason)
2763{
2764 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2765 Assert(pVmcs);
2766
2767 pVmcs->u32RoExitReason = uExitReason;
2768
2769 /** @todo NSTVMX: IEMGetCurrentXcpt will be VM-exit interruption info. */
2770 /** @todo NSTVMX: The source event should be recorded in IDT-vectoring info
2771 * during injection. */
2772
2773 /*
2774 * Save the guest state back into the VMCS.
2775 * We only need to save the state when the VM-entry was successful.
2776 */
2777 bool const fVmentryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
2778 if (!fVmentryFailed)
2779 {
2780 iemVmxVmexitSaveGuestState(pVCpu, uExitReason);
2781 int rc = iemVmxVmexitSaveGuestAutoMsrs(pVCpu, uExitReason);
2782 if (RT_SUCCESS(rc))
2783 { /* likely */ }
2784 else
2785 return iemVmxAbort(pVCpu, VMXABORT_SAVE_GUEST_MSRS);
2786 }
2787
2788 /*
2789 * The high bits of the VM-exit reason are only relevant when the VM-exit occurs in
2790 * enclave mode/SMM which we don't support yet. If we ever add support for it, we can
2791 * pass just the lower bits, till then an assert should suffice.
2792 */
2793 Assert(!RT_HI_U16(uExitReason));
2794
2795 VBOXSTRICTRC rcStrict = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
2796 if (RT_FAILURE(rcStrict))
2797 LogFunc(("Loading host-state failed. uExitReason=%u rc=%Rrc\n", uExitReason, VBOXSTRICTRC_VAL(rcStrict)));
2798
2799 /* We're no longer in nested-guest execution mode. */
2800 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
2801
2802 Assert(rcStrict == VINF_SUCCESS);
2803 return VINF_VMX_VMEXIT;
2804}
2805
2806
2807/**
2808 * VMX VM-exit handler for VM-exits due to instruction execution.
2809 *
2810 * This is intended for instructions where the caller provides all the relevant
2811 * VM-exit information.
2812 *
2813 * @returns Strict VBox status code.
2814 * @param pVCpu The cross context virtual CPU structure.
2815 * @param pExitInfo Pointer to the VM-exit instruction information struct.
2816 */
2817DECLINLINE(VBOXSTRICTRC) iemVmxVmexitInstrWithInfo(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
2818{
2819 /*
2820 * For instructions where any of the following fields are not applicable:
2821 * - VM-exit instruction info. is undefined.
2822 * - VM-exit qualification must be cleared.
2823 * - VM-exit guest-linear address is undefined.
2824 * - VM-exit guest-physical address is undefined.
2825 *
2826 * The VM-exit instruction length is mandatory for all VM-exits that are caused by
2827 * instruction execution. For VM-exits that are not due to instruction execution this
2828 * field is undefined.
2829 *
2830 * In our implementation in IEM, all undefined fields are generally cleared. However,
2831 * if the caller supplies information (from, say, the physical CPU directly) it is
2832 * then possible that the undefined fields are not cleared.
2833 *
2834 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2835 * See Intel spec. 27.2.4 "Information for VM Exits Due to Instruction Execution".
2836 */
2837 Assert(pExitInfo);
2838 AssertMsg(pExitInfo->uReason <= VMX_EXIT_MAX, ("uReason=%u\n", pExitInfo->uReason));
2839 AssertMsg(pExitInfo->cbInstr >= 1 && pExitInfo->cbInstr <= 15,
2840 ("uReason=%u cbInstr=%u\n", pExitInfo->uReason, pExitInfo->cbInstr));
2841
2842 /* Update all the relevant fields from the VM-exit instruction information struct. */
2843 iemVmxVmcsSetExitInstrInfo(pVCpu, pExitInfo->InstrInfo.u);
2844 iemVmxVmcsSetExitQual(pVCpu, pExitInfo->u64Qual);
2845 iemVmxVmcsSetExitGuestLinearAddr(pVCpu, pExitInfo->u64GuestLinearAddr);
2846 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, pExitInfo->u64GuestPhysAddr);
2847 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
2848
2849 /* Perform the VM-exit. */
2850 return iemVmxVmexit(pVCpu, pExitInfo->uReason);
2851}
2852
2853
2854/**
2855 * VMX VM-exit handler for VM-exits due to instruction execution.
2856 *
2857 * This is intended for instructions that only provide the VM-exit instruction
2858 * length.
2859 *
2860 * @param pVCpu The cross context virtual CPU structure.
2861 * @param uExitReason The VM-exit reason.
2862 * @param cbInstr The instruction length in bytes.
2863 */
2864IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstr(PVMCPU pVCpu, uint32_t uExitReason, uint8_t cbInstr)
2865{
2866 VMXVEXITINFO ExitInfo;
2867 RT_ZERO(ExitInfo);
2868 ExitInfo.uReason = uExitReason;
2869 ExitInfo.cbInstr = cbInstr;
2870
2871#ifdef VBOX_STRICT
2872 /* To prevent us from shooting ourselves in the foot. Maybe remove later. */
2873 switch (uExitReason)
2874 {
2875 case VMX_EXIT_INVEPT:
2876 case VMX_EXIT_INVPCID:
2877 case VMX_EXIT_LDTR_TR_ACCESS:
2878 case VMX_EXIT_GDTR_IDTR_ACCESS:
2879 case VMX_EXIT_VMCLEAR:
2880 case VMX_EXIT_VMPTRLD:
2881 case VMX_EXIT_VMPTRST:
2882 case VMX_EXIT_VMREAD:
2883 case VMX_EXIT_VMWRITE:
2884 case VMX_EXIT_VMXON:
2885 case VMX_EXIT_XRSTORS:
2886 case VMX_EXIT_XSAVES:
2887 case VMX_EXIT_RDRAND:
2888 case VMX_EXIT_RDSEED:
2889 case VMX_EXIT_IO_INSTR:
2890 AssertMsgFailedReturn(("Use iemVmxVmexitInstrNeedsInfo for uExitReason=%u\n", uExitReason), VERR_IEM_IPE_5);
2891 break;
2892 }
2893#endif
2894
2895 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2896}
2897
2898
2899/**
2900 * VMX VM-exit handler for VM-exits due to instruction execution.
2901 *
2902 * This is intended for instructions that have a ModR/M byte and update the VM-exit
2903 * instruction information and VM-exit qualification fields.
2904 *
2905 * @param pVCpu The cross context virtual CPU structure.
2906 * @param uExitReason The VM-exit reason.
2907 * @param uInstrId The instruction identity (VMXINSTRID_XXX).
2908 * @param cbInstr The instruction length in bytes.
2909 *
2910 * @remarks Do not use this for INS/OUTS instructions.
2911 */
2912IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrNeedsInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr)
2913{
2914 VMXVEXITINFO ExitInfo;
2915 RT_ZERO(ExitInfo);
2916 ExitInfo.uReason = uExitReason;
2917 ExitInfo.cbInstr = cbInstr;
2918
2919 /*
2920 * Update the VM-exit qualification field with displacement bytes.
2921 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2922 */
2923 switch (uExitReason)
2924 {
2925 case VMX_EXIT_INVEPT:
2926 case VMX_EXIT_INVPCID:
2927 case VMX_EXIT_LDTR_TR_ACCESS:
2928 case VMX_EXIT_GDTR_IDTR_ACCESS:
2929 case VMX_EXIT_VMCLEAR:
2930 case VMX_EXIT_VMPTRLD:
2931 case VMX_EXIT_VMPTRST:
2932 case VMX_EXIT_VMREAD:
2933 case VMX_EXIT_VMWRITE:
2934 case VMX_EXIT_VMXON:
2935 case VMX_EXIT_XRSTORS:
2936 case VMX_EXIT_XSAVES:
2937 case VMX_EXIT_RDRAND:
2938 case VMX_EXIT_RDSEED:
2939 {
2940 /* Construct the VM-exit instruction information. */
2941 RTGCPTR GCPtrDisp;
2942 uint32_t const uInstrInfo = iemVmxGetExitInstrInfo(pVCpu, uExitReason, uInstrId, &GCPtrDisp);
2943
2944 /* Update the VM-exit instruction information. */
2945 ExitInfo.InstrInfo.u = uInstrInfo;
2946
2947 /* Update the VM-exit qualification. */
2948 ExitInfo.u64Qual = GCPtrDisp;
2949 break;
2950 }
2951
2952 default:
2953 AssertMsgFailedReturn(("Use instruction-specific handler\n"), VERR_IEM_IPE_5);
2954 break;
2955 }
2956
2957 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2958}
2959
2960
2961/**
2962 * Checks whether an I/O instruction for the given port is intercepted (causes a
2963 * VM-exit) or not.
2964 *
2965 * @returns @c true if the instruction is intercepted, @c false otherwise.
2966 * @param pVCpu The cross context virtual CPU structure.
2967 * @param u16Port The I/O port being accessed by the instruction.
2968 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
2969 */
2970IEM_STATIC bool iemVmxIsIoInterceptSet(PVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
2971{
2972 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2973 Assert(pVmcs);
2974
2975 /*
2976 * Check whether the I/O instruction must cause a VM-exit or not.
2977 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
2978 */
2979 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_UNCOND_IO_EXIT)
2980 return true;
2981
2982 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
2983 {
2984 uint8_t const *pbIoBitmapA = (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvIoBitmap);
2985 uint8_t const *pbIoBitmapB = (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvIoBitmap) + VMX_V_IO_BITMAP_A_SIZE;
2986 Assert(pbIoBitmapA);
2987 Assert(pbIoBitmapB);
2988 return HMVmxGetIoBitmapPermission(pbIoBitmapA, pbIoBitmapB, u16Port, cbAccess);
2989 }
2990
2991 return false;
2992}
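
/*
 * Illustrative sketch (not part of the original sources): a minimal restatement of
 * the I/O bitmap lookup performed by HMVmxGetIoBitmapPermission(), assuming the
 * architectural layout where bitmap A covers ports 0x0000..0x7fff, bitmap B covers
 * ports 0x8000..0xffff with one bit per port, and a multi-byte access is intercepted
 * if any accessed port's bit is set. The helper name below is hypothetical.
 */
#if 0 /* Example only, never compiled. */
static bool exampleIoBitmapPermission(uint8_t const *pbBitmapA, uint8_t const *pbBitmapB, uint16_t u16Port, uint8_t cbAccess)
{
    /* An access wrapping around the 16-bit I/O port space always causes a VM-exit. */
    if ((uint32_t)u16Port + cbAccess - 1 > 0xffff)
        return true;
    for (uint8_t i = 0; i < cbAccess; i++)
    {
        uint16_t const uThisPort = u16Port + i;
        uint8_t const *pbBitmap  = uThisPort < 0x8000 ? pbBitmapA : pbBitmapB;
        uint16_t const offPort   = uThisPort & 0x7fff;   /* Bit index within the selected bitmap. */
        if (pbBitmap[offPort / 8] & RT_BIT(offPort % 8))
            return true;                                 /* At least one accessed port is intercepted. */
    }
    return false;
}
#endif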
2993
2994
2995/**
2996 * VMX VM-exit handler for VM-exits due to INVLPG.
2997 *
2998 * @param pVCpu The cross context virtual CPU structure.
2999 * @param GCPtrPage The guest-linear address of the page being invalidated.
3000 * @param cbInstr The instruction length in bytes.
3001 */
3002IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrInvlpg(PVMCPU pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr)
3003{
3004 VMXVEXITINFO ExitInfo;
3005 RT_ZERO(ExitInfo);
3006 ExitInfo.uReason = VMX_EXIT_INVLPG;
3007 ExitInfo.cbInstr = cbInstr;
3008 ExitInfo.u64Qual = GCPtrPage;
3009 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(ExitInfo.u64Qual));
3010
3011 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3012}
3013
3014
3015/**
3016 * VMX VM-exit handler for VM-exits due to LMSW.
3017 *
3018 * @returns Strict VBox status code.
3019 * @param pVCpu The cross context virtual CPU structure.
3020 * @param uGuestCr0 The current guest CR0.
3021 * @param pu16NewMsw The machine-status word specified in LMSW's source
3022 * operand. This will be updated depending on the VMX
3023 * guest/host CR0 mask if LMSW is not intercepted.
3024 * @param GCPtrEffDst The guest-linear address of the source operand in case
3025 * of a memory operand. For register operand, pass
3026 * NIL_RTGCPTR.
3027 * @param cbInstr The instruction length in bytes.
3028 */
3029IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrLmsw(PVMCPU pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw, RTGCPTR GCPtrEffDst,
3030 uint8_t cbInstr)
3031{
3032 /*
3033 * LMSW VM-exits are subject to the CR0 guest/host mask and the CR0 read shadow.
3034 *
3035 * See Intel spec. 24.6.6 "Guest/Host Masks and Read Shadows for CR0 and CR4".
3036 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3037 */
3038 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3039 Assert(pVmcs);
3040 Assert(pu16NewMsw);
3041
3042 bool fIntercept = false;
3043 uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
3044 uint32_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u;
3045
3046 /*
3047 * LMSW can never clear CR0.PE but it may set it. Hence, we handle the
3048 * CR0.PE case first, before the rest of the bits in the MSW.
3049 *
3050 * If CR0.PE is owned by the host and CR0.PE differs between the
3051 * MSW (source operand) and the read-shadow, we must cause a VM-exit.
3052 */
3053 if ( (fGstHostMask & X86_CR0_PE)
3054 && (*pu16NewMsw & X86_CR0_PE)
3055 && !(fReadShadow & X86_CR0_PE))
3056 fIntercept = true;
3057
3058 /*
3059 * If CR0.MP, CR0.EM or CR0.TS is owned by the host, and the corresponding
3060 * bits differ between the MSW (source operand) and the read-shadow, we must
3061 * cause a VM-exit.
3062 */
3063 uint32_t fGstHostLmswMask = fGstHostMask & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3064 if ((fReadShadow & fGstHostLmswMask) != (*pu16NewMsw & fGstHostLmswMask))
3065 fIntercept = true;
3066
3067 if (fIntercept)
3068 {
3069 Log2(("lmsw: Guest intercept -> VM-exit\n"));
3070
3071 VMXVEXITINFO ExitInfo;
3072 RT_ZERO(ExitInfo);
3073 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3074 ExitInfo.cbInstr = cbInstr;
3075
3076 bool const fMemOperand = RT_BOOL(GCPtrEffDst != NIL_RTGCPTR);
3077 if (fMemOperand)
3078 {
3079 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(GCPtrEffDst));
3080 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
3081 }
3082
3083 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
3084 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_LMSW)
3085 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_OP, fMemOperand)
3086 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_DATA, *pu16NewMsw);
3087
3088 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3089 }
3090
3091 /*
3092 * If LMSW did not cause a VM-exit, any CR0 bits in the range 0:3 that are set in the
3093 * CR0 guest/host mask must be left unmodified.
3094 *
3095 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3096 */
3097 fGstHostLmswMask = fGstHostMask & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3098 *pu16NewMsw = (uGuestCr0 & fGstHostLmswMask) | (*pu16NewMsw & ~fGstHostLmswMask);
3099
3100 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3101}
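
/*
 * Worked example for the LMSW handling above (hypothetical values): with a CR0
 * guest/host mask of 0x0b (PE, MP and TS host-owned), a CR0 read shadow with PE=1
 * and MP=TS=0, and an MSW source operand of 0x0003 (PE=1, MP=1), the PE check
 * passes (the shadow already has PE set) but MP is host-owned and differs from the
 * shadow, so a VM-exit is triggered. Had no host-owned bit differed, the merge at
 * the end would have preserved the host-owned bits from the current guest CR0:
 *
 *     *pu16NewMsw = (uGuestCr0 & fGstHostLmswMask) | (*pu16NewMsw & ~fGstHostLmswMask);
 */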
3102
3103
3104/**
3105 * VMX VM-exit handler for VM-exits due to CLTS.
3106 *
3107 * @returns Strict VBox status code.
3108 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the CLTS instruction did not cause a
3109 * VM-exit but must not modify the guest CR0.TS bit.
3110 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the CLTS instruction did not cause a
3111 * VM-exit and modification to the guest CR0.TS bit is allowed (subject to
3112 * CR0 fixed bits in VMX operation).
3113 * @param pVCpu The cross context virtual CPU structure.
3114 * @param cbInstr The instruction length in bytes.
3115 */
3116IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrClts(PVMCPU pVCpu, uint8_t cbInstr)
3117{
3118 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3119 Assert(pVmcs);
3120
3121 uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
3122 uint32_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u;
3123
3124 /*
3125 * If CR0.TS is owned by the host:
3126 * - If CR0.TS is set in the read-shadow, we must cause a VM-exit.
3127 * - If CR0.TS is cleared in the read-shadow, no VM-exit is caused and the
3128 * CLTS instruction completes without clearing CR0.TS.
3129 *
3130 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3131 */
3132 if (fGstHostMask & X86_CR0_TS)
3133 {
3134 if (fReadShadow & X86_CR0_TS)
3135 {
3136 Log2(("clts: Guest intercept -> VM-exit\n"));
3137
3138 VMXVEXITINFO ExitInfo;
3139 RT_ZERO(ExitInfo);
3140 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3141 ExitInfo.cbInstr = cbInstr;
3142
3143 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
3144 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_CLTS);
3145 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3146 }
3147
3148 return VINF_VMX_MODIFIES_BEHAVIOR;
3149 }
3150
3151 /*
3152 * If CR0.TS is not owned by the host, the CLTS instruction operates normally
3153 * and may modify CR0.TS (subject to CR0 fixed bits in VMX operation).
3154 */
3155 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3156}
3157
3158
3159/**
3160 * VMX VM-exit handler for VM-exits due to 'Mov CR0,GReg' and 'Mov CR4,GReg'
3161 * (CR0/CR4 write).
3162 *
3163 * @returns Strict VBox status code.
3164 * @param pVCpu The cross context virtual CPU structure.
3165 * @param iCrReg The control register (either CR0 or CR4).
3167 * @param puNewCrX Pointer to the new CR0/CR4 value. Will be updated
3168 * if no VM-exit is caused.
3169 * @param iGReg The general register from which the CR0/CR4 value is
3170 * being loaded.
3171 * @param cbInstr The instruction length in bytes.
3172 */
3173IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr0Cr4(PVMCPU pVCpu, uint8_t iCrReg, uint64_t *puNewCrX, uint8_t iGReg,
3174 uint8_t cbInstr)
3175{
3176 Assert(puNewCrX);
3177 Assert(iCrReg == 0 || iCrReg == 4);
3178
3179 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3180 Assert(pVmcs);
3181
3182 uint64_t uGuestCrX;
3183 uint64_t fGstHostMask;
3184 uint64_t fReadShadow;
3185 if (iCrReg == 0)
3186 {
3187 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
3188 uGuestCrX = pVCpu->cpum.GstCtx.cr0;
3189 fGstHostMask = pVmcs->u64Cr0Mask.u;
3190 fReadShadow = pVmcs->u64Cr0ReadShadow.u;
3191 }
3192 else
3193 {
3194 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
3195 uGuestCrX = pVCpu->cpum.GstCtx.cr4;
3196 fGstHostMask = pVmcs->u64Cr4Mask.u;
3197 fReadShadow = pVmcs->u64Cr4ReadShadow.u;
3198 }
3199
3200 /*
3201 * For any CR0/CR4 bit owned by the host (in the CR0/CR4 guest/host mask), if the
3202 * corresponding bits differ between the source operand and the read-shadow,
3203 * we must cause a VM-exit.
3204 *
3205 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3206 */
3207 if ((fReadShadow & fGstHostMask) != (*puNewCrX & fGstHostMask))
3208 {
3209 Log2(("mov_Cr_Rd: (CR%u) Guest intercept -> VM-exit\n", iCrReg));
3210
3211 VMXVEXITINFO ExitInfo;
3212 RT_ZERO(ExitInfo);
3213 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3214 ExitInfo.cbInstr = cbInstr;
3215
3216 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, iCrReg)
3217 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3218 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3219 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3220 }
3221
3222 /*
3223 * If the Mov-to-CR0/CR4 did not cause a VM-exit, any bits owned by the host
3224 * must not be modified by the instruction.
3225 *
3226 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3227 */
3228 *puNewCrX = (uGuestCrX & fGstHostMask) | (*puNewCrX & ~fGstHostMask);
3229
3230 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3231}
3232
3233
3234/**
3235 * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR3' (CR3 read).
3236 *
3237 * @returns VBox strict status code.
3238 * @param pVCpu The cross context virtual CPU structure.
3239 * @param iGReg The general register to which the CR3 value is being stored.
3240 * @param cbInstr The instruction length in bytes.
3241 */
3242IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3243{
3244 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3245 Assert(pVmcs);
3246 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
3247
3248 /*
3249 * If the CR3-store exiting control is set, we must cause a VM-exit.
3250 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3251 */
3252 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT)
3253 {
3254 Log2(("mov_Rd_Cr: (CR3) Guest intercept -> VM-exit\n"));
3255
3256 VMXVEXITINFO ExitInfo;
3257 RT_ZERO(ExitInfo);
3258 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3259 ExitInfo.cbInstr = cbInstr;
3260
3261 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
3262 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ)
3263 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3264 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3265 }
3266
3267 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3268}
3269
3270
3271/**
3272 * VMX VM-exit handler for VM-exits due to 'Mov CR3,GReg' (CR3 write).
3273 *
3274 * @returns VBox strict status code.
3275 * @param pVCpu The cross context virtual CPU structure.
3276 * @param uNewCr3 The new CR3 value.
3277 * @param iGReg The general register from which the CR3 value is being
3278 * loaded.
3279 * @param cbInstr The instruction length in bytes.
3280 */
3281IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr3(PVMCPU pVCpu, uint64_t uNewCr3, uint8_t iGReg, uint8_t cbInstr)
3282{
3283 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3284 Assert(pVmcs);
3285
3286 /*
3287 * If the CR3-load exiting control is set and the new CR3 value does not
3288 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
3289 *
3290 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3291 */
3292 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR3_LOAD_EXIT)
3293 {
3294 uint32_t const uCr3TargetCount = pVmcs->u32Cr3TargetCount;
3295 Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);
3296
3297 /* Note! With a CR3-target count of 0, every MOV to CR3 causes a VM-exit. */
3298 bool fIntercept = true;
3299 for (uint32_t idxCr3Target = 0; idxCr3Target < uCr3TargetCount && fIntercept; idxCr3Target++)
3300 if (uNewCr3 == iemVmxVmcsGetCr3TargetValue(pVmcs, idxCr3Target))
3301 fIntercept = false;
3302
3303 if (fIntercept)
3304 {
3305 Log2(("mov_Cr_Rd: (CR3) Guest intercept -> VM-exit\n"));
3306 VMXVEXITINFO ExitInfo;
3307 RT_ZERO(ExitInfo);
3308 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3309 ExitInfo.cbInstr = cbInstr;
3310 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
3311 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3312 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3313 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3314 }
3315 }
3316
3317 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3318 }
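
/*
 * Illustrative sketch (not part of the original sources): the CR3-target matching
 * rule above in isolation. A VM-exit is required unless the new CR3 equals at least
 * one CR3-target value; with a target count of zero there is nothing to match, so
 * every MOV to CR3 exits. The helper name below is hypothetical.
 */
#if 0 /* Example only, never compiled. */
static bool exampleIsCr3WriteIntercepted(PCVMXVVMCS pVmcs, uint64_t uNewCr3)
{
    for (uint32_t i = 0; i < pVmcs->u32Cr3TargetCount; i++)
        if (uNewCr3 == iemVmxVmcsGetCr3TargetValue(pVmcs, i))
            return false;   /* Matched a CR3-target value, no VM-exit. */
    return true;            /* No match (or count is 0), VM-exit. */
}
#endif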
3319
3320
3321/**
3322 * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR8' (CR8 read).
3323 *
3324 * @returns VBox strict status code.
3325 * @param pVCpu The cross context virtual CPU structure.
3326 * @param iGReg The general register to which the CR8 value is being stored.
3327 * @param cbInstr The instruction length in bytes.
3328 */
3329IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr8(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3330{
3331 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3332 Assert(pVmcs);
3333
3334 /*
3335 * If the CR8-store exiting control is set, we must cause a VM-exit.
3336 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3337 */
3338 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR8_STORE_EXIT)
3339 {
3340 Log2(("mov_Rd_Cr: (CR8) Guest intercept -> VM-exit\n"));
3341
3342 VMXVEXITINFO ExitInfo;
3343 RT_ZERO(ExitInfo);
3344 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3345 ExitInfo.cbInstr = cbInstr;
3346
3347 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
3348 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ)
3349 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3350 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3351 }
3352
3353 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3354}
3355
3356
3357/**
3358 * VMX VM-exit handler for VM-exits due to 'Mov CR8,GReg' (CR8 write).
3359 *
3360 * @returns VBox strict status code.
3361 * @param pVCpu The cross context virtual CPU structure.
3362 * @param iGReg The general register from which the CR8 value is being
3363 * loaded.
3364 * @param cbInstr The instruction length in bytes.
3365 */
3366IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr8(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3367{
3368 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3369 Assert(pVmcs);
3370
3371 /*
3372 * If the CR8-load exiting control is set, we must cause a VM-exit.
3373 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3374 */
3375 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR8_LOAD_EXIT)
3376 {
3377 Log2(("mov_Cr_Rd: (CR8) Guest intercept -> VM-exit\n"));
3378
3379 VMXVEXITINFO ExitInfo;
3380 RT_ZERO(ExitInfo);
3381 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3382 ExitInfo.cbInstr = cbInstr;
3383
3384 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
3385 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3386 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3387 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3388 }
3389
3390 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3391}
3392
3393
3394/**
3395 * VMX VM-exit handler for VM-exits due to 'Mov DRx,GReg' (DRx write) and 'Mov
3396 * GReg,DRx' (DRx read).
3397 *
3398 * @returns VBox strict status code.
3399 * @param pVCpu The cross context virtual CPU structure.
3400 * @param uInstrId The instruction identity (VMXINSTRID_MOV_TO_DRX or
3401 * VMXINSTRID_MOV_FROM_DRX).
3402 * @param iDrReg The debug register being accessed.
3403 * @param iGReg The general register to/from which the DRx value is being
3404 * stored/loaded.
3405 * @param cbInstr The instruction length in bytes.
3406 */
3407IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovDrX(PVMCPU pVCpu, VMXINSTRID uInstrId, uint8_t iDrReg, uint8_t iGReg,
3408 uint8_t cbInstr)
3409{
3410 Assert(iDrReg <= 7);
3411 Assert(uInstrId == VMXINSTRID_MOV_TO_DRX || uInstrId == VMXINSTRID_MOV_FROM_DRX);
3412
3413 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3414 Assert(pVmcs);
3415
3416 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT)
3417 {
3418 uint32_t const uDirection = uInstrId == VMXINSTRID_MOV_TO_DRX ? VMX_EXIT_QUAL_DRX_DIRECTION_WRITE
3419 : VMX_EXIT_QUAL_DRX_DIRECTION_READ;
3420 VMXVEXITINFO ExitInfo;
3421 RT_ZERO(ExitInfo);
3422 ExitInfo.uReason = VMX_EXIT_MOV_DRX;
3423 ExitInfo.cbInstr = cbInstr;
3424 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_REGISTER, iDrReg)
3425 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_DIRECTION, uDirection)
3426 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_GENREG, iGReg);
3427 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3428 }
3429
3430 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3431}
3432
3433
3434/**
3435 * VMX VM-exit handler for VM-exits due to I/O instructions (IN and OUT).
3436 *
3437 * @returns VBox strict status code.
3438 * @param pVCpu The cross context virtual CPU structure.
3439 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_IO_IN or
3440 * VMXINSTRID_IO_OUT).
3441 * @param u16Port The I/O port being accessed.
3442 * @param fImm Whether the I/O port was encoded using an immediate operand
3443 * or the implicit DX register.
3444 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3445 * @param cbInstr The instruction length in bytes.
3446 */
3447IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrIo(PVMCPU pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, bool fImm, uint8_t cbAccess,
3448 uint8_t cbInstr)
3449{
3450 Assert(uInstrId == VMXINSTRID_IO_IN || uInstrId == VMXINSTRID_IO_OUT);
3451 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3452
3453 bool const fIntercept = iemVmxIsIoInterceptSet(pVCpu, u16Port, cbAccess);
3454 if (fIntercept)
3455 {
3456 uint32_t const uDirection = uInstrId == VMXINSTRID_IO_IN ? VMX_EXIT_QUAL_IO_DIRECTION_IN
3457 : VMX_EXIT_QUAL_IO_DIRECTION_OUT;
3458 VMXVEXITINFO ExitInfo;
3459 RT_ZERO(ExitInfo);
3460 ExitInfo.uReason = VMX_EXIT_IO_INSTR;
3461 ExitInfo.cbInstr = cbInstr;
3462 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_WIDTH, cbAccess - 1)
3463 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_DIRECTION, uDirection)
3464 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_ENCODING, fImm)
3465 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_PORT, u16Port);
3466 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3467 }
3468
3469 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3470}
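
/*
 * Worked example for the exit qualification built above (hypothetical instruction):
 * for "IN AL, 60h" -- a 1-byte read from port 0x60 with an immediate port operand --
 * the size field holds 0 (cbAccess - 1), the direction bit (bit 3) is set for IN,
 * the encoding bit (bit 6) is set for an immediate operand, and bits 31:16 hold the
 * port, yielding an exit qualification of 0x00600048.
 */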
3471
3472
3473/**
3474 * VMX VM-exit handler for VM-exits due to string I/O instructions (INS and OUTS).
3475 *
3476 * @returns VBox strict status code.
3477 * @param pVCpu The cross context virtual CPU structure.
3478 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_IO_INS or
3479 * VMXINSTRID_IO_OUTS).
3480 * @param u16Port The I/O port being accessed.
3481 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3482 * @param fRep Whether the instruction has a REP prefix or not.
3483 * @param ExitInstrInfo The VM-exit instruction info. field.
3484 * @param cbInstr The instruction length in bytes.
3485 */
3486IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrStrIo(PVMCPU pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, uint8_t cbAccess, bool fRep,
3487 VMXEXITINSTRINFO ExitInstrInfo, uint8_t cbInstr)
3488{
3489 Assert(uInstrId == VMXINSTRID_IO_INS || uInstrId == VMXINSTRID_IO_OUTS);
3490 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3491 Assert(ExitInstrInfo.StrIo.iSegReg < X86_SREG_COUNT);
3492 Assert(ExitInstrInfo.StrIo.u3AddrSize == 0 || ExitInstrInfo.StrIo.u3AddrSize == 1 || ExitInstrInfo.StrIo.u3AddrSize == 2);
3493 Assert(uInstrId != VMXINSTRID_IO_INS || ExitInstrInfo.StrIo.iSegReg == X86_SREG_ES);
3494
3495 bool const fIntercept = iemVmxIsIoInterceptSet(pVCpu, u16Port, cbAccess);
3496 if (fIntercept)
3497 {
3498 /*
3499 * Figure out the guest-linear address and the direction bit (INS/OUTS).
3500 */
3501 /** @todo r=ramshankar: Is there something in IEM that already does this? */
3502 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
3503 uint8_t const iSegReg = ExitInstrInfo.StrIo.iSegReg;
3504 uint8_t const uAddrSize = ExitInstrInfo.StrIo.u3AddrSize;
3505 uint64_t const uAddrSizeMask = s_auAddrSizeMasks[uAddrSize];
3506
3507 uint32_t uDirection;
3508 uint64_t uGuestLinearAddr;
3509 if (uInstrId == VMXINSTRID_IO_INS)
3510 {
3511 uDirection = VMX_EXIT_QUAL_IO_DIRECTION_IN;
3512 uGuestLinearAddr = pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base + (pVCpu->cpum.GstCtx.rdi & uAddrSizeMask);
3513 }
3514 else
3515 {
3516 uDirection = VMX_EXIT_QUAL_IO_DIRECTION_OUT;
3517 uGuestLinearAddr = pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base + (pVCpu->cpum.GstCtx.rsi & uAddrSizeMask);
3518 }
3519
3520 /*
3521 * If the segment is unusable, the guest-linear address is undefined.
3522 * We shall clear it for consistency.
3523 *
3524 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
3525 */
3526 if (pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable)
3527 uGuestLinearAddr = 0;
3528
3529 VMXVEXITINFO ExitInfo;
3530 RT_ZERO(ExitInfo);
3531 ExitInfo.uReason = VMX_EXIT_IO_INSTR;
3532 ExitInfo.cbInstr = cbInstr;
3533 ExitInfo.InstrInfo = ExitInstrInfo;
3534 ExitInfo.u64GuestLinearAddr = uGuestLinearAddr;
3535 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_WIDTH, cbAccess - 1)
3536 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_DIRECTION, uDirection)
3537 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_IS_STRING, 1)
3538 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_IS_REP, fRep)
3539 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_ENCODING, VMX_EXIT_QUAL_IO_ENCODING_DX)
3540 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_PORT, u16Port);
3541 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3542 }
3543
3544 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3545}
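
/*
 * Worked example for the guest-linear address above (hypothetical values): for a
 * 16-bit "OUTS DX, BYTE PTR [SI]" with DS.base=0x10000 and SI=0x1234, the address-
 * size mask is 0xffff, so the reported address is 0x10000 + 0x1234 = 0x11234.
 * INS would instead use ES:[E/R]DI, matching the direction handling above.
 */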
3546
3547
3548/**
3549 * VMX VM-exit handler for VM-exits due to MWAIT.
3550 *
3551 * @returns VBox strict status code.
3552 * @param pVCpu The cross context virtual CPU structure.
3553 * @param fMonitorHwArmed Whether the address-range monitor hardware is armed.
3554 * @param cbInstr The instruction length in bytes.
3555 */
3556IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMwait(PVMCPU pVCpu, bool fMonitorHwArmed, uint8_t cbInstr)
3557{
3558 VMXVEXITINFO ExitInfo;
3559 RT_ZERO(ExitInfo);
3560 ExitInfo.uReason = VMX_EXIT_MWAIT;
3561 ExitInfo.cbInstr = cbInstr;
3562 ExitInfo.u64Qual = fMonitorHwArmed;
3563 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3564}
3565
3566
3567/**
3568 * VMX VM-exit handler for VM-exits due to PAUSE.
3569 *
3570 * @returns VBox strict status code.
3571 * @param pVCpu The cross context virtual CPU structure.
3572 * @param cbInstr The instruction length in bytes.
3573 */
3574IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrPause(PVMCPU pVCpu, uint8_t cbInstr)
3575{
3576 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3577 Assert(pVmcs);
3578
3579 /*
3580 * The PAUSE VM-exit is controlled by the "PAUSE exiting" control and the
3581 * "PAUSE-loop exiting" control.
3582 *
3583 * The PLE-Gap is the maximum number of TSC ticks between two successive executions of
3584 * the PAUSE instruction for them to be considered part of the same pause loop. The
3585 * PLE-Window is the maximum number of TSC ticks the guest may spend in such a pause
3586 * loop before we must cause a VM-exit.
3587 *
3588 * See Intel spec. 24.6.13 "Controls for PAUSE-Loop Exiting".
3589 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3590 */
3591 bool fIntercept = false;
3592 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_PAUSE_EXIT)
3593 fIntercept = true;
3594 else if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
3595 && pVCpu->iem.s.uCpl == 0)
3596 {
3597 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
3598
3599 /*
3600 * A previous-PAUSE-tick value of 0 is used to identify the first execution
3601 * of a PAUSE instruction after VM-entry at CPL 0. We must consider this to
3602 * be the first execution of PAUSE in a loop according to the Intel spec.
3603 *
3604 * For all subsequent recordings of the previous-PAUSE-tick, we ensure that
3605 * it cannot be zero by OR'ing in 1, which rules out the TSC wrap-around
3606 * case at 0.
3607 */
3608 uint64_t *puFirstPauseLoopTick = &pVCpu->cpum.GstCtx.hwvirt.vmx.uFirstPauseLoopTick;
3609 uint64_t *puPrevPauseTick = &pVCpu->cpum.GstCtx.hwvirt.vmx.uPrevPauseTick;
3610 uint64_t const uTick = TMCpuTickGet(pVCpu);
3611 uint32_t const uPleGap = pVmcs->u32PleGap;
3612 uint32_t const uPleWindow = pVmcs->u32PleWindow;
3613 if ( *puPrevPauseTick == 0
3614 || uTick - *puPrevPauseTick > uPleGap)
3615 *puFirstPauseLoopTick = uTick;
3616 else if (uTick - *puFirstPauseLoopTick > uPleWindow)
3617 fIntercept = true;
3618
3619 *puPrevPauseTick = uTick | 1;
3620 }
3621
3622 if (fIntercept)
3623 {
3624 VMXVEXITINFO ExitInfo;
3625 RT_ZERO(ExitInfo);
3626 ExitInfo.uReason = VMX_EXIT_PAUSE;
3627 ExitInfo.cbInstr = cbInstr;
3628 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3629 }
3630
3631 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3632}
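
/*
 * Illustrative PLE timeline for the logic above (hypothetical tick values): with
 * PLE-Gap=128 and PLE-Window=4096, PAUSE executions every 100 ticks starting at
 * tick 1000 each fall within the gap of their predecessor, so the first-pause
 * anchor stays at 1000; the PAUSE at tick 5200 then exceeds 1000 + 4096 and causes
 * the VM-exit. A single PAUSE arriving more than 128 ticks after its predecessor
 * would instead move the anchor and restart the loop-detection window.
 */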
3633
3634
3635/**
3636 * VMX VM-exit handler for VM-exits due to task switches.
3637 *
3638 * @returns VBox strict status code.
3639 * @param pVCpu The cross context virtual CPU structure.
3640 * @param enmTaskSwitch The cause of the task switch.
3641 * @param SelNewTss The selector of the new TSS.
3642 * @param cbInstr The instruction length in bytes.
3643 */
3644IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr)
3645{
3646 /*
3647 * Task-switch VM-exits are unconditional and provide the VM-exit qualification.
3648 *
3649 * If the cause of the task switch is the execution of CALL, IRET or the JMP
3650 * instruction, or the delivery of an exception generated by one of these instructions
3651 * leading to a task switch through a task gate in the IDT, we need to provide the
3652 * VM-exit instruction length. Any other means of invoking a task switch VM-exit
3653 * leaves the VM-exit instruction length field undefined.
3654 *
3655 * See Intel spec. 25.2 "Other Causes Of VM Exits".
3656 * See Intel spec. 27.2.4 "Information for VM Exits Due to Instruction Execution".
3657 */
3658 Assert(cbInstr <= 15);
3659
3660 uint8_t uType;
3661 switch (enmTaskSwitch)
3662 {
3663 case IEMTASKSWITCH_CALL: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_CALL; break;
3664 case IEMTASKSWITCH_IRET: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IRET; break;
3665 case IEMTASKSWITCH_JUMP: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_JMP; break;
3666 case IEMTASKSWITCH_INT_XCPT: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT; break;
3667 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3668 }
3669
3670 uint64_t const uExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_NEW_TSS, SelNewTss)
3671 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_SOURCE, uType);
3672 iemVmxVmcsSetExitQual(pVCpu, uExitQual);
3673 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
3674 return iemVmxVmexit(pVCpu, VMX_EXIT_TASK_SWITCH);
3675}
3676
3677
3678/**
3679 * VMX VM-exit handler for VM-exits due to expiry of the preemption timer.
3680 *
3681 * @returns VBox strict status code.
3682 * @param pVCpu The cross context virtual CPU structure.
3683 */
3684IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPU pVCpu)
3685{
3686 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3687 Assert(pVmcs);
3688 Assert(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER);
3689 NOREF(pVmcs);
3690
3691 iemVmxVmcsSetExitQual(pVCpu, 0);
3692 return iemVmxVmexit(pVCpu, VMX_EXIT_PREEMPT_TIMER);
3693}
3694
3695
3696/**
3697 * VMX VM-exit handler for VM-exits due to external interrupts.
3698 *
3699 * @returns VBox strict status code.
3700 * @param pVCpu The cross context virtual CPU structure.
3701 * @param uVector The external interrupt vector.
3702 * @param fIntPending Whether the external interrupt is pending or
3703 * acknowledged in the interrupt controller.
3704 */
3705IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
3706{
3707 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3708 Assert(pVmcs);
3709
3710 /* The VM-exit is subject to the "External interrupt exiting" control being set. */
3711 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT)
3712 {
3713 if (fIntPending)
3714 {
3715 /*
3716 * If the interrupt is pending and we don't need to acknowledge the
3717 * interrupt on VM-exit, cause the VM-exit immediately.
3718 *
3719 * See Intel spec 25.2 "Other Causes Of VM Exits".
3720 */
3721 if (!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT))
3722 {
3723 iemVmxVmcsSetExitIntInfo(pVCpu, 0);
3724 iemVmxVmcsSetExitIntErrCode(pVCpu, 0);
3725 iemVmxVmcsSetExitQual(pVCpu, 0);
3726 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT);
3727 }
3728
3729 /*
3730 * If the interrupt is pending and we -do- need to acknowledge the interrupt
3731 * on VM-exit, postpone the VM-exit until the interrupt controller has
3732 * acknowledged that the interrupt has been consumed.
3733 */
3734 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3735 }
3736
3737 /*
3738 * If the interrupt is no longer pending (i.e. it has been acknowledged) and the
3739 * "External interrupt exiting" and "Acknowledge interrupt on VM-exit" controls are
3740 * both set, we cause the VM-exit now. We need to record the external interrupt that
3741 * just occurred in the VM-exit interruption information field.
3742 *
3743 * See Intel spec. 27.2.2 "Information for VM Exits Due to Vectored Events".
3744 */
3745 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
3746 {
3747 uint8_t const fNmiUnblocking = 0; /** @todo NSTVMX: Implement NMI-unblocking due to IRET. */
3748 uint32_t const uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, uVector)
3749 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_EXT_INT)
3750 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, fNmiUnblocking)
3751 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
3752 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
3753 iemVmxVmcsSetExitIntErrCode(pVCpu, 0);
3754 iemVmxVmcsSetExitQual(pVCpu, 0);
3755 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT);
3756 }
3757 }
3758
3759 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3760}
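
/*
 * Summary of the cases above (descriptive only): with "External interrupt exiting"
 * clear, nothing is intercepted. With it set but "Acknowledge interrupt on VM-exit"
 * clear, a pending interrupt causes an immediate VM-exit with an invalid VM-exit
 * interruption-information field. With both controls set, the VM-exit is taken on
 * the second call (fIntPending=false) once the vector has been fetched from the
 * interrupt controller, and the vector is then recorded as valid in the VMCS.
 */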
3761
3762
3763/**
3764 * VMX VM-exit handler for VM-exits due to startup-IPIs (SIPI).
3765 *
3766 * @returns VBox strict status code.
3767 * @param pVCpu The cross context virtual CPU structure.
3768 * @param uVector The SIPI vector.
3769 */
3770IEM_STATIC VBOXSTRICTRC iemVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)
3771{
3772 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3773 Assert(pVmcs);
3774
3775 iemVmxVmcsSetExitQual(pVCpu, uVector);
3776 return iemVmxVmexit(pVCpu, VMX_EXIT_SIPI);
3777}
3778
3779
3780/**
3781 * VMX VM-exit handler for VM-exits due to init-IPIs (INIT).
3782 *
3783 * @returns VBox strict status code.
3784 * @param pVCpu The cross context virtual CPU structure.
3785 */
3786IEM_STATIC VBOXSTRICTRC iemVmxVmexitInitIpi(PVMCPU pVCpu)
3787{
3788 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3789 Assert(pVmcs);
3790
3791 iemVmxVmcsSetExitQual(pVCpu, 0);
3792 return iemVmxVmexit(pVCpu, VMX_EXIT_INIT_SIGNAL);
3793}
3794
3795
3796/**
3797 * VMX VM-exit handler for interrupt-window VM-exits.
3798 *
3799 * @returns VBox strict status code.
3800 * @param pVCpu The cross context virtual CPU structure.
3801 */
3802IEM_STATIC VBOXSTRICTRC iemVmxVmexitIntWindow(PVMCPU pVCpu)
3803{
3804 iemVmxVmcsSetExitQual(pVCpu, 0);
3805 return iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW);
3806}
3807
3808
3809/**
3810 * VMX VM-exit handler for VM-exits due to delivery of an event.
3811 *
3812 * @returns VBox strict status code.
3813 * @param pVCpu The cross context virtual CPU structure.
3814 * @param uVector The interrupt / exception vector.
3815 * @param fFlags The flags (see IEM_XCPT_FLAGS_XXX).
3816 * @param uErrCode The error code associated with the event.
3817 * @param uCr2 The CR2 value in case of a \#PF exception.
3818 * @param cbInstr The instruction length in bytes.
3819 */
3820IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2,
3821 uint8_t cbInstr)
3822{
3823 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3824 Assert(pVmcs);
3825
3826 /*
3827 * If the event is being injected as part of VM-entry, it isn't subject to event
3828 * intercepts in the nested-guest. However, secondary exceptions that occur during
3829 * injection of any event -are- subject to event interception.
3830 *
3831 * See Intel spec. 26.5.1.2 "VM Exits During Event Injection".
3832 */
3833 if (!pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents)
3834 {
3835 /* Update the IDT-vectoring event in the VMCS as the source of the upcoming event. */
3836 uint8_t const uIdtVectoringType = iemVmxGetEventType(uVector, fFlags);
3837 uint8_t const fErrCodeValid = (fFlags & IEM_XCPT_FLAGS_ERR);
3838 uint32_t const uIdtVectoringInfo = RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_VECTOR, uVector)
3839 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_TYPE, uIdtVectoringType)
3840 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_ERR_CODE_VALID, fErrCodeValid)
3841 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_VALID, 1);
3842 iemVmxVmcsSetIdtVectoringInfo(pVCpu, uIdtVectoringInfo);
3843 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, uErrCode);
3844
3845 pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents = true;
3846 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3847 }
3848
3849 /*
3850 * We are injecting an external interrupt, check if we need to cause a VM-exit now.
3851 * If not, the caller will continue delivery of the external interrupt as it would
3852 * normally.
3853 */
3854 if (fFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3855 {
3856 Assert(!VMX_IDT_VECTORING_INFO_IS_VALID(pVmcs->u32RoIdtVectoringInfo));
3857 return iemVmxVmexitExtInt(pVCpu, uVector, false /* fIntPending */);
3858 }
3859
3860 /*
3861 * Evaluate intercepts for hardware exceptions including #BP, #DB, #OF
3862 * generated by INT3, INT1 (ICEBP) and INTO respectively.
3863 */
3864 Assert(fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_SOFT_INT));
3865 bool fIntercept = false;
3866 bool fIsHwXcpt = false;
3867 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3868 || (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR)))
3869 {
3870 fIsHwXcpt = true;
3871 /* NMIs have a dedicated VM-execution control for causing VM-exits. */
3872 if (uVector == X86_XCPT_NMI)
3873 fIntercept = RT_BOOL(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT);
3874 else
3875 {
3876 /* Page-faults are subject to masking using their error code. */
3877 uint32_t fXcptBitmap = pVmcs->u32XcptBitmap;
3878 if (uVector == X86_XCPT_PF)
3879 {
3880 uint32_t const fXcptPFMask = pVmcs->u32XcptPFMask;
3881 uint32_t const fXcptPFMatch = pVmcs->u32XcptPFMatch;
3882 if ((uErrCode & fXcptPFMask) != fXcptPFMatch)
3883 fXcptBitmap ^= RT_BIT(X86_XCPT_PF);
3884 }
3885
3886 /* Consult the exception bitmap for all hardware exceptions (except NMI). */
3887 if (fXcptBitmap & RT_BIT(uVector))
3888 fIntercept = true;
3889 }
3890 }
3891 /* else: Software interrupts cannot be intercepted and therefore do not cause a VM-exit. */
3892
3893 /*
3894 * Now that we've determined whether the software interrupt or hardware exception
3895 * causes a VM-exit, we need to construct the relevant VM-exit information and
3896 * cause the VM-exit.
3897 */
3898 if (fIntercept)
3899 {
3900 Assert(!(fFlags & IEM_XCPT_FLAGS_T_EXT_INT));
3901
3902 /* Construct the rest of the event related information fields and cause the VM-exit. */
3903 uint64_t uExitQual = 0;
3904 if (fIsHwXcpt)
3905 {
3906 if (uVector == X86_XCPT_PF)
3907 uExitQual = uCr2;
3908 else if (uVector == X86_XCPT_DB)
3909 {
3910 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR6);
3911 uExitQual = pVCpu->cpum.GstCtx.dr[6] & VMX_VMCS_EXIT_QUAL_VALID_MASK;
3912 }
3913 }
3914
3915 uint8_t const fNmiUnblocking = 0; /** @todo NSTVMX: Implement NMI-unblocking due to IRET. */
3916 uint8_t const fErrCodeValid = (fFlags & IEM_XCPT_FLAGS_ERR);
3917 uint8_t const uIntInfoType = iemVmxGetEventType(uVector, fFlags);
3918 uint32_t const uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, uVector)
3919 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, uIntInfoType)
3920 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_ERR_CODE_VALID, fErrCodeValid)
3921 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, fNmiUnblocking)
3922 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
3923 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
3924 iemVmxVmcsSetExitIntErrCode(pVCpu, uErrCode);
3925 iemVmxVmcsSetExitQual(pVCpu, uExitQual);
3926
3927 /*
3928 * For VM exits due to software exceptions (those generated by INT3 or INTO) or privileged
3929 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
3930 * length.
3931 */
3932 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3933 && (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR)))
3934 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
3935 else
3936 iemVmxVmcsSetExitInstrLen(pVCpu, 0);
3937
3938 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI);
3939 }
3940
3941 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3942}
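
/*
 * Worked example for the page-fault filtering above (hypothetical values): with
 * bit 14 (#PF) set in the exception bitmap, a PFEC_MASK of 1 and a PFEC_MATCH of 1,
 * a #PF with error code 0x2 (write to a not-present page) gives (0x2 & 1) = 0 which
 * differs from the match value, so the #PF bit is XOR'ed off and the fault is
 * delivered to the guest; an error code of 0x3 (write causing a protection
 * violation) matches and the VM-exit is taken instead.
 */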
3943
3944
3945/**
3946 * VMX VM-exit handler for VM-exits due to a triple fault.
3947 *
3948 * @returns VBox strict status code.
3949 * @param pVCpu The cross context virtual CPU structure.
3950 */
3951IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu)
3952{
3953 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3954 Assert(pVmcs);
3955 iemVmxVmcsSetExitQual(pVCpu, 0);
3956 return iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT);
3957}
3958
3959
3960/**
3961 * Reads a 32-bit register from the virtual-APIC page at the given offset.
3962 *
3963 * @returns The register from the virtual-APIC page.
3964 * @param pVCpu The cross context virtual CPU structure.
3965 * @param offReg The offset of the register being read.
3966 */
3967DECLINLINE(uint32_t) iemVmxVirtApicReadRaw32(PVMCPU pVCpu, uint16_t offReg)
3968{
3969 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
3970 uint8_t const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
3971 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
3972 uint32_t const uReg = *(const uint32_t *)(pbVirtApic + offReg);
3973 return uReg;
3974}
3975
3976
3977/**
3978 * Reads a 64-bit register from the virtual-APIC page at the given offset.
3979 *
3980 * @returns The register from the virtual-APIC page.
3981 * @param pVCpu The cross context virtual CPU structure.
3982 * @param offReg The offset of the register being read.
3983 */
3984DECLINLINE(uint64_t) iemVmxVirtApicReadRaw64(PVMCPU pVCpu, uint16_t offReg)
3985{
3986 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
3987 uint8_t const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
3988 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
3989 uint64_t const uReg = *(const uint64_t *)(pbVirtApic + offReg);
3990 return uReg;
3991}
3992
3993
3994/**
3995 * Writes a 32-bit register to the virtual-APIC page at the given offset.
3996 *
3997 * @param pVCpu The cross context virtual CPU structure.
3998 * @param offReg The offset of the register being written.
3999 * @param uReg The register value to write.
4000 */
4001DECLINLINE(void) iemVmxVirtApicWriteRaw32(PVMCPU pVCpu, uint16_t offReg, uint32_t uReg)
4002{
4003 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
4004 uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4005 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4006 *(uint32_t *)(pbVirtApic + offReg) = uReg;
4007}
4008
4009
4010/**
4011 * Writes a 64-bit register to the virtual-APIC page at the given offset.
4012 *
4013 * @param pVCpu The cross context virtual CPU structure.
4014 * @param offReg The offset of the register being written.
4015 * @param uReg The register value to write.
4016 */
4017DECLINLINE(void) iemVmxVirtApicWriteRaw64(PVMCPU pVCpu, uint16_t offReg, uint64_t uReg)
4018{
4019 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
4020 uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4021 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4022 *(uint64_t *)(pbVirtApic + offReg) = uReg;
4023}
4024
4025
4026/**
4027 * Checks if an access of the APIC page must cause an APIC-access VM-exit.
4028 *
4029 * @param pVCpu The cross context virtual CPU structure.
4030 * @param offAccess The offset of the register being accessed.
4031 * @param cbAccess The size of the access in bytes.
4032 * @param fAccess The type of access (must be IEM_ACCESS_TYPE_READ or
4033 * IEM_ACCESS_TYPE_WRITE).
4034 */
4035IEM_STATIC bool iemVmxVirtApicIsAccessIntercepted(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, uint32_t fAccess)
4036{
4037 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4038 Assert(pVmcs);
4039 Assert(fAccess == IEM_ACCESS_TYPE_READ || fAccess == IEM_ACCESS_TYPE_WRITE);
4040
4041 /*
4042 * We must cause a VM-exit if any of the following are true:
4043 * - TPR shadowing isn't active.
4044 * - The access size exceeds 32 bits.
4045 * - The access is not contained within the low 4 bytes of a 16-byte aligned offset.
4046 *
4047 * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
4048 * See Intel spec. 29.4.3.1 "Determining Whether a Write Access is Virtualized".
4049 */
4050 if ( !(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4051 || cbAccess > sizeof(uint32_t)
4052 || ((offAccess + cbAccess - 1) & 0xc)
4053 || offAccess >= XAPIC_OFF_END + 4)
4054 return true;
4055
4056 /*
4057 * If the access is part of an operation where we have already
4058 * virtualized a virtual TPR write, we must cause a VM-exit.
4059 */
4060 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_UPDATE_VAPIC))
4061 return true;
4062
4063 /*
4064 * Check write accesses to the APIC-access page that cause VM-exits.
4065 */
4066 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4067 {
4068 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4069 {
4070 /*
4071 * With APIC-register virtualization, a write access to any of the
4072 * following registers is virtualized. Accessing any other register
4073 * causes a VM-exit.
4074 */
4075 uint16_t const offAlignedAccess = offAccess & 0xfffc;
4076 switch (offAlignedAccess)
4077 {
4078 case XAPIC_OFF_ID:
4079 case XAPIC_OFF_TPR:
4080 case XAPIC_OFF_EOI:
4081 case XAPIC_OFF_LDR:
4082 case XAPIC_OFF_DFR:
4083 case XAPIC_OFF_SVR:
4084 case XAPIC_OFF_ESR:
4085 case XAPIC_OFF_ICR_LO:
4086 case XAPIC_OFF_ICR_HI:
4087 case XAPIC_OFF_LVT_TIMER:
4088 case XAPIC_OFF_LVT_THERMAL:
4089 case XAPIC_OFF_LVT_PERF:
4090 case XAPIC_OFF_LVT_LINT0:
4091 case XAPIC_OFF_LVT_LINT1:
4092 case XAPIC_OFF_LVT_ERROR:
4093 case XAPIC_OFF_TIMER_ICR:
4094 case XAPIC_OFF_TIMER_DCR:
4095 break;
4096 default:
4097 return true;
4098 }
4099 }
4100 else if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4101 {
4102 /*
4103 * With virtual-interrupt delivery, a write access to any of the
4104 * following registers is virtualized. Accessing any other register
4105 * causes a VM-exit.
4106 *
4107 * Note! The specification does not allow writing to offsets in-between
4108 * these registers (e.g. TPR + 1 byte) unlike read accesses.
4109 */
4110 switch (offAccess)
4111 {
4112 case XAPIC_OFF_TPR:
4113 case XAPIC_OFF_EOI:
4114 case XAPIC_OFF_ICR_LO:
4115 break;
4116 default:
4117 return true;
4118 }
4119 }
4120 else
4121 {
4122 /*
4123 * Without APIC-register virtualization or virtual-interrupt delivery,
4124 * only TPR accesses are virtualized.
4125 */
4126 if (offAccess == XAPIC_OFF_TPR)
4127 { /* likely */ }
4128 else
4129 return true;
4130 }
4131 }
4132 else
4133 {
4134 /*
4135 * Check read accesses to the APIC-access page that cause VM-exits.
4136 */
4137 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4138 {
4139 /*
4140 * With APIC-register virtualization, a read access to any of the
4141 * following registers is virtualized. Accessing any other register
4142 * causes a VM-exit.
4143 */
4144 uint16_t const offAlignedAccess = offAccess & 0xfffc;
4145 switch (offAlignedAccess)
4146 {
4147 /** @todo r=ramshankar: What about XAPIC_OFF_LVT_CMCI? */
4148 case XAPIC_OFF_ID:
4149 case XAPIC_OFF_VERSION:
4150 case XAPIC_OFF_TPR:
4151 case XAPIC_OFF_EOI:
4152 case XAPIC_OFF_LDR:
4153 case XAPIC_OFF_DFR:
4154 case XAPIC_OFF_SVR:
4155 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
4156 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
4157 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
4158 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
4159 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
4160 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
4161 case XAPIC_OFF_ESR:
4162 case XAPIC_OFF_ICR_LO:
4163 case XAPIC_OFF_ICR_HI:
4164 case XAPIC_OFF_LVT_TIMER:
4165 case XAPIC_OFF_LVT_THERMAL:
4166 case XAPIC_OFF_LVT_PERF:
4167 case XAPIC_OFF_LVT_LINT0:
4168 case XAPIC_OFF_LVT_LINT1:
4169 case XAPIC_OFF_LVT_ERROR:
4170 case XAPIC_OFF_TIMER_ICR:
4171 case XAPIC_OFF_TIMER_DCR:
4172 break;
4173 default:
4174 return true;
4175 }
4176 }
4177 else
4178 {
4179 /* Without APIC-register virtualization, only TPR accesses are virtualized. */
4180 if (offAccess == XAPIC_OFF_TPR)
4181 { /* likely */ }
4182 else
4183 return true;
4184 }
4185 }
4186
4187 /* The APIC access is virtualized and does not cause a VM-exit. */
4188 return false;
4189}
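
/*
 * Worked examples for the containment check above (hypothetical accesses): the
 * access must lie entirely within the low 4 bytes of a 16-byte aligned region,
 * i.e. ((offAccess + cbAccess - 1) & 0xc) must be zero. A 4-byte access at offset
 * 0x80 (VTPR) gives (0x80 + 3) & 0xc = 0 and may be virtualized; a 2-byte access
 * at offset 0x83 gives (0x83 + 1) & 0xc = 0x4 and therefore always causes a VM-exit.
 */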
4190
4191
4192/**
4193 * VMX VM-exit handler for APIC-write VM-exits.
4194 *
4195 * @param pVCpu The cross context virtual CPU structure.
4196 */
4197IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicWrite(PVMCPU pVCpu)
4198{
4199 iemVmxVmcsSetExitQual(pVCpu, pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite);
4200 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_WRITE);
4201}
4202
4203
4204/**
4205 * VMX VM-exit handler for APIC-accesses.
4206 *
4207 * @param pVCpu The cross context virtual CPU structure.
4208 * @param offAccess The offset of the register being accessed.
4209 * @param fAccess The type of access (must contain IEM_ACCESS_TYPE_READ or
4210 * IEM_ACCESS_TYPE_WRITE or IEM_ACCESS_INSTRUCTION).
4211 */
4212IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess)
4213{
4214 Assert((fAccess & IEM_ACCESS_TYPE_READ) || (fAccess & IEM_ACCESS_TYPE_WRITE) || (fAccess & IEM_ACCESS_INSTRUCTION));
4215
4216 VMXAPICACCESS enmAccess;
4217 bool const fInEventDelivery = IEMGetCurrentXcpt(pVCpu, NULL, NULL, NULL, NULL);
4218 if (fInEventDelivery)
4219 enmAccess = VMXAPICACCESS_LINEAR_EVENT_DELIVERY;
4220 else if (fAccess & IEM_ACCESS_INSTRUCTION)
4221 enmAccess = VMXAPICACCESS_LINEAR_INSTR_FETCH;
4222 else if (fAccess & IEM_ACCESS_TYPE_WRITE)
4223 enmAccess = VMXAPICACCESS_LINEAR_WRITE;
4224 else
4225 enmAccess = VMXAPICACCESS_LINEAR_READ;
4226
4227 uint64_t const uExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_OFFSET, offAccess)
4228 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_TYPE, enmAccess);
4229 iemVmxVmcsSetExitQual(pVCpu, uExitQual);
4230 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_ACCESS);
4231}
4232
4233
4234/**
4235 * Virtualizes a memory-based APIC-access.
4236 *
4237 * @returns VBox strict status code.
4238 * @param pVCpu The cross context virtual CPU structure.
4239 * @param offAccess The offset of the register being accessed (within the
4240 * APIC-access page).
4241 * @param cbAccess The size of the access in bytes.
4242 * @param pvData Pointer to the data being read or written.
4243 * @param fAccess The type of access (must contain IEM_ACCESS_TYPE_READ or
4244 * IEM_ACCESS_TYPE_WRITE or IEM_ACCESS_INSTRUCTION).
4245 */
4246IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData,
4247 uint32_t fAccess)
4248{
4249 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4250 Assert(pVmcs);
4251 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
4252 Assert(pvData);
4253 Assert( (fAccess & IEM_ACCESS_TYPE_READ)
4254 || (fAccess & IEM_ACCESS_TYPE_WRITE)
4255 || (fAccess & IEM_ACCESS_INSTRUCTION));
4256
4257 bool const fIntercept = iemVmxVirtApicIsAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess);
4258 if (fIntercept)
4259 return iemVmxVmexitApicAccess(pVCpu, offAccess, fAccess);
4260
4261 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4262 {
4263 /*
4264 * Record the currently updated APIC offset, as we need this later for figuring
4265 * out whether to perform TPR, EOI or self-IPI virtualization as well
4266 * as for supplying the exit qualification when causing an APIC-write VM-exit.
4267 */
4268 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offAccess;
4269
4270 /*
4271 * A write access to the APIC-access page that is virtualized (rather than
4272 * causing a VM-exit) writes data to the virtual-APIC page.
4273 */
4274 uint32_t const u32Data = *(uint32_t *)pvData;
4275 iemVmxVirtApicWriteRaw32(pVCpu, offAccess, u32Data);
4276
4277 /*
4278 * After completion of the current operation, we need to perform TPR virtualization,
4279 * EOI virtualization or APIC-write VM-exit depending on which register was written.
4280 *
4281 * The current operation may be a REP-prefixed string instruction, execution of any
4282 * other instruction, or delivery of an event through the IDT.
4283 *
4284 * Thus, things like clearing bytes 3:1 of the VTPR or clearing VEOI are not to be
4285 * performed now but only later, after completion of the current operation.
4286 *
4287 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4288 */
4289 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_UPDATE_VAPIC);
4290 }
4291 else
4292 {
4293 /*
4294 * A read access from the APIC-access page that is virtualized (rather than
4295 * causing a VM-exit) returns data from the virtual-APIC page.
4296 *
4297 * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
4298 */
4299 Assert(cbAccess <= 4);
4300 Assert(offAccess < XAPIC_OFF_END + 4);
4301 static uint32_t const s_auAccessSizeMasks[] = { 0, 0xff, 0xffff, 0xffffff, 0xffffffff };
4302
4303 uint32_t u32Data = iemVmxVirtApicReadRaw32(pVCpu, offAccess);
4304 u32Data &= s_auAccessSizeMasks[cbAccess];
4305 *(uint32_t *)pvData = u32Data;
4306 }
4307
4308 return VINF_VMX_MODIFIES_BEHAVIOR;
4309}
4310
4311
4312/**
4313 * Virtualizes an MSR-based APIC read access.
4314 *
4315 * @returns VBox strict status code.
4316 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR read was virtualized.
4317 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR read access must be
4318 * handled by the x2APIC device.
4319 * @retval VERR_OUT_OF_RANGE if the MSR read was supposed to be virtualized but was
4320 * not within the range of valid MSRs, caller must raise \#GP(0).
4321 * @param pVCpu The cross context virtual CPU structure.
4322 * @param idMsr The x2APIC MSR being read.
4323 * @param pu64Value Where to store the read x2APIC MSR value (only valid when
4324 * VINF_VMX_MODIFIES_BEHAVIOR is returned).
4325 */
4326IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value)
4327{
4328 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4329 Assert(pVmcs);
4330 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
4331 Assert(pu64Value);
4332
4333 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4334 {
4335 /*
4336 * Intel has different ideas in the x2APIC spec. vs the VT-x spec. as to
4337 * what the end of the valid x2APIC MSR range is. Hence the use of different
4338 * macros here.
4339 *
4340 * See Intel spec. 10.12.1.2 "x2APIC Register Address Space".
4341 * See Intel spec. 29.5 "Virtualizing MSR-based APIC Accesses".
4342 */
4343 if ( idMsr >= VMX_V_VIRT_APIC_MSR_START
4344 && idMsr <= VMX_V_VIRT_APIC_MSR_END)
4345 {
4346 uint16_t const offReg = (idMsr & 0xff) << 4;
4347 uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
4348 *pu64Value = u64Value;
4349 return VINF_VMX_MODIFIES_BEHAVIOR;
4350 }
4351 return VERR_OUT_OF_RANGE;
4352 }
4353
4354 if (idMsr == MSR_IA32_X2APIC_TPR)
4355 {
4356 uint16_t const offReg = (idMsr & 0xff) << 4;
4357 uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
4358 *pu64Value = u64Value;
4359 return VINF_VMX_MODIFIES_BEHAVIOR;
4360 }
4361
4362 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4363}
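
/*
 * Example of the MSR-to-offset mapping used above (descriptive only): x2APIC MSRs
 * live at 0x800 + (xAPIC offset >> 4), so taking the low byte of the MSR and
 * shifting it left by 4 recovers the memory-mapped register offset. For instance,
 * MSR_IA32_X2APIC_TPR (0x808) maps to (0x08 << 4) = 0x80, which is XAPIC_OFF_TPR
 * on the virtual-APIC page.
 */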
4364
4365
4366/**
4367 * Virtualizes an MSR-based APIC write access.
4368 *
4369 * @returns VBox strict status code.
4370 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR write was virtualized.
4371 * @retval VERR_OUT_OF_RANGE if the MSR write was supposed to be virtualized but was
4372 * not within the range of valid MSRs, caller must raise \#GP(0).
4373 *
4374 * @param pVCpu The cross context virtual CPU structure.
4375 * @param idMsr The x2APIC MSR being written.
4376 * @param u64Value The value of the x2APIC MSR being written.
4377 */
4378IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value)
4379{
4380 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4381 Assert(pVmcs);
4382
4383 /*
4384 * Check if the access is to be virtualized.
4385 * See Intel spec. 29.5 "Virtualizing MSR-based APIC Accesses".
4386 */
4387 if ( idMsr == MSR_IA32_X2APIC_TPR
4388 || ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4389 && ( idMsr == MSR_IA32_X2APIC_EOI
4390 || idMsr == MSR_IA32_X2APIC_SELF_IPI)))
4391 {
4392 /* Validate the MSR write depending on the register. */
4393 switch (idMsr)
4394 {
4395 case MSR_IA32_X2APIC_TPR:
4396 case MSR_IA32_X2APIC_SELF_IPI:
4397 {
4398 if (u64Value & UINT64_C(0xffffffffffffff00))
4399 return VERR_OUT_OF_RANGE;
4400 break;
4401 }
4402 case MSR_IA32_X2APIC_EOI:
4403 {
4404 if (u64Value != 0)
4405 return VERR_OUT_OF_RANGE;
4406 break;
4407 }
4408 }
4409
4410 /* Write the MSR to the virtual-APIC page. */
4411 uint16_t const offReg = (idMsr & 0xff) << 4;
4412 iemVmxVirtApicWriteRaw64(pVCpu, offReg, u64Value);
4413
4414 /*
4415 * Record the currently updated APIC offset, as we need this later for figuring
4416 * out whether to perform TPR, EOI or self-IPI virtualization as well
4417 * as for supplying the exit qualification when causing an APIC-write VM-exit.
4418 */
4419 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offReg;
4420 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_UPDATE_VAPIC);
4421
4422 return VINF_VMX_MODIFIES_BEHAVIOR;
4423 }
4424
4425 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4426}
4427
4428
4429/**
4430 * Performs PPR virtualization, updating the VPPR on the virtual-APIC page.
4431 *
4433 * @param pVCpu The cross context virtual CPU structure.
4434 */
4435IEM_STATIC void iemVmxVmexitPprVirtualization(PVMCPU pVCpu)
4436{
4437 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4438 Assert(pVmcs);
4439
4440 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4441 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4442
4443 uint32_t const uVTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4444 uint32_t const uSvi = pVmcs->u16GuestIntStatus >> 8;
4445
4446 uint32_t uVPpr;
4447 if (((uVTpr >> 4) & 0xf) >= ((uSvi >> 4) & 0xf))
4448 uVPpr = uVTpr & 0xff;
4449 else
4450 uVPpr = uSvi & 0xf0;
4451
4452 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_PPR, uVPpr);
4453 Log2(("ppr_virt: uVTpr=%u uSvi=%u -> VM-exit\n", uVTpr, uSvi));
4454}
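/*
 * Worked example of the PPR computation above (see Intel spec. 29.1.3 "PPR
 * Virtualization"), for illustration: with VTPR=0x30 and SVI=0x41, VTPR[7:4]=3 is
 * below SVI[7:4]=4, so VPPR becomes SVI & 0xf0 = 0x40; with VTPR=0x50 instead,
 * VTPR[7:4]=5 >= 4 and VPPR would simply be VTPR, i.e. 0x50.
 */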
4455
4456
4457/**
4458 * VMX VM-exit handler for TPR virtualization.
4459 *
4460 * @returns VBox strict status code.
4461 * @param pVCpu The cross context virtual CPU structure.
4462 * @param cbInstr The instruction length in bytes.
4463 */
4464IEM_STATIC VBOXSTRICTRC iemVmxVmexitTprVirtualization(PVMCPU pVCpu, uint8_t cbInstr)
4465{
4466 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4467 Assert(pVmcs);
4468
4469 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4470 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)); /* We don't support virtual-interrupt delivery yet. */
4471 /** @todo NSTVMX: When virtual-interrupt delivery is present, call PPR virt. and
4472 * evaluate pending virtual interrupts. */
4473
4474 uint32_t const uTprThreshold = pVmcs->u32TprThreshold;
4475 uint32_t const uVTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4476
4477 /*
4478 * If the VTPR falls below the TPR threshold, we must cause a VM-exit.
4479 * See Intel spec. 29.1.2 "TPR Virtualization".
4480 */
4481 if (((uVTpr >> 4) & 0xf) < uTprThreshold)
4482 {
4483 Log2(("tpr_virt: uVTpr=%u uTprThreshold=%u -> VM-exit\n", uVTpr, uTprThreshold));
4484
4485 /*
4486 * This is a trap-like VM-exit. We pass the instruction length along in the VM-exit
4487 * instruction length field and let the VM-exit handler update the RIP when appropriate.
4488 * It will then clear the VM-exit instruction length field before completing the VM-exit.
4489 *
4490 * The VM-exit qualification must be cleared.
4491 */
4492 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
4493 iemVmxVmcsSetExitQual(pVCpu, 0);
4494 return iemVmxVmexit(pVCpu, VMX_EXIT_TPR_BELOW_THRESHOLD);
4495 }
4496
4497 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4498}
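/*
 * Worked example of the threshold check above, for illustration: with a TPR
 * threshold of 5, a VTPR of 0x30 gives VTPR[7:4] = 3 < 5 and thus causes a
 * VMX_EXIT_TPR_BELOW_THRESHOLD, whereas a VTPR of 0x50 or higher completes the
 * access without a VM-exit.
 */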
4499
4500
4501/**
4502 * Checks guest control registers, debug registers and MSRs as part of VM-entry.
4503 *
4504 * @param pVCpu The cross context virtual CPU structure.
4505 * @param pszInstr The VMX instruction name (for logging purposes).
4506 */
4507IEM_STATIC int iemVmxVmentryCheckGuestControlRegsMsrs(PVMCPU pVCpu, const char *pszInstr)
4508{
4509 /*
4510 * Guest Control Registers, Debug Registers, and MSRs.
4511 * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs".
4512 */
4513 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4514 const char *const pszFailure = "VM-exit";
4515 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
4516
4517 /* CR0 reserved bits. */
4518 {
4519 /* CR0 MB1 bits. */
4520 uint64_t u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
4521 Assert(!(u64Cr0Fixed0 & (X86_CR0_NW | X86_CR0_CD)));
4522 if (fUnrestrictedGuest)
4523 u64Cr0Fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
4524 if ((pVmcs->u64GuestCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
4525 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed0);
4526
4527 /* CR0 MBZ bits. */
4528 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
4529 if (pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1)
4530 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed1);
4531
4532 /* Without unrestricted-guest support, VT-x does not support unpaged protected mode. */
4533 if ( !fUnrestrictedGuest
4534 && (pVmcs->u64GuestCr0.u & X86_CR0_PG)
4535 && !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
4536 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0PgPe);
4537 }
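/*
 * Note on the FIXED0/FIXED1 convention used above: a CR0 bit must be 1 if it is 1 in
 * IA32_VMX_CR0_FIXED0 and must be 0 if it is 0 in IA32_VMX_CR0_FIXED1; only bits that
 * differ between the two MSRs are freely configurable. E.g. with the common values
 * FIXED0=0x80000021 and FIXED1=0xffffffff (for illustration), PE, NE and PG are
 * must-be-one, with PE/PG relaxed above when unrestricted guest is enabled.
 */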
4538
4539 /* CR4 reserved bits. */
4540 {
4541 /* CR4 MB1 bits. */
4542 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
4543 if ((pVmcs->u64GuestCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
4544 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed0);
4545
4546 /* CR4 MBZ bits. */
4547 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
4548 if (pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1)
4549 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed1);
4550 }
4551
4552 /* DEBUGCTL MSR. */
4553 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4554 && (pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
4555 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDebugCtl);
4556
4557 /* 64-bit CPU checks. */
4558 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4559 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4560 {
4561 if (fGstInLongMode)
4562 {
4563 /* CR0.PG and CR4.PAE must both be set. */
4564 if ( (pVmcs->u64GuestCr0.u & X86_CR0_PG)
4565 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE))
4566 { /* likely */ }
4567 else
4568 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPae);
4569 }
4570 else
4571 {
4572 /* PCIDE should not be set. */
4573 if (!(pVmcs->u64GuestCr4.u & X86_CR4_PCIDE))
4574 { /* likely */ }
4575 else
4576 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPcide);
4577 }
4578
4579 /* CR3. */
4580 if (!(pVmcs->u64GuestCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
4581 { /* likely */ }
4582 else
4583 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr3);
4584
4585 /* DR7. */
4586 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4587 && (pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
4588 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDr7);
4589
4590 /* SYSENTER ESP and SYSENTER EIP. */
4591 if ( X86_IS_CANONICAL(pVmcs->u64GuestSysenterEsp.u)
4592 && X86_IS_CANONICAL(pVmcs->u64GuestSysenterEip.u))
4593 { /* likely */ }
4594 else
4595 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSysenterEspEip);
4596 }
4597
4598 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
4599 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
4600
4601 /* PAT MSR. */
4602 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4603 && !CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
4604 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPatMsr);
4605
4606 /* EFER MSR. */
4607 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
4608 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4609 && (pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
4610 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsrRsvd);
4611
4612 bool const fGstLma = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LMA);
4613 bool const fGstLme = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LME);
4614 if ( fGstInLongMode == fGstLma
4615 && ( !(pVmcs->u64GuestCr0.u & X86_CR0_PG)
4616 || fGstLma == fGstLme))
4617 { /* likely */ }
4618 else
4619 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsr);
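/*
 * I.e. the consistency check above requires EFER.LMA to mirror the "IA-32e mode
 * guest" entry control and, whenever CR0.PG is set, EFER.LME to equal EFER.LMA;
 * a paged guest cannot be entered with LMA and LME disagreeing.
 */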
4620
4621 /* We don't support IA32_BNDCFGS MSR yet. */
4622 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
4623
4624 NOREF(pszInstr);
4625 NOREF(pszFailure);
4626 return VINF_SUCCESS;
4627}
4628
4629
4630/**
4631 * Checks guest segment registers, LDTR and TR as part of VM-entry.
4632 *
4633 * @param pVCpu The cross context virtual CPU structure.
4634 * @param pszInstr The VMX instruction name (for logging purposes).
4635 */
4636IEM_STATIC int iemVmxVmentryCheckGuestSegRegs(PVMCPU pVCpu, const char *pszInstr)
4637{
4638 /*
4639 * Segment registers.
4640 * See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
4641 */
4642 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4643 const char *const pszFailure = "VM-exit";
4644 bool const fGstInV86Mode = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);
4645 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
4646 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4647
4648 /* Selectors. */
4649 if ( !fGstInV86Mode
4650 && !fUnrestrictedGuest
4651 && (pVmcs->GuestSs & X86_SEL_RPL) != (pVmcs->GuestCs & X86_SEL_RPL))
4652 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelCsSsRpl);
4653
4654 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
4655 {
4656 CPUMSELREG SelReg;
4657 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &SelReg);
4658 if (RT_LIKELY(rc == VINF_SUCCESS))
4659 { /* likely */ }
4660 else
4661 return rc;
4662
4663 /*
4664 * Virtual-8086 mode checks.
4665 */
4666 if (fGstInV86Mode)
4667 {
4668 /* Base address. */
4669 if (SelReg.u64Base == (uint64_t)SelReg.Sel << 4)
4670 { /* likely */ }
4671 else
4672 {
4673 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBaseV86(iSegReg);
4674 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4675 }
4676
4677 /* Limit. */
4678 if (SelReg.u32Limit == 0xffff)
4679 { /* likely */ }
4680 else
4681 {
4682 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegLimitV86(iSegReg);
4683 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4684 }
4685
4686 /* Attribute. */
4687 if (SelReg.Attr.u == 0xf3)
4688 { /* likely */ }
4689 else
4690 {
4691 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrV86(iSegReg);
4692 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4693 }
4694
4695 /* We're done; move to checking the next segment. */
4696 continue;
4697 }
4698
4699 /* Checks done by 64-bit CPUs. */
4700 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4701 {
4702 /* Base address. */
4703 if ( iSegReg == X86_SREG_FS
4704 || iSegReg == X86_SREG_GS)
4705 {
4706 if (X86_IS_CANONICAL(SelReg.u64Base))
4707 { /* likely */ }
4708 else
4709 {
4710 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
4711 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4712 }
4713 }
4714 else if (iSegReg == X86_SREG_CS)
4715 {
4716 if (!RT_HI_U32(SelReg.u64Base))
4717 { /* likely */ }
4718 else
4719 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseCs);
4720 }
4721 else
4722 {
4723 if ( SelReg.Attr.n.u1Unusable
4724 || !RT_HI_U32(SelReg.u64Base))
4725 { /* likely */ }
4726 else
4727 {
4728 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
4729 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4730 }
4731 }
4732 }
4733
4734 /*
4735 * Checks outside Virtual-8086 mode.
4736 */
4737 uint8_t const uSegType = SelReg.Attr.n.u4Type;
4738 uint8_t const fCodeDataSeg = SelReg.Attr.n.u1DescType;
4739 uint8_t const fUsable = !SelReg.Attr.n.u1Unusable;
4740 uint8_t const uDpl = SelReg.Attr.n.u2Dpl;
4741 uint8_t const fPresent = SelReg.Attr.n.u1Present;
4742 uint8_t const uGranularity = SelReg.Attr.n.u1Granularity;
4743 uint8_t const uDefBig = SelReg.Attr.n.u1DefBig;
4744 uint8_t const fSegLong = SelReg.Attr.n.u1Long;
4745
4746 /* Code or usable segment. */
4747 if ( iSegReg == X86_SREG_CS
4748 || fUsable)
4749 {
4750 /* Reserved bits (bits 31:17 and bits 11:8). */
4751 if (!(SelReg.Attr.u & 0xfffe0f00))
4752 { /* likely */ }
4753 else
4754 {
4755 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrRsvd(iSegReg);
4756 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4757 }
4758
4759 /* Descriptor type. */
4760 if (fCodeDataSeg)
4761 { /* likely */ }
4762 else
4763 {
4764 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDescType(iSegReg);
4765 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4766 }
4767
4768 /* Present. */
4769 if (fPresent)
4770 { /* likely */ }
4771 else
4772 {
4773 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrPresent(iSegReg);
4774 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4775 }
4776
4777 /* Granularity. */
4778 if ( ((SelReg.u32Limit & 0x00000fff) == 0x00000fff || !uGranularity)
4779 && ((SelReg.u32Limit & 0xfff00000) == 0x00000000 || uGranularity))
4780 { /* likely */ }
4781 else
4782 {
4783 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrGran(iSegReg);
4784 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4785 }
4786 }
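/*
 * Worked examples of the granularity rule above, for illustration: a limit of
 * 0x0001ffff is valid with either granularity setting, 0x00010000 only with G=0
 * (low 12 bits not all ones), and 0x00123fff only with G=1 (bits 31:20 non-zero).
 */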
4787
4788 if (iSegReg == X86_SREG_CS)
4789 {
4790 /* Segment Type and DPL. */
4791 if ( uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
4792 && fUnrestrictedGuest)
4793 {
4794 if (uDpl == 0)
4795 { /* likely */ }
4796 else
4797 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplZero);
4798 }
4799 else if ( uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_ACCESSED)
4800 || uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
4801 {
4802 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
4803 if (uDpl == AttrSs.n.u2Dpl)
4804 { /* likely */ }
4805 else
4806 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplEqSs);
4807 }
4808 else if ((uSegType & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
4809 == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
4810 {
4811 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
4812 if (uDpl <= AttrSs.n.u2Dpl)
4813 { /* likely */ }
4814 else
4815 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplLtSs);
4816 }
4817 else
4818 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsType);
4819
4820 /* Def/Big. */
4821 if ( fGstInLongMode
4822 && fSegLong)
4823 {
4824 if (uDefBig == 0)
4825 { /* likely */ }
4826 else
4827 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDefBig);
4828 }
4829 }
4830 else if (iSegReg == X86_SREG_SS)
4831 {
4832 /* Segment Type. */
4833 if ( !fUsable
4834 || uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
4835 || uSegType == (X86_SEL_TYPE_DOWN | X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED))
4836 { /* likely */ }
4837 else
4838 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsType);
4839
4840 /* DPL. */
4841 if (fUnrestrictedGuest)
4842 {
4843 if (uDpl == (SelReg.Sel & X86_SEL_RPL))
4844 { /* likely */ }
4845 else
4846 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplEqRpl);
4847 }
4848 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
4849 if ( AttrCs.n.u4Type == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
4850 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
4851 {
4852 if (uDpl == 0)
4853 { /* likely */ }
4854 else
4855 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplZero);
4856 }
4857 }
4858 else
4859 {
4860 /* DS, ES, FS, GS. */
4861 if (fUsable)
4862 {
4863 /* Segment type. */
4864 if (uSegType & X86_SEL_TYPE_ACCESSED)
4865 { /* likely */ }
4866 else
4867 {
4868 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrTypeAcc(iSegReg);
4869 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4870 }
4871
4872 if ( !(uSegType & X86_SEL_TYPE_CODE)
4873 || (uSegType & X86_SEL_TYPE_READ))
4874 { /* likely */ }
4875 else
4876 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsTypeRead);
4877
4878 /* DPL. */
4879 if ( !fUnrestrictedGuest
4880 && uSegType <= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
4881 {
4882 if (uDpl >= (SelReg.Sel & X86_SEL_RPL))
4883 { /* likely */ }
4884 else
4885 {
4886 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDplRpl(iSegReg);
4887 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4888 }
4889 }
4890 }
4891 }
4892 }
4893
4894 /*
4895 * LDTR.
4896 */
4897 {
4898 CPUMSELREG Ldtr;
4899 Ldtr.Sel = pVmcs->GuestLdtr;
4900 Ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
4901 Ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
4902 Ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
4903
4904 if (!Ldtr.Attr.n.u1Unusable)
4905 {
4906 /* Selector. */
4907 if (!(Ldtr.Sel & X86_SEL_LDT))
4908 { /* likely */ }
4909 else
4910 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelLdtr);
4911
4912 /* Base. */
4913 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4914 {
4915 if (X86_IS_CANONICAL(Ldtr.u64Base))
4916 { /* likely */ }
4917 else
4918 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseLdtr);
4919 }
4920
4921 /* Attributes. */
4922 /* Reserved bits (bits 31:17 and bits 11:8). */
4923 if (!(Ldtr.Attr.u & 0xfffe0f00))
4924 { /* likely */ }
4925 else
4926 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrRsvd);
4927
4928 if (Ldtr.Attr.n.u4Type == X86_SEL_TYPE_SYS_LDT)
4929 { /* likely */ }
4930 else
4931 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrType);
4932
4933 if (!Ldtr.Attr.n.u1DescType)
4934 { /* likely */ }
4935 else
4936 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrDescType);
4937
4938 if (Ldtr.Attr.n.u1Present)
4939 { /* likely */ }
4940 else
4941 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrPresent);
4942
4943 if ( ((Ldtr.u32Limit & 0x00000fff) == 0x00000fff || !Ldtr.Attr.n.u1Granularity)
4944 && ((Ldtr.u32Limit & 0xfff00000) == 0x00000000 || Ldtr.Attr.n.u1Granularity))
4945 { /* likely */ }
4946 else
4947 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrGran);
4948 }
4949 }
4950
4951 /*
4952 * TR.
4953 */
4954 {
4955 CPUMSELREG Tr;
4956 Tr.Sel = pVmcs->GuestTr;
4957 Tr.u32Limit = pVmcs->u32GuestTrLimit;
4958 Tr.u64Base = pVmcs->u64GuestTrBase.u;
4959 Tr.Attr.u = pVmcs->u32GuestTrAttr;
4960
4961 /* Selector. */
4962 if (!(Tr.Sel & X86_SEL_LDT))
4963 { /* likely */ }
4964 else
4965 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelTr);
4966
4967 /* Base. */
4968 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4969 {
4970 if (X86_IS_CANONICAL(Tr.u64Base))
4971 { /* likely */ }
4972 else
4973 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseTr);
4974 }
4975
4976 /* Attributes. */
4977 /* Reserved bits (bits 31:17 and bits 11:8). */
4978 if (!(Tr.Attr.u & 0xfffe0f00))
4979 { /* likely */ }
4980 else
4981 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrRsvd);
4982
4983 if (!Tr.Attr.n.u1Unusable)
4984 { /* likely */ }
4985 else
4986 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrUnusable);
4987
4988 if ( Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY
4989 || ( !fGstInLongMode
4990 && Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY))
4991 { /* likely */ }
4992 else
4993 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrType);
4994
4995 if (!Tr.Attr.n.u1DescType)
4996 { /* likely */ }
4997 else
4998 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrDescType);
4999
5000 if (Tr.Attr.n.u1Present)
5001 { /* likely */ }
5002 else
5003 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrPresent);
5004
5005 if ( ((Tr.u32Limit & 0x00000fff) == 0x00000fff || !Tr.Attr.n.u1Granularity)
5006 && ((Tr.u32Limit & 0xfff00000) == 0x00000000 || Tr.Attr.n.u1Granularity))
5007 { /* likely */ }
5008 else
5009 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrGran);
5010 }
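/*
 * Note: The type checks above require TR to name a busy TSS: type 11 (32/64-bit
 * busy TSS) always, or type 3 (16-bit busy TSS) only when the "IA-32e mode guest"
 * entry control is clear.
 */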
5011
5012 NOREF(pszInstr);
5013 NOREF(pszFailure);
5014 return VINF_SUCCESS;
5015}
5016
5017
5018/**
5019 * Checks guest GDTR and IDTR as part of VM-entry.
5020 *
5021 * @param pVCpu The cross context virtual CPU structure.
5022 * @param pszInstr The VMX instruction name (for logging purposes).
5023 */
5024IEM_STATIC int iemVmxVmentryCheckGuestGdtrIdtr(PVMCPU pVCpu, const char *pszInstr)
5025{
5026 /*
5027 * GDTR and IDTR.
5028 * See Intel spec. 26.3.1.3 "Checks on Guest Descriptor-Table Registers".
5029 */
5030 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5031 const char *const pszFailure = "VM-exit";
5032
5033 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5034 {
5035 /* Base. */
5036 if (X86_IS_CANONICAL(pVmcs->u64GuestGdtrBase.u))
5037 { /* likely */ }
5038 else
5039 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrBase);
5040
5041 if (X86_IS_CANONICAL(pVmcs->u64GuestIdtrBase.u))
5042 { /* likely */ }
5043 else
5044 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrBase);
5045 }
5046
5047 /* Limit. */
5048 if (!RT_HI_U16(pVmcs->u32GuestGdtrLimit))
5049 { /* likely */ }
5050 else
5051 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrLimit);
5052
5053 if (!RT_HI_U16(pVmcs->u32GuestIdtrLimit))
5054 { /* likely */ }
5055 else
5056 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrLimit);
5057
5058 NOREF(pszInstr);
5059 NOREF(pszFailure);
5060 return VINF_SUCCESS;
5061}
5062
5063
5064/**
5065 * Checks guest RIP and RFLAGS as part of VM-entry.
5066 *
5067 * @param pVCpu The cross context virtual CPU structure.
5068 * @param pszInstr The VMX instruction name (for logging purposes).
5069 */
5070IEM_STATIC int iemVmxVmentryCheckGuestRipRFlags(PVMCPU pVCpu, const char *pszInstr)
5071{
5072 /*
5073 * RIP and RFLAGS.
5074 * See Intel spec. 26.3.1.4 "Checks on Guest RIP and RFLAGS".
5075 */
5076 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5077 const char *const pszFailure = "VM-exit";
5078 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5079
5080 /* RIP. */
5081 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5082 {
5083 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
5084 if ( !fGstInLongMode
5085 || !AttrCs.n.u1Long)
5086 {
5087 if (!RT_HI_U32(pVmcs->u64GuestRip.u))
5088 { /* likely */ }
5089 else
5090 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRipRsvd);
5091 }
5092
5093 if ( fGstInLongMode
5094 && AttrCs.n.u1Long)
5095 {
5096 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth == 48); /* Canonical. */
5097 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth < 64
5098 && X86_IS_CANONICAL(pVmcs->u64GuestRip.u))
5099 { /* likely */ }
5100 else
5101 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRip);
5102 }
5103 }
5104
5105 /* RFLAGS (bits 63:22 (or 31:22), bits 15, 5, 3 are reserved, bit 1 MB1). */
5106 uint64_t const uGuestRFlags = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode ? pVmcs->u64GuestRFlags.u
5107 : pVmcs->u64GuestRFlags.s.Lo;
5108 if ( !(uGuestRFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK))
5109 && (uGuestRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK)
5110 { /* likely */ }
5111 else
5112 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsRsvd);
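/* For illustration: 0x00000002 (only the always-one bit 1 set) is the smallest value that passes the reserved-bits check above. */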
5113
5114 if ( fGstInLongMode
5115 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
5116 {
5117 if (!(uGuestRFlags & X86_EFL_VM))
5118 { /* likely */ }
5119 else
5120 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsVm);
5121 }
5122
5123 if ( VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo)
5124 && VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo) == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5125 {
5126 if (uGuestRFlags & X86_EFL_IF)
5127 { /* likely */ }
5128 else
5129 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsIf);
5130 }
5131
5132 NOREF(pszInstr);
5133 NOREF(pszFailure);
5134 return VINF_SUCCESS;
5135}
5136
5137
5138/**
5139 * Checks guest non-register state as part of VM-entry.
5140 *
5141 * @param pVCpu The cross context virtual CPU structure.
5142 * @param pszInstr The VMX instruction name (for logging purposes).
5143 */
5144IEM_STATIC int iemVmxVmentryCheckGuestNonRegState(PVMCPU pVCpu, const char *pszInstr)
5145{
5146 /*
5147 * Guest non-register state.
5148 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
5149 */
5150 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5151 const char *const pszFailure = "VM-exit";
5152
5153 /*
5154 * Activity state.
5155 */
5156 uint64_t const u64GuestVmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
5157 uint32_t const fActivityStateMask = RT_BF_GET(u64GuestVmxMiscMsr, VMX_BF_MISC_ACTIVITY_STATES);
5158 if (!(pVmcs->u32GuestActivityState & fActivityStateMask))
5159 { /* likely */ }
5160 else
5161 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateRsvd);
5162
5163 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5164 if ( !AttrSs.n.u2Dpl
5165 || pVmcs->u32GuestActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT)
5166 { /* likely */ }
5167 else
5168 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateSsDpl);
5169
5170 if ( pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
5171 || pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5172 {
5173 if (pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE)
5174 { /* likely */ }
5175 else
5176 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateStiMovSs);
5177 }
5178
5179 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
5180 {
5181 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
5182 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(pVmcs->u32EntryIntInfo);
5183 AssertCompile(VMX_V_GUEST_ACTIVITY_STATE_MASK == (VMX_VMCS_GUEST_ACTIVITY_HLT | VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN));
5184 switch (pVmcs->u32GuestActivityState)
5185 {
5186 case VMX_VMCS_GUEST_ACTIVITY_HLT:
5187 {
5188 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT
5189 || uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
5190 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
5191 && ( uVector == X86_XCPT_DB
5192 || uVector == X86_XCPT_MC))
5193 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
5194 && uVector == VMX_ENTRY_INT_INFO_VECTOR_MTF))
5195 { /* likely */ }
5196 else
5197 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateHlt);
5198 break;
5199 }
5200
5201 case VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN:
5202 {
5203 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
5204 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
5205 && uVector == X86_XCPT_MC))
5206 { /* likely */ }
5207 else
5208 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateShutdown);
5209 break;
5210 }
5211
5212 case VMX_VMCS_GUEST_ACTIVITY_ACTIVE:
5213 default:
5214 break;
5215 }
5216 }
5217
5218 /*
5219 * Interruptibility state.
5220 */
5221 if (!(pVmcs->u32GuestIntrState & ~VMX_VMCS_GUEST_INT_STATE_MASK))
5222 { /* likely */ }
5223 else
5224 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRsvd);
5225
5226 if ((pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5227 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5228 { /* likely */ }
5229 else
5230 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateStiMovSs);
5231
5232 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_IF)
5233 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5234 { /* likely */ }
5235 else
5236 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRFlagsSti);
5237
5238 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
5239 {
5240 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
5241 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5242 {
5243 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
5244 { /* likely */ }
5245 else
5246 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateExtInt);
5247 }
5248 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5249 {
5250 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
5251 { /* likely */ }
5252 else
5253 {
5254 /*
5255 * We don't support injecting NMIs when blocking-by-STI would be in effect.
5256 * We update the VM-exit qualification only when blocking-by-STI is set
5257 * without blocking-by-MovSS being set. Although in practice it does not
5258 * make much difference since the order of checks is implementation-defined.
5259 */
5260 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5261 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_NMI_INJECT);
5262 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateNmi);
5263 }
5264
5265 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5266 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI))
5267 { /* likely */ }
5268 else
5269 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateVirtNmi);
5270 }
5271 }
5272
5273 /* We don't support SMM yet. So blocking-by-SMIs must not be set. */
5274 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI))
5275 { /* likely */ }
5276 else
5277 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateSmi);
5278
5279 /* We don't support SGX yet. So enclave-interruption must not be set. */
5280 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_ENCLAVE))
5281 { /* likely */ }
5282 else
5283 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateEnclave);
5284
5285 /*
5286 * Pending debug exceptions.
5287 */
5288 uint64_t const uPendingDbgXcpt = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode
5289 ? pVmcs->u64GuestPendingDbgXcpt.u
5290 : pVmcs->u64GuestPendingDbgXcpt.s.Lo;
5291 if (!(uPendingDbgXcpt & ~VMX_VMCS_GUEST_PENDING_DEBUG_VALID_MASK))
5292 { /* likely */ }
5293 else
5294 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRsvd);
5295
5296 if ( (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5297 || pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5298 {
5299 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_TF)
5300 && !(pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF)
5301 && !(uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
5302 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsTf);
5303
5304 if ( ( !(pVmcs->u64GuestRFlags.u & X86_EFL_TF)
5305 || (pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF))
5306 && (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
5307 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsNoTf);
5308 }
5309
5310 /* We don't support RTM (Real-time Transactional Memory) yet. */
5311 if (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_RTM)
5312 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRtm);
5313
5314 /*
5315 * VMCS link pointer.
5316 */
5317 if (pVmcs->u64VmcsLinkPtr.u != UINT64_C(0xffffffffffffffff))
5318 {
5319 RTGCPHYS const GCPhysShadowVmcs = pVmcs->u64VmcsLinkPtr.u;
5320 /* We don't support SMM yet (so VMCS link pointer cannot be the current VMCS). */
5321 if (GCPhysShadowVmcs != IEM_VMX_GET_CURRENT_VMCS(pVCpu))
5322 { /* likely */ }
5323 else
5324 {
5325 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5326 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrCurVmcs);
5327 }
5328
5329 /* Validate the address. */
5330 if ( (GCPhysShadowVmcs & X86_PAGE_4K_OFFSET_MASK)
5331 || (GCPhysShadowVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5332 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysShadowVmcs))
5333 {
5334 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5335 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmcsLinkPtr);
5336 }
5337
5338 /* Read the VMCS-link pointer from guest memory. */
5339 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs));
5340 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs),
5341 GCPhysShadowVmcs, VMX_V_VMCS_SIZE);
5342 if (RT_FAILURE(rc))
5343 {
5344 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5345 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys);
5346 }
5347
5348 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
5349 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID)
5350 { /* likely */ }
5351 else
5352 {
5353 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5354 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrRevId);
5355 }
5356
5357 /* Verify the shadow bit is set if VMCS shadowing is enabled. */
5358 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
5359 || pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.fIsShadowVmcs)
5360 { /* likely */ }
5361 else
5362 {
5363 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5364 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrShadow);
5365 }
5366
5367 /* Finally update our cache of the guest physical address of the shadow VMCS. */
5368 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs = GCPhysShadowVmcs;
5369 }
5370
5371 NOREF(pszInstr);
5372 NOREF(pszFailure);
5373 return VINF_SUCCESS;
5374}
5375
5376
5377/**
5378 * Checks if the PDPTEs referenced by the nested-guest CR3 are valid as part of
5379 * VM-entry.
5380 *
5381 * @returns VBox status code.
5382 * @param pVCpu The cross context virtual CPU structure.
5383 * @param pszInstr The VMX instruction name (for logging purposes).
5384 * @param pVmcs Pointer to the virtual VMCS.
5385 */
5386IEM_STATIC int iemVmxVmentryCheckGuestPdptesForCr3(PVMCPU pVCpu, const char *pszInstr, PVMXVVMCS pVmcs)
5387{
5388 /*
5389 * Check PDPTEs.
5390 * See Intel spec. 4.4.1 "PDPTE Registers".
5391 */
5392 uint64_t const uGuestCr3 = pVmcs->u64GuestCr3.u & X86_CR3_PAE_PAGE_MASK;
5393 const char *const pszFailure = "VM-exit";
5394
5395 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
5396 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uGuestCr3, sizeof(aPdptes));
5397 if (RT_SUCCESS(rc))
5398 {
5399 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
5400 {
5401 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
5402 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
5403 { /* likely */ }
5404 else
5405 {
5406 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
5407 VMXVDIAG const enmDiag = iemVmxGetDiagVmentryPdpteRsvd(iPdpte);
5408 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5409 }
5410 }
5411 }
5412 else
5413 {
5414 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
5415 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys);
5416 }
5417
5418 NOREF(pszFailure);
5419 return rc;
5420}
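/*
 * Note: In PAE paging mode, CR3[31:5] points at a 32-byte aligned table of four
 * 64-bit PDPTEs (each covering 1 GB of linear address space); the masking with
 * X86_CR3_PAE_PAGE_MASK and the sizeof(aPdptes) read above fetch exactly that
 * table for validation.
 */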
5421
5422
5423/**
5424 * Checks guest PDPTEs as part of VM-entry.
5425 *
5426 * @param pVCpu The cross context virtual CPU structure.
5427 * @param pszInstr The VMX instruction name (for logging purposes).
5428 */
5429IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPU pVCpu, const char *pszInstr)
5430{
5431 /*
5432 * Guest PDPTEs.
5433 * See Intel spec. 26.3.1.6 "Checks on Guest Page-Directory-Pointer-Table Entries".
5434 */
5435 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5436 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5437
5438 /* Check PDPTEs if the VM-entry is to a guest using PAE paging. */
5439 int rc;
5440 if ( !fGstInLongMode
5441 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE)
5442 && (pVmcs->u64GuestCr0.u & X86_CR0_PG))
5443 {
5444 /*
5445 * We don't support nested-paging for nested-guests yet.
5446 *
5447 * Without nested-paging for nested-guests, PDPTEs in the VMCS are not used,
5448 * rather we need to check the PDPTEs referenced by the guest CR3.
5449 */
5450 rc = iemVmxVmentryCheckGuestPdptesForCr3(pVCpu, pszInstr, pVmcs);
5451 }
5452 else
5453 rc = VINF_SUCCESS;
5454 return rc;
5455}
5456
5457
5458/**
5459 * Checks guest-state as part of VM-entry.
5460 *
5461 * @returns VBox status code.
5462 * @param pVCpu The cross context virtual CPU structure.
5463 * @param pszInstr The VMX instruction name (for logging purposes).
5464 */
5465IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPU pVCpu, const char *pszInstr)
5466{
5467 int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
5468 if (RT_SUCCESS(rc))
5469 {
5470 rc = iemVmxVmentryCheckGuestSegRegs(pVCpu, pszInstr);
5471 if (RT_SUCCESS(rc))
5472 {
5473 rc = iemVmxVmentryCheckGuestGdtrIdtr(pVCpu, pszInstr);
5474 if (RT_SUCCESS(rc))
5475 {
5476 rc = iemVmxVmentryCheckGuestRipRFlags(pVCpu, pszInstr);
5477 if (RT_SUCCESS(rc))
5478 {
5479 rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
5480 if (RT_SUCCESS(rc))
5481 return iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
5482 }
5483 }
5484 }
5485 }
5486 return rc;
5487}
5488
5489
5490/**
5491 * Checks host-state as part of VM-entry.
5492 *
5493 * @returns VBox status code.
5494 * @param pVCpu The cross context virtual CPU structure.
5495 * @param pszInstr The VMX instruction name (for logging purposes).
5496 */
5497IEM_STATIC int iemVmxVmentryCheckHostState(PVMCPU pVCpu, const char *pszInstr)
5498{
5499 /*
5500 * Host Control Registers and MSRs.
5501 * See Intel spec. 26.2.2 "Checks on Host Control Registers and MSRs".
5502 */
5503 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5504 const char * const pszFailure = "VMFail";
5505
5506 /* CR0 reserved bits. */
5507 {
5508 /* CR0 MB1 bits. */
5509 uint64_t const u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
5510 if ((pVmcs->u64HostCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
5511 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed0);
5512
5513 /* CR0 MBZ bits. */
5514 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
5515 if (pVmcs->u64HostCr0.u & ~u64Cr0Fixed1)
5516 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed1);
5517 }
5518
5519 /* CR4 reserved bits. */
5520 {
5521 /* CR4 MB1 bits. */
5522 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
5523 if ((pVmcs->u64HostCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
5524 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed0);
5525
5526 /* CR4 MBZ bits. */
5527 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
5528 if (pVmcs->u64HostCr4.u & ~u64Cr4Fixed1)
5529 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed1);
5530 }
5531
5532 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5533 {
5534 /* CR3 reserved bits. */
5535 if (!(pVmcs->u64HostCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
5536 { /* likely */ }
5537 else
5538 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr3);
5539
5540 /* SYSENTER ESP and SYSENTER EIP. */
5541 if ( X86_IS_CANONICAL(pVmcs->u64HostSysenterEsp.u)
5542 && X86_IS_CANONICAL(pVmcs->u64HostSysenterEip.u))
5543 { /* likely */ }
5544 else
5545 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSysenterEspEip);
5546 }
5547
5548 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
5549 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PERF_MSR));
5550
5551 /* PAT MSR. */
5552 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
5553 || CPUMIsPatMsrValid(pVmcs->u64HostPatMsr.u))
5554 { /* likely */ }
5555 else
5556 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostPatMsr);
5557
5558 /* EFER MSR. */
5559 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
5560 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
5561 || !(pVmcs->u64HostEferMsr.u & ~uValidEferMask))
5562 { /* likely */ }
5563 else
5564 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsrRsvd);
5565
5566 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
5567 bool const fHostLma = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LMA);
5568 bool const fHostLme = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LME);
5569 if ( fHostInLongMode == fHostLma
5570 && fHostInLongMode == fHostLme)
5571 { /* likely */ }
5572 else
5573 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsr);
5574
5575 /*
5576 * Host Segment and Descriptor-Table Registers.
5577 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
5578 */
5579 /* Selector RPL and TI. */
5580 if ( !(pVmcs->HostCs & (X86_SEL_RPL | X86_SEL_LDT))
5581 && !(pVmcs->HostSs & (X86_SEL_RPL | X86_SEL_LDT))
5582 && !(pVmcs->HostDs & (X86_SEL_RPL | X86_SEL_LDT))
5583 && !(pVmcs->HostEs & (X86_SEL_RPL | X86_SEL_LDT))
5584 && !(pVmcs->HostFs & (X86_SEL_RPL | X86_SEL_LDT))
5585 && !(pVmcs->HostGs & (X86_SEL_RPL | X86_SEL_LDT))
5586 && !(pVmcs->HostTr & (X86_SEL_RPL | X86_SEL_LDT)))
5587 { /* likely */ }
5588 else
5589 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSel);
5590
5591 /* CS and TR selectors cannot be 0. */
5592 if ( pVmcs->HostCs
5593 && pVmcs->HostTr)
5594 { /* likely */ }
5595 else
5596 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCsTr);
5597
5598 /* SS cannot be 0 if 32-bit host. */
5599 if ( fHostInLongMode
5600 || pVmcs->HostSs)
5601 { /* likely */ }
5602 else
5603 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSs);
5604
5605 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5606 {
5607 /* FS, GS, GDTR, IDTR, TR base address. */
5608 if ( X86_IS_CANONICAL(pVmcs->u64HostFsBase.u)
5609 && X86_IS_CANONICAL(pVmcs->u64HostGsBase.u)
5610 && X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u)
5611 && X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u)
5612 && X86_IS_CANONICAL(pVmcs->u64HostTrBase.u))
5613 { /* likely */ }
5614 else
5615 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSegBase);
5616 }
5617
5618 /*
5619 * Host address-space size for 64-bit CPUs.
5620 * See Intel spec. 26.2.4 "Checks Related to Address-Space Size".
5621 */
5622 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5623 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5624 {
5625 bool const fCpuInLongMode = CPUMIsGuestInLongMode(pVCpu);
5626
5627 /* Logical processor in IA-32e mode. */
5628 if (fCpuInLongMode)
5629 {
5630 if (fHostInLongMode)
5631 {
5632 /* PAE must be set. */
5633 if (pVmcs->u64HostCr4.u & X86_CR4_PAE)
5634 { /* likely */ }
5635 else
5636 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pae);
5637
5638 /* RIP must be canonical. */
5639 if (X86_IS_CANONICAL(pVmcs->u64HostRip.u))
5640 { /* likely */ }
5641 else
5642 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRip);
5643 }
5644 else
5645 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostLongMode);
5646 }
5647 else
5648 {
5649 /* Logical processor is outside IA-32e mode. */
5650 if ( !fGstInLongMode
5651 && !fHostInLongMode)
5652 {
5653 /* PCIDE should not be set. */
5654 if (!(pVmcs->u64HostCr4.u & X86_CR4_PCIDE))
5655 { /* likely */ }
5656 else
5657 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pcide);
5658
5659 /* The high 32-bits of RIP MBZ. */
5660 if (!pVmcs->u64HostRip.s.Hi)
5661 { /* likely */ }
5662 else
5663 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRipRsvd);
5664 }
5665 else
5666 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongMode);
5667 }
5668 }
5669 else
5670 {
5671 /* Host address-space size for 32-bit CPUs. */
5672 if ( !fGstInLongMode
5673 && !fHostInLongMode)
5674 { /* likely */ }
5675 else
5676 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongModeNoCpu);
5677 }
5678
5679 NOREF(pszInstr);
5680 NOREF(pszFailure);
5681 return VINF_SUCCESS;
5682}
5683
5684
5685/**
5686 * Checks VM-entry controls fields as part of VM-entry.
5687 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
5688 *
5689 * @returns VBox status code.
5690 * @param pVCpu The cross context virtual CPU structure.
5691 * @param pszInstr The VMX instruction name (for logging purposes).
5692 */
5693IEM_STATIC int iemVmxVmentryCheckEntryCtls(PVMCPU pVCpu, const char *pszInstr)
5694{
5695 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5696 const char * const pszFailure = "VMFail";
5697
5698 /* VM-entry controls. */
5699 VMXCTLSMSR EntryCtls;
5700 EntryCtls.u = CPUMGetGuestIa32VmxEntryCtls(pVCpu);
5701 if (~pVmcs->u32EntryCtls & EntryCtls.n.disallowed0)
5702 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);
5703
5704 if (pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1)
5705 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsAllowed1);
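/*
 * Note on the capability MSR convention used above: the low 32 bits of a VMX
 * controls MSR ("allowed 0-settings", disallowed0) give the controls that must be 1,
 * while the high 32 bits ("allowed 1-settings", allowed1) give the controls that may
 * be 1; hence the complement test against disallowed0 and the mask test against
 * allowed1.
 */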
5706
5707 /* Event injection. */
5708 uint32_t const uIntInfo = pVmcs->u32EntryIntInfo;
5709 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VALID))
5710 {
5711 /* Type and vector. */
5712 uint8_t const uType = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_TYPE);
5713 uint8_t const uVector = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VECTOR);
5714 uint8_t const uRsvd = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_RSVD_12_30);
5715 if ( !uRsvd
5716 && HMVmxIsEntryIntInfoTypeValid(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxMonitorTrapFlag, uType)
5717 && HMVmxIsEntryIntInfoVectorValid(uVector, uType))
5718 { /* likely */ }
5719 else
5720 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoTypeVecRsvd);
5721
5722 /* Exception error code. */
5723 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID))
5724 {
5725 /* Delivery possible only in Unrestricted-guest mode when CR0.PE is set. */
5726 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
5727 || (pVmcs->u64GuestCr0.s.Lo & X86_CR0_PE))
5728 { /* likely */ }
5729 else
5730 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodePe);
5731
5732 /* Exceptions that provide an error code. */
5733 if ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
5734 && ( uVector == X86_XCPT_DF
5735 || uVector == X86_XCPT_TS
5736 || uVector == X86_XCPT_NP
5737 || uVector == X86_XCPT_SS
5738 || uVector == X86_XCPT_GP
5739 || uVector == X86_XCPT_PF
5740 || uVector == X86_XCPT_AC))
5741 { /* likely */ }
5742 else
5743 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodeVec);
5744
5745 /* Exception error-code reserved bits. */
5746 if (!(pVmcs->u32EntryXcptErrCode & ~VMX_ENTRY_INT_XCPT_ERR_CODE_VALID_MASK))
5747 { /* likely */ }
5748 else
5749 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryXcptErrCodeRsvd);
5750
5751 /* Injecting a software interrupt, software exception or privileged software exception. */
5752 if ( uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
5753 || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
5754 || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
5755 {
5756 /* Instruction length must be in the range 0-15. */
5757 if (pVmcs->u32EntryInstrLen <= VMX_ENTRY_INSTR_LEN_MAX)
5758 { /* likely */ }
5759 else
5760 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLen);
5761
5762 /* Instruction length of 0 is allowed only when its CPU feature is present. */
5763 if ( pVmcs->u32EntryInstrLen == 0
5764 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEntryInjectSoftInt)
5765 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLenZero);
5766 }
5767 }
5768 }
5769
5770 /* VM-entry MSR-load count and VM-entry MSR-load area address. */
5771 if (pVmcs->u32EntryMsrLoadCount)
5772 {
5773 if ( (pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
5774 || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5775 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
5776 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrEntryMsrLoad);
5777 }
5778
5779 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)); /* We don't support SMM yet. */
5780 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON)); /* We don't support dual-monitor treatment yet. */
5781
5782 NOREF(pszInstr);
5783 NOREF(pszFailure);
5784 return VINF_SUCCESS;
5785}
5786
5787
5788/**
5789 * Checks VM-exit controls fields as part of VM-entry.
5790 * See Intel spec. 26.2.1.2 "VM-Exit Control Fields".
5791 *
5792 * @returns VBox status code.
5793 * @param pVCpu The cross context virtual CPU structure.
5794 * @param pszInstr The VMX instruction name (for logging purposes).
5795 */
5796IEM_STATIC int iemVmxVmentryCheckExitCtls(PVMCPU pVCpu, const char *pszInstr)
5797{
5798 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5799 const char * const pszFailure = "VMFail";
5800
5801 /* VM-exit controls. */
5802 VMXCTLSMSR ExitCtls;
5803 ExitCtls.u = CPUMGetGuestIa32VmxExitCtls(pVCpu);
5804 if (~pVmcs->u32ExitCtls & ExitCtls.n.disallowed0)
5805 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);
5806
5807 if (pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1)
5808 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsAllowed1);
5809
5810 /* Save preemption timer without activating it. */
5811 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
5812 && (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
5813 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_SavePreemptTimer);
5814
5815 /* VM-exit MSR-store count and VM-exit MSR-store area address. */
5816 if (pVmcs->u32ExitMsrStoreCount)
5817 {
5818 if ( (pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
5819 || (pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5820 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
5821 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrStore);
5822 }
5823
5824 /* VM-exit MSR-load count and VM-exit MSR-load area address. */
5825 if (pVmcs->u32ExitMsrLoadCount)
5826 {
5827 if ( (pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
5828 || (pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5829 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
5830 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrLoad);
5831 }
5832
5833 NOREF(pszInstr);
5834 NOREF(pszFailure);
5835 return VINF_SUCCESS;
5836}
5837
5838
5839/**
5840 * Checks VM-execution controls fields as part of VM-entry.
5841 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
5842 *
5843 * @returns VBox status code.
5844 * @param pVCpu The cross context virtual CPU structure.
5845 * @param pszInstr The VMX instruction name (for logging purposes).
5846 *
5847 * @remarks This may update secondary-processor based VM-execution control fields
5848 * in the current VMCS if necessary.
5849 */
5850IEM_STATIC int iemVmxVmentryCheckExecCtls(PVMCPU pVCpu, const char *pszInstr)
5851{
5852 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5853 const char * const pszFailure = "VMFail";
5854
5855 /* Pin-based VM-execution controls. */
5856 {
5857 VMXCTLSMSR PinCtls;
5858 PinCtls.u = CPUMGetGuestIa32VmxPinbasedCtls(pVCpu);
5859 if (~pVmcs->u32PinCtls & PinCtls.n.disallowed0)
5860 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);
5861
5862 if (pVmcs->u32PinCtls & ~PinCtls.n.allowed1)
5863 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsAllowed1);
5864 }
5865
5866 /* Processor-based VM-execution controls. */
5867 {
5868 VMXCTLSMSR ProcCtls;
5869 ProcCtls.u = CPUMGetGuestIa32VmxProcbasedCtls(pVCpu);
5870 if (~pVmcs->u32ProcCtls & ProcCtls.n.disallowed0)
5871 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);
5872
5873 if (pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1)
5874 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsAllowed1);
5875 }
5876
5877 /* Secondary processor-based VM-execution controls. */
5878 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
5879 {
5880 VMXCTLSMSR ProcCtls2;
5881 ProcCtls2.u = CPUMGetGuestIa32VmxProcbasedCtls2(pVCpu);
5882 if (~pVmcs->u32ProcCtls2 & ProcCtls2.n.disallowed0)
5883 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);
5884
5885 if (pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1)
5886 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Allowed1);
5887 }
5888 else
5889 Assert(!pVmcs->u32ProcCtls2);
5890
5891 /* CR3-target count. */
5892 if (pVmcs->u32Cr3TargetCount <= VMX_V_CR3_TARGET_COUNT)
5893 { /* likely */ }
5894 else
5895 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Cr3TargetCount);
5896
5897 /* I/O bitmaps physical addresses. */
5898 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
5899 {
5900 if ( (pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
5901 || (pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5902 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
5903 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapA);
5904
5905 if ( (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
5906 || (pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5907 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
5908 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
5909 }
5910
5911 /* MSR bitmap physical address. */
5912 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
5913 {
5914 RTGCPHYS const GCPhysMsrBitmap = pVmcs->u64AddrMsrBitmap.u;
5915 if ( (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
5916 || (GCPhysMsrBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5917 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysMsrBitmap))
5918 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);
5919
5920 /* Read the MSR bitmap. */
5921 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
5922 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap),
5923 GCPhysMsrBitmap, VMX_V_MSR_BITMAP_SIZE);
5924 if (RT_FAILURE(rc))
5925 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrBitmapPtrReadPhys);
5926 }
5927
5928 /* TPR shadow related controls. */
5929 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
5930 {
5931 /* Virtual-APIC page physical address. */
5932 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
5933 if ( (GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
5934 || (GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5935 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
5936 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
5937
5938 /* Read the Virtual-APIC page. */
5939 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
5940 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage),
5941 GCPhysVirtApic, VMX_V_VIRT_APIC_SIZE);
5942 if (RT_FAILURE(rc))
5943 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
5944
5945 /* TPR threshold without virtual-interrupt delivery. */
5946 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
5947 && (pVmcs->u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK))
5948 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdRsvd);
5949
5950 /* TPR threshold and VTPR. */
5951 uint8_t const *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
5952 uint8_t const u8VTpr = *(pbVirtApic + XAPIC_OFF_TPR);
5953 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
5954 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
5955 && RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) > ((u8VTpr >> 4) & UINT32_C(0xf)) /* Bits 4:7 */)
5956 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
5957 }
5958 else
5959 {
5960 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
5961 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
5962 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
5963 { /* likely */ }
5964 else
5965 {
5966 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
5967 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicTprShadow);
5968 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
5969 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ApicRegVirt);
5970 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
5971 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtIntDelivery);
5972 }
5973 }
5974
5975 /* NMI exiting and virtual-NMIs. */
5976 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
5977 && (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
5978 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtNmi);
5979
5980 /* Virtual-NMIs and NMI-window exiting. */
5981 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5982 && (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
5983 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_NmiWindowExit);
5984
5985 /* Virtualize APIC accesses. */
5986 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
5987 {
5988 /* APIC-access physical address. */
5989 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
5990 if ( (GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
5991 || (GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5992 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
5993 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);
5994
5995 /*
5996 * Disallow APIC-access page and virtual-APIC page from being the same address.
5997 * Note! This is not an Intel requirement, but one imposed by our implementation.
5998 */
5999 /** @todo r=ramshankar: This is done primarily to simplify recursion scenarios while
6000 * redirecting accesses between the APIC-access page and the virtual-APIC
6001 * page. If any nested hypervisor requires this, we can implement it later. */
6002 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
6003 {
6004 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
6005 if (GCPhysVirtApic == GCPhysApicAccess)
6006 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessEqVirtApic);
6007 }
6008 }
6009
6010 /* Virtualize-x2APIC mode is mutually exclusive with virtualize-APIC accesses. */
6011 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6012 && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
6013 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
6014
6015 /* Virtual-interrupt delivery requires external interrupt exiting. */
6016 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
6017 && !(pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
6018 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
6019
6020 /* VPID. */
6021 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VPID)
6022 || pVmcs->u16Vpid != 0)
6023 { /* likely */ }
6024 else
6025 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Vpid);
6026
6027 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_POSTED_INT)); /* We don't support posted interrupts yet. */
6028 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)); /* We don't support EPT yet. */
6029 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PML)); /* We don't support PML yet. */
6030 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)); /* We don't support Unrestricted-guests yet. */
6031 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMFUNC)); /* We don't support VM functions yet. */
6032 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT_VE)); /* We don't support EPT-violation #VE yet. */
6033 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)); /* We don't support Pause-loop exiting yet. */
6034
6035 /* VMCS shadowing. */
6036 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
6037 {
6038 /* VMREAD-bitmap physical address. */
6039 RTGCPHYS const GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
6040 if ( ( GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
6041 || ( GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6042 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
6043 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmreadBitmap);
6044
6045 /* VMWRITE-bitmap physical address. */
        RTGCPHYS const GCPhysVmwriteBitmap = pVmcs->u64AddrVmwriteBitmap.u;
6047 if ( ( GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
6048 || ( GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6049 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
6050 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);
6051
6052 /* Read the VMREAD-bitmap. */
6053 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
6054 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap),
6055 GCPhysVmreadBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
6056 if (RT_FAILURE(rc))
6057 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys);
6058
6059 /* Read the VMWRITE-bitmap. */
6060 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap));
6061 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap),
6062 GCPhysVmwriteBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
6063 if (RT_FAILURE(rc))
6064 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys);
6065 }
6066
6067 NOREF(pszInstr);
6068 NOREF(pszFailure);
6069 return VINF_SUCCESS;
6070}
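
/*
 * Note! The guest-physical address checks above (4K alignment, guest physical-address
 * width, normal RAM backing) recur for every VMCS-referenced structure. A minimal
 * sketch of the pattern as a standalone predicate follows; the helper name is
 * illustrative only and not part of this file.
 */
#if 0 /* Illustrative sketch only. */
DECLINLINE(bool) iemVmxSketchIsValidVmcsRefPhysAddr(PVMCPU pVCpu, RTGCPHYS GCPhysAddr)
{
    return !(GCPhysAddr & X86_PAGE_4K_OFFSET_MASK)                                  /* 4K aligned. */
        && !(GCPhysAddr >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth) /* Within CPU phys-addr width. */
        &&  PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysAddr);                /* Normal (non-MMIO/ROM) memory. */
}
#endif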
6071
6072
6073/**
 * Loads the guest control registers, debug registers and some MSRs as part of
6075 * VM-entry.
6076 *
6077 * @param pVCpu The cross context virtual CPU structure.
6078 */
6079IEM_STATIC void iemVmxVmentryLoadGuestControlRegsMsrs(PVMCPU pVCpu)
6080{
6081 /*
6082 * Load guest control registers, debug registers and MSRs.
6083 * See Intel spec. 26.3.2.1 "Loading Guest Control Registers, Debug Registers and MSRs".
6084 */
6085 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6086 uint64_t const uGstCr0 = (pVmcs->u64GuestCr0.u & ~VMX_ENTRY_CR0_IGNORE_MASK)
6087 | (pVCpu->cpum.GstCtx.cr0 & VMX_ENTRY_CR0_IGNORE_MASK);
6088 CPUMSetGuestCR0(pVCpu, uGstCr0);
6089 CPUMSetGuestCR4(pVCpu, pVmcs->u64GuestCr4.u);
6090 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64GuestCr3.u;
6091
6092 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
6093 pVCpu->cpum.GstCtx.dr[7] = (pVmcs->u64GuestDr7.u & ~VMX_ENTRY_DR7_MBZ_MASK) | VMX_ENTRY_DR7_MB1_MASK;
6094
6095 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64GuestSysenterEip.s.Lo;
6096 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64GuestSysenterEsp.s.Lo;
6097 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32GuestSysenterCS;
6098
6099 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6100 {
6101 /* FS base and GS base are loaded while loading the rest of the guest segment registers. */
6102
6103 /* EFER MSR. */
6104 if (!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR))
6105 {
6106 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
6107 bool const fGstPaging = RT_BOOL(uGstCr0 & X86_CR0_PG);
6108 uint64_t const uHostEfer = pVCpu->cpum.GstCtx.msrEFER;
6109 if (fGstInLongMode)
6110 {
6111 /* If the nested-guest is in long mode, LMA and LME are both set. */
6112 Assert(fGstPaging);
6113 pVCpu->cpum.GstCtx.msrEFER = uHostEfer | (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
6114 }
6115 else
6116 {
6117 /*
6118 * If the nested-guest is outside long mode:
6119 * - With paging: LMA is cleared, LME is cleared.
6120 * - Without paging: LMA is cleared, LME is left unmodified.
6121 */
6122 uint64_t const fLmaLmeMask = MSR_K6_EFER_LMA | (fGstPaging ? MSR_K6_EFER_LME : 0);
6123 pVCpu->cpum.GstCtx.msrEFER = uHostEfer & ~fLmaLmeMask;
6124 }
6125 }
6126 /* else: see below. */
6127 }
6128
6129 /* PAT MSR. */
6130 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
6131 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64GuestPatMsr.u;
6132
6133 /* EFER MSR. */
6134 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
6135 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64GuestEferMsr.u;
6136
6137 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
6138 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
6139
6140 /* We don't support IA32_BNDCFGS MSR yet. */
6141 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
6142
6143 /* Nothing to do for SMBASE register - We don't support SMM yet. */
6144}
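
/*
 * The EFER.LMA/LME fixup above (performed when the "load IA32_EFER" VM-entry control
 * is absent) reduces to a pure function of the "IA-32e mode guest" entry control,
 * guest CR0.PG and the EFER value at entry. A minimal sketch; the helper name is
 * illustrative only and not part of this file:
 */
#if 0 /* Illustrative sketch only. */
DECLINLINE(uint64_t) iemVmxSketchCalcVmentryEfer(bool fGstInLongMode, bool fGstPaging, uint64_t uEfer)
{
    if (fGstInLongMode)
        return uEfer | (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);     /* Long mode: LMA and LME both set. */
    /* Outside long mode: LMA is cleared; LME is cleared only when paging is enabled. */
    uint64_t const fLmaLmeMask = MSR_K6_EFER_LMA | (fGstPaging ? MSR_K6_EFER_LME : 0);
    return uEfer & ~fLmaLmeMask;
}
#endif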
6145
6146
6147/**
6148 * Loads the guest segment registers, GDTR, IDTR, LDTR and TR as part of VM-entry.
6149 *
6150 * @param pVCpu The cross context virtual CPU structure.
6151 */
6152IEM_STATIC void iemVmxVmentryLoadGuestSegRegs(PVMCPU pVCpu)
6153{
6154 /*
6155 * Load guest segment registers, GDTR, IDTR, LDTR and TR.
6156 * See Intel spec. 26.3.2.2 "Loading Guest Segment Registers and Descriptor-Table Registers".
6157 */
6158 /* CS, SS, ES, DS, FS, GS. */
6159 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6160 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
6161 {
6162 PCPUMSELREG pGstSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6163 CPUMSELREG VmcsSelReg;
6164 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &VmcsSelReg);
6165 AssertRC(rc); NOREF(rc);
6166 if (!(VmcsSelReg.Attr.u & X86DESCATTR_UNUSABLE))
6167 {
6168 pGstSelReg->Sel = VmcsSelReg.Sel;
6169 pGstSelReg->ValidSel = VmcsSelReg.Sel;
6170 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6171 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6172 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
6173 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
6174 }
6175 else
6176 {
6177 pGstSelReg->Sel = VmcsSelReg.Sel;
6178 pGstSelReg->ValidSel = VmcsSelReg.Sel;
6179 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6180 switch (iSegReg)
6181 {
6182 case X86_SREG_CS:
6183 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6184 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
6185 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
6186 break;
6187
6188 case X86_SREG_SS:
6189 pGstSelReg->u64Base = VmcsSelReg.u64Base & UINT32_C(0xfffffff0);
6190 pGstSelReg->u32Limit = 0;
6191 pGstSelReg->Attr.u = (VmcsSelReg.Attr.u & X86DESCATTR_DPL) | X86DESCATTR_D | X86DESCATTR_UNUSABLE;
6192 break;
6193
6194 case X86_SREG_ES:
6195 case X86_SREG_DS:
6196 pGstSelReg->u64Base = 0;
6197 pGstSelReg->u32Limit = 0;
6198 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
6199 break;
6200
6201 case X86_SREG_FS:
6202 case X86_SREG_GS:
6203 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6204 pGstSelReg->u32Limit = 0;
6205 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
6206 break;
6207 }
6208 Assert(pGstSelReg->Attr.n.u1Unusable);
6209 }
6210 }
6211
6212 /* LDTR. */
6213 pVCpu->cpum.GstCtx.ldtr.Sel = pVmcs->GuestLdtr;
6214 pVCpu->cpum.GstCtx.ldtr.ValidSel = pVmcs->GuestLdtr;
6215 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
6216 if (!(pVmcs->u32GuestLdtrAttr & X86DESCATTR_UNUSABLE))
6217 {
6218 pVCpu->cpum.GstCtx.ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
6219 pVCpu->cpum.GstCtx.ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
6220 pVCpu->cpum.GstCtx.ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
6221 }
6222 else
6223 {
6224 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
6225 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
6226 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
6227 }
6228
6229 /* TR. */
6230 Assert(!(pVmcs->u32GuestTrAttr & X86DESCATTR_UNUSABLE));
6231 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->GuestTr;
6232 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->GuestTr;
6233 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
6234 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64GuestTrBase.u;
6235 pVCpu->cpum.GstCtx.tr.u32Limit = pVmcs->u32GuestTrLimit;
6236 pVCpu->cpum.GstCtx.tr.Attr.u = pVmcs->u32GuestTrAttr;
6237
6238 /* GDTR. */
6239 pVCpu->cpum.GstCtx.gdtr.cbGdt = pVmcs->u32GuestGdtrLimit;
6240 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64GuestGdtrBase.u;
6241
6242 /* IDTR. */
6243 pVCpu->cpum.GstCtx.idtr.cbIdt = pVmcs->u32GuestIdtrLimit;
6244 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64GuestIdtrBase.u;
6245}
6246
6247
6248/**
6249 * Loads the guest MSRs from the VM-entry auto-load MSRs as part of VM-entry.
6250 *
6251 * @returns VBox status code.
6252 * @param pVCpu The cross context virtual CPU structure.
6253 * @param pszInstr The VMX instruction name (for logging purposes).
6254 */
6255IEM_STATIC int iemVmxVmentryLoadGuestAutoMsrs(PVMCPU pVCpu, const char *pszInstr)
6256{
6257 /*
6258 * Load guest MSRs.
6259 * See Intel spec. 26.4 "Loading MSRs".
6260 */
6261 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6262 const char *const pszFailure = "VM-exit";
6263
6264 /*
6265 * The VM-entry MSR-load area address need not be a valid guest-physical address if the
6266 * VM-entry MSR load count is 0. If this is the case, bail early without reading it.
6267 * See Intel spec. 24.8.2 "VM-Entry Controls for MSRs".
6268 */
6269 uint32_t const cMsrs = pVmcs->u32EntryMsrLoadCount;
6270 if (!cMsrs)
6271 return VINF_SUCCESS;
6272
6273 /*
     * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count
     * is exceeded, possibly even raising #MC exceptions during the VMX transition. Our
     * implementation shall fail VM-entry with a VMX_EXIT_ERR_MSR_LOAD VM-exit.
6277 */
6278 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
6279 if (fIsMsrCountValid)
6280 { /* likely */ }
6281 else
6282 {
6283 iemVmxVmcsSetExitQual(pVCpu, VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
6284 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadCount);
6285 }
6286
6287 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrEntryMsrLoad.u;
    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
6289 GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
6290 if (RT_SUCCESS(rc))
6291 {
6292 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
6293 Assert(pMsr);
6294 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
6295 {
6296 if ( !pMsr->u32Reserved
6297 && pMsr->u32Msr != MSR_K8_FS_BASE
6298 && pMsr->u32Msr != MSR_K8_GS_BASE
6299 && pMsr->u32Msr != MSR_K6_EFER
6300 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
6301 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
6302 {
6303 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
6304 if (rcStrict == VINF_SUCCESS)
6305 continue;
6306
6307 /*
             * If we're in ring-0, we cannot handle returns to ring-3 at this point and still continue VM-entry.
             * If any guest hypervisor loads an MSR that requires ring-3 handling, we cause a VM-entry failure,
             * recording the MSR index in the VM-exit qualification (as per the Intel spec.) and indicating it
             * further with our own, specific diagnostic code. Later, we can try to implement handling of the
             * MSR in ring-0 if possible, or come up with a better, generic solution.
6313 */
6314 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
6315 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
6316 ? kVmxVDiag_Vmentry_MsrLoadRing3
6317 : kVmxVDiag_Vmentry_MsrLoad;
6318 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
6319 }
6320 else
6321 {
6322 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
6323 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadRsvd);
6324 }
6325 }
6326 }
6327 else
6328 {
6329 AssertMsgFailed(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysAutoMsrArea, rc));
6330 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadPtrReadPhys);
6331 }
6332
6333 NOREF(pszInstr);
6334 NOREF(pszFailure);
6335 return VINF_SUCCESS;
6336}
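
/*
 * Each VM-entry MSR-load area entry is 16 bytes: the MSR index, a reserved dword
 * that must be zero, and the 64-bit value to load (see VMXAUTOMSR). The per-entry
 * acceptance test in the loop above boils down to the following sketch; the helper
 * name is illustrative only and not part of this file.
 */
#if 0 /* Illustrative sketch only. */
DECLINLINE(bool) iemVmxSketchIsAutoMsrLoadable(VMXAUTOMSR const *pMsr)
{
    return !pMsr->u32Reserved                                   /* Reserved dword MBZ. */
        && pMsr->u32Msr != MSR_K8_FS_BASE                       /* Disallowed in the load area... */
        && pMsr->u32Msr != MSR_K8_GS_BASE
        && pMsr->u32Msr != MSR_K6_EFER
        && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
        && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8;     /* ...as is the x2APIC MSR range. */
}
#endif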
6337
6338
6339/**
6340 * Loads the guest-state non-register state as part of VM-entry.
6341 *
6343 * @param pVCpu The cross context virtual CPU structure.
6344 *
6345 * @remarks This must be called only after loading the nested-guest register state
6346 * (especially nested-guest RIP).
6347 */
6348IEM_STATIC void iemVmxVmentryLoadGuestNonRegState(PVMCPU pVCpu)
6349{
6350 /*
6351 * Load guest non-register state.
6352 * See Intel spec. 26.6 "Special Features of VM Entry"
6353 */
6354 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6355 uint32_t const uEntryIntInfo = pVmcs->u32EntryIntInfo;
6356 if (VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
6357 {
6358 /** @todo NSTVMX: Pending debug exceptions. */
6359 Assert(!(pVmcs->u64GuestPendingDbgXcpt.u));
6360
6361 if (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
6362 {
6363 /** @todo NSTVMX: Virtual-NMIs doesn't affect NMI blocking in the normal sense.
6364 * We probably need a different force flag for virtual-NMI
6365 * pending/blocking. */
6366 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI));
6367 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6368 }
6369 else
6370 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS));
6371
6372 if (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
6373 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
6374 else
6375 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
6376
6377 /* SMI blocking is irrelevant. We don't support SMIs yet. */
6378 }
6379
    /* Loading PDPTEs will be taken care of when we switch modes. We don't support EPT yet. */
6381 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
6382
6383 /* VPID is irrelevant. We don't support VPID yet. */
6384
6385 /* Clear address-range monitoring. */
6386 EMMonitorWaitClear(pVCpu);
6387}
6388
6389
6390/**
6391 * Loads the guest-state as part of VM-entry.
6392 *
6393 * @returns VBox status code.
6394 * @param pVCpu The cross context virtual CPU structure.
6395 * @param pszInstr The VMX instruction name (for logging purposes).
6396 *
6397 * @remarks This must be done after all the necessary steps prior to loading of
6398 * guest-state (e.g. checking various VMCS state).
6399 */
6400IEM_STATIC int iemVmxVmentryLoadGuestState(PVMCPU pVCpu, const char *pszInstr)
6401{
6402 iemVmxVmentryLoadGuestControlRegsMsrs(pVCpu);
6403 iemVmxVmentryLoadGuestSegRegs(pVCpu);
6404
6405 /*
6406 * Load guest RIP, RSP and RFLAGS.
6407 * See Intel spec. 26.3.2.3 "Loading Guest RIP, RSP and RFLAGS".
6408 */
6409 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6410 pVCpu->cpum.GstCtx.rsp = pVmcs->u64GuestRsp.u;
6411 pVCpu->cpum.GstCtx.rip = pVmcs->u64GuestRip.u;
6412 pVCpu->cpum.GstCtx.rflags.u = pVmcs->u64GuestRFlags.u;
6413
6414 /* Initialize the PAUSE-loop controls as part of VM-entry. */
6415 pVCpu->cpum.GstCtx.hwvirt.vmx.uFirstPauseLoopTick = 0;
6416 pVCpu->cpum.GstCtx.hwvirt.vmx.uPrevPauseTick = 0;
6417
6418 iemVmxVmentryLoadGuestNonRegState(pVCpu);
6419
6420 NOREF(pszInstr);
6421 return VINF_SUCCESS;
6422}
6423
6424
6425/**
 * Sets up the VMX-preemption timer.
6427 *
6428 * @param pVCpu The cross context virtual CPU structure.
6429 * @param pszInstr The VMX instruction name (for logging purposes).
6430 */
6431IEM_STATIC void iemVmxVmentrySetupPreemptTimer(PVMCPU pVCpu, const char *pszInstr)
6432{
6433 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6434 Assert(pVmcs);
6435 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
6436 {
6437 uint64_t const uVmentryTick = TMCpuTickGetNoCheck(pVCpu);
6438 pVCpu->cpum.GstCtx.hwvirt.vmx.uVmentryTick = uVmentryTick;
6439 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER);
6440
6441 Log(("%s: VM-entry set up VMX-preemption timer at %#RX64\n", pszInstr, uVmentryTick));
6442 }
6443 else
6444 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
6445
6446 NOREF(pszInstr);
6447}
6448
6449
6450/**
6451 * Performs event injection (if any) as part of VM-entry.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
6454 * @param pszInstr The VMX instruction name (for logging purposes).
6455 */
6456IEM_STATIC int iemVmxVmentryInjectEvent(PVMCPU pVCpu, const char *pszInstr)
6457{
6458 /*
6459 * Inject events.
6460 * See Intel spec. 26.5 "Event Injection".
6461 */
6462 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    uint32_t const uEntryIntInfo = pVmcs->u32EntryIntInfo;
6464 if (VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
6465 {
6466 /*
6467 * The event that is going to be made pending for injection is not subject to VMX intercepts,
         * thus we flag ignoring of intercepts. However, recursive exceptions, if any, raised during
         * delivery of the current event -are- subject to intercepts, hence this flag will be flipped
         * during the actual delivery of this event.
6471 */
6472 pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents = false;
6473
6474 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
6475 if (uType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT)
6476 {
6477 Assert(VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo) == VMX_ENTRY_INT_INFO_VECTOR_MTF);
6478 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_MTF);
6479 return VINF_SUCCESS;
6480 }
6481
6482 int rc = HMVmxEntryIntInfoInjectTrpmEvent(pVCpu, uEntryIntInfo, pVmcs->u32EntryXcptErrCode, pVmcs->u32EntryInstrLen,
6483 pVCpu->cpum.GstCtx.cr2);
6484 AssertRCReturn(rc, rc);
6485 }
6486
6487 NOREF(pszInstr);
6488 return VINF_SUCCESS;
6489}
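
/*
 * For reference, the VM-entry interruption-information field consumed above packs
 * the event into a single dword: bits 7:0 hold the vector, bits 10:8 the type,
 * bit 11 the deliver-error-code flag and bit 31 the valid bit. See Intel spec.
 * 24.8.3 "VM-Entry Controls for Event Injection". A sketch of composing one for a
 * #GP hardware exception, assuming the VMX_BF_ENTRY_INT_INFO_* bit-field
 * definitions (illustrative only):
 */
#if 0 /* Illustrative sketch only. */
uint32_t const uEntryIntInfoGp = RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VECTOR,         X86_XCPT_GP)
                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_TYPE,           VMX_ENTRY_INT_INFO_TYPE_HW_XCPT)
                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID, 1)
                               | RT_BF_MAKE(VMX_BF_ENTRY_INT_INFO_VALID,          1);
#endif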
6490
6491
6492/**
6493 * VMLAUNCH/VMRESUME instruction execution worker.
6494 *
6495 * @returns Strict VBox status code.
6496 * @param pVCpu The cross context virtual CPU structure.
6497 * @param cbInstr The instruction length in bytes.
6498 * @param uInstrId The instruction identity (VMXINSTRID_VMLAUNCH or
6499 * VMXINSTRID_VMRESUME).
6500 * @param pExitInfo Pointer to the VM-exit instruction information struct.
6501 * Optional, can be NULL.
6502 *
 * @remarks Common VMX instruction checks are already expected to have been done
 *          by the caller, i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
6505 */
6506IEM_STATIC VBOXSTRICTRC iemVmxVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId, PCVMXVEXITINFO pExitInfo)
6507{
6508 Assert( uInstrId == VMXINSTRID_VMLAUNCH
6509 || uInstrId == VMXINSTRID_VMRESUME);
6510 const char *pszInstr = uInstrId == VMXINSTRID_VMRESUME ? "vmresume" : "vmlaunch";
6511
6512 /* Nested-guest intercept. */
6513 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6514 {
6515 if (pExitInfo)
6516 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
6517 uint32_t const uExitReason = uInstrId == VMXINSTRID_VMRESUME ? VMX_EXIT_VMRESUME : VMX_EXIT_VMLAUNCH;
6518 return iemVmxVmexitInstrNeedsInfo(pVCpu, uExitReason, uInstrId, cbInstr);
6519 }
6520
6521 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
6522
6523 /* CPL. */
6524 if (pVCpu->iem.s.uCpl > 0)
6525 {
6526 Log(("%s: CPL %u -> #GP(0)\n", pszInstr, pVCpu->iem.s.uCpl));
6527 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_Cpl;
6528 return iemRaiseGeneralProtectionFault0(pVCpu);
6529 }
6530
6531 /* Current VMCS valid. */
6532 if (!IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
6533 {
6534 Log(("%s: VMCS pointer %#RGp invalid -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
6535 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrInvalid;
6536 iemVmxVmFailInvalid(pVCpu);
6537 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6538 return VINF_SUCCESS;
6539 }
6540
6541 /** @todo Distinguish block-by-MOV-SS from block-by-STI. Currently we
6542 * use block-by-STI here which is not quite correct. */
6543 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
6544 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
6545 {
6546 Log(("%s: VM entry with events blocked by MOV SS -> VMFail\n", pszInstr));
6547 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_BlocKMovSS;
6548 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_BLOCK_MOVSS);
6549 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6550 return VINF_SUCCESS;
6551 }
6552
6553 if (uInstrId == VMXINSTRID_VMLAUNCH)
6554 {
6555 /* VMLAUNCH with non-clear VMCS. */
6556 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_STATE_CLEAR)
6557 { /* likely */ }
6558 else
6559 {
6560 Log(("vmlaunch: VMLAUNCH with non-clear VMCS -> VMFail\n"));
6561 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsClear;
6562 iemVmxVmFail(pVCpu, VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS);
6563 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6564 return VINF_SUCCESS;
6565 }
6566 }
6567 else
6568 {
6569 /* VMRESUME with non-launched VMCS. */
6570 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_STATE_LAUNCHED)
6571 { /* likely */ }
6572 else
6573 {
6574 Log(("vmresume: VMRESUME with non-launched VMCS -> VMFail\n"));
6575 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsLaunch;
6576 iemVmxVmFail(pVCpu, VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS);
6577 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6578 return VINF_SUCCESS;
6579 }
6580 }
6581
6582 /*
6583 * Load the current VMCS.
6584 */
6585 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
6586 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs),
6587 IEM_VMX_GET_CURRENT_VMCS(pVCpu), VMX_V_VMCS_SIZE);
6588 if (RT_FAILURE(rc))
6589 {
6590 Log(("%s: Failed to read VMCS at %#RGp, rc=%Rrc\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu), rc));
6591 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrReadPhys;
6592 return rc;
6593 }
6594
6595 /*
6596 * We are allowed to cache VMCS related data structures (such as I/O bitmaps, MSR bitmaps)
6597 * while entering VMX non-root mode. We do some of this while checking VM-execution
6598 * controls. The guest hypervisor should not make assumptions and cannot expect
6599 * predictable behavior if changes to these structures are made in guest memory while
6600 * executing in VMX non-root mode. As far as VirtualBox is concerned, the guest cannot
     * modify them anyway as we cache them in host memory. We trade memory for speed here.
6602 *
6603 * See Intel spec. 24.11.4 "Software Access to Related Structures".
6604 */
6605 rc = iemVmxVmentryCheckExecCtls(pVCpu, pszInstr);
6606 if (RT_SUCCESS(rc))
6607 {
6608 rc = iemVmxVmentryCheckExitCtls(pVCpu, pszInstr);
6609 if (RT_SUCCESS(rc))
6610 {
6611 rc = iemVmxVmentryCheckEntryCtls(pVCpu, pszInstr);
6612 if (RT_SUCCESS(rc))
6613 {
6614 rc = iemVmxVmentryCheckHostState(pVCpu, pszInstr);
6615 if (RT_SUCCESS(rc))
6616 {
6617 /* Save the guest force-flags as VM-exits can occur from this point on. */
6618 iemVmxVmentrySaveForceFlags(pVCpu);
6619
                    /* Initialize the VM-exit qualification field as it is MBZ for VM-exits where it isn't specified. */
6621 iemVmxVmcsSetExitQual(pVCpu, 0);
6622
6623 rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
6624 if (RT_SUCCESS(rc))
6625 {
6626 rc = iemVmxVmentryLoadGuestState(pVCpu, pszInstr);
6627 if (RT_SUCCESS(rc))
6628 {
6629 rc = iemVmxVmentryLoadGuestAutoMsrs(pVCpu, pszInstr);
6630 if (RT_SUCCESS(rc))
6631 {
6632 Assert(rc != VINF_CPUM_R3_MSR_WRITE);
6633
6634 /* VMLAUNCH instruction must update the VMCS launch state. */
6635 if (uInstrId == VMXINSTRID_VMLAUNCH)
6636 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = VMX_V_VMCS_STATE_LAUNCHED;
6637
6638 /* Perform the VMX transition (PGM updates). */
6639 VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
6640 if (rcStrict == VINF_SUCCESS)
6641 { /* likely */ }
6642 else if (RT_SUCCESS(rcStrict))
6643 {
6644 Log3(("%s: iemVmxWorldSwitch returns %Rrc -> Setting passup status\n", pszInstr,
6645 VBOXSTRICTRC_VAL(rcStrict)));
6646 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6647 }
6648 else
6649 {
6650 Log3(("%s: iemVmxWorldSwitch failed! rc=%Rrc\n", pszInstr, VBOXSTRICTRC_VAL(rcStrict)));
6651 return rcStrict;
6652 }
6653
6654 /* We've now entered nested-guest execution. */
6655 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
6656
6657 /*
6658 * The priority of potential VM-exits during VM-entry is important.
6659 * The priorities of VM-exits and events are listed from highest
6660 * to lowest as follows:
6661 *
6662 * 1. Event injection.
6663 * 2. TPR below threshold / APIC-write.
6664 * 3. SMI.
6665 * 4. INIT.
6666 * 5. MTF exit.
6667 * 6. Pending debug exceptions.
6668 * 7. Debug-trap exceptions.
6669 * 8. VMX-preemption timer.
6670 * 9. NMI-window exit.
6671 * 10. NMI injection.
6672 * 11. Interrupt-window exit.
6673 * 12. Interrupt injection.
6674 */
6675
6676 /* Setup the VMX-preemption timer. */
6677 iemVmxVmentrySetupPreemptTimer(pVCpu, pszInstr);
6678
6679 /* Now that we've switched page tables, we can inject events if any. */
6680 iemVmxVmentryInjectEvent(pVCpu, pszInstr);
6681
6682 return VINF_SUCCESS;
6683 }
6684 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED);
6685 }
6686 }
6687 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED);
6688 }
6689
6690 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_HOST_STATE);
6691 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6692 return VINF_SUCCESS;
6693 }
6694 }
6695 }
6696
6697 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
6698 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6699 return VINF_SUCCESS;
6700}
6701
6702
6703/**
6704 * Checks whether an RDMSR or WRMSR instruction for the given MSR is intercepted
6705 * (causes a VM-exit) or not.
6706 *
6707 * @returns @c true if the instruction is intercepted, @c false otherwise.
6708 * @param pVCpu The cross context virtual CPU structure.
 * @param   uExitReason     The VM-exit reason (VMX_EXIT_RDMSR or
6710 * VMX_EXIT_WRMSR).
6711 * @param idMsr The MSR.
6712 */
6713IEM_STATIC bool iemVmxIsRdmsrWrmsrInterceptSet(PVMCPU pVCpu, uint32_t uExitReason, uint32_t idMsr)
6714{
6715 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
6716 Assert( uExitReason == VMX_EXIT_RDMSR
6717 || uExitReason == VMX_EXIT_WRMSR);
6718
6719 /* Consult the MSR bitmap if the feature is supported. */
6720 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6721 Assert(pVmcs);
6722 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
6723 {
6724 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
6725 if (uExitReason == VMX_EXIT_RDMSR)
6726 {
6727 VMXMSREXITREAD enmRead;
6728 int rc = HMVmxGetMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, &enmRead,
6729 NULL /* penmWrite */);
6730 AssertRC(rc);
6731 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
6732 return true;
6733 }
6734 else
6735 {
6736 VMXMSREXITWRITE enmWrite;
6737 int rc = HMVmxGetMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, NULL /* penmRead */,
6738 &enmWrite);
6739 AssertRC(rc);
6740 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
6741 return true;
6742 }
6743 return false;
6744 }
6745
6746 /* Without MSR bitmaps, all MSR accesses are intercepted. */
6747 return true;
6748}
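
/*
 * For reference, the 4K MSR bitmap consulted above is laid out in four 1KB quarters
 * (Intel spec. 24.6.9 "MSR-Bitmap Address"): read bits for low MSRs (0x00000000 to
 * 0x00001fff), read bits for high MSRs (0xc0000000 to 0xc0001fff), then the
 * corresponding write bits. A sketch of the read-intercept lookup assuming that
 * layout (illustrative only; this is not how HMVmxGetMsrPermission is declared):
 */
#if 0 /* Illustrative sketch only. */
static bool iemVmxSketchIsMsrReadIntercepted(uint8_t const *pbMsrBitmap, uint32_t idMsr)
{
    uint32_t offBitmap;
    if (idMsr <= UINT32_C(0x00001fff))
        offBitmap = 0;                                  /* Read bitmap for low MSRs. */
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
    {
        offBitmap = 0x400;                              /* Read bitmap for high MSRs. */
        idMsr    &= UINT32_C(0x1fff);
    }
    else
        return true;                                    /* Out-of-range MSRs are always intercepted. */
    return RT_BOOL(pbMsrBitmap[offBitmap + (idMsr >> 3)] & RT_BIT(idMsr & 7));
}
#endif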
6749
6750
6751/**
6752 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field is
6753 * intercepted (causes a VM-exit) or not.
6754 *
6755 * @returns @c true if the instruction is intercepted, @c false otherwise.
6756 * @param pVCpu The cross context virtual CPU structure.
 * @param   uExitReason     The VM-exit reason (VMX_EXIT_VMREAD or
 *                          VMX_EXIT_VMWRITE).
 * @param   u64FieldEnc     The VMCS field encoding.
6760 */
6761IEM_STATIC bool iemVmxIsVmreadVmwriteInterceptSet(PVMCPU pVCpu, uint32_t uExitReason, uint64_t u64FieldEnc)
6762{
6763 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
6764 Assert( uExitReason == VMX_EXIT_VMREAD
6765 || uExitReason == VMX_EXIT_VMWRITE);
6766
6767 /* Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted. */
6768 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing)
6769 return true;
6770
6771 /*
6772 * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE is intercepted.
6773 * This excludes any reserved bits in the valid parts of the field encoding (i.e. bit 12).
6774 */
6775 if (u64FieldEnc & VMX_VMCS_ENC_RSVD_MASK)
6776 return true;
6777
6778 /* Finally, consult the VMREAD/VMWRITE bitmap whether to intercept the instruction or not. */
6779 uint32_t u32FieldEnc = RT_LO_U32(u64FieldEnc);
6780 Assert(u32FieldEnc >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
6781 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
6782 uint8_t const *pbBitmap = uExitReason == VMX_EXIT_VMREAD
6783 ? (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap)
6784 : (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap);
6785 pbBitmap += (u32FieldEnc >> 3);
6786 if (*pbBitmap & RT_BIT(u32FieldEnc & 7))
6787 return true;
6788
6789 return false;
6790}
6791
6792
6793/**
 * VMREAD common (memory/register) instruction execution worker.
6795 *
6796 * @returns Strict VBox status code.
6797 * @param pVCpu The cross context virtual CPU structure.
6798 * @param cbInstr The instruction length in bytes.
6799 * @param pu64Dst Where to write the VMCS value (only updated when
6800 * VINF_SUCCESS is returned).
6801 * @param u64FieldEnc The VMCS field encoding.
6802 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
6803 * be NULL.
6804 */
6805IEM_STATIC VBOXSTRICTRC iemVmxVmreadCommon(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
6806 PCVMXVEXITINFO pExitInfo)
6807{
6808 /* Nested-guest intercept. */
6809 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6810 && iemVmxIsVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMREAD, u64FieldEnc))
6811 {
6812 if (pExitInfo)
6813 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
6814 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMREAD, VMXINSTRID_VMREAD, cbInstr);
6815 }
6816
6817 /* CPL. */
6818 if (pVCpu->iem.s.uCpl > 0)
6819 {
6820 Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
6821 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_Cpl;
6822 return iemRaiseGeneralProtectionFault0(pVCpu);
6823 }
6824
6825 /* VMCS pointer in root mode. */
6826 if ( IEM_VMX_IS_ROOT_MODE(pVCpu)
6827 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
6828 {
6829 Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
6830 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrInvalid;
6831 iemVmxVmFailInvalid(pVCpu);
6832 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6833 return VINF_SUCCESS;
6834 }
6835
6836 /* VMCS-link pointer in non-root mode. */
6837 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6838 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
6839 {
6840 Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
6841 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_LinkPtrInvalid;
6842 iemVmxVmFailInvalid(pVCpu);
6843 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6844 return VINF_SUCCESS;
6845 }
6846
6847 /* Supported VMCS field. */
    if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
6849 {
6850 Log(("vmread: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
6851 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_FieldInvalid;
6852 iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
6853 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6854 return VINF_SUCCESS;
6855 }
6856
6857 /*
6858 * Setup reading from the current or shadow VMCS.
6859 */
6860 uint8_t *pbVmcs;
6861 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6862 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
6863 else
6864 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6865 Assert(pbVmcs);
6866
6867 VMXVMCSFIELDENC FieldEnc;
6868 FieldEnc.u = RT_LO_U32(u64FieldEnc);
6869 uint8_t const uWidth = FieldEnc.n.u2Width;
6870 uint8_t const uType = FieldEnc.n.u2Type;
6871 uint8_t const uWidthType = (uWidth << 2) | uType;
6872 uint8_t const uIndex = FieldEnc.n.u8Index;
6873 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
6874 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
6875
6876 /*
6877 * Read the VMCS component based on the field's effective width.
6878 *
     * The effective width is the field width adjusted down to 32 bits for 64-bit
     * fields when the access-type indicates the high part (little endian).
     *
     * Note! The caller is responsible for trimming the result and updating registers
     * or memory locations as required. Here we just zero-extend to the largest
     * type (i.e. 64 bits).
6885 */
6886 uint8_t *pbField = pbVmcs + offField;
6887 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
6888 switch (uEffWidth)
6889 {
6890 case VMX_VMCS_ENC_WIDTH_64BIT:
6891 case VMX_VMCS_ENC_WIDTH_NATURAL: *pu64Dst = *(uint64_t *)pbField; break;
6892 case VMX_VMCS_ENC_WIDTH_32BIT: *pu64Dst = *(uint32_t *)pbField; break;
6893 case VMX_VMCS_ENC_WIDTH_16BIT: *pu64Dst = *(uint16_t *)pbField; break;
6894 }
6895 return VINF_SUCCESS;
6896}
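
/*
 * Worked example of the field-encoding decode above: the guest ES selector field
 * (encoding 0x0800, VMX_VMCS16_GUEST_ES_SEL) has access-type 0, index 0, type 2
 * (guest state) and width 0 (16-bit). Thus uWidthType = (0 << 2) | 2 and uIndex = 0,
 * and the map lookup resolves to the offset of GuestEs in the virtual VMCS
 * (illustrative sketch only; the helper name is not part of this file):
 */
#if 0 /* Illustrative sketch only. */
static void iemVmxSketchDecodeGuestEsField(void)
{
    VMXVMCSFIELDENC FieldEncEs;
    FieldEncEs.u = 0x0800;      /* Width 0 (16-bit), type 2 (guest state), index 0. */
    uint16_t const offGuestEs = g_aoffVmcsMap[(FieldEncEs.n.u2Width << 2) | FieldEncEs.n.u2Type][FieldEncEs.n.u8Index];
    Assert(offGuestEs == RT_UOFFSETOF(VMXVVMCS, GuestEs));
    NOREF(offGuestEs);
}
#endif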
6897
6898
6899/**
6900 * VMREAD (64-bit register) instruction execution worker.
6901 *
6902 * @returns Strict VBox status code.
6903 * @param pVCpu The cross context virtual CPU structure.
6904 * @param cbInstr The instruction length in bytes.
6905 * @param pu64Dst Where to store the VMCS field's value.
6906 * @param u64FieldEnc The VMCS field encoding.
6907 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
6908 * be NULL.
6909 */
6910IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg64(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
6911 PCVMXVEXITINFO pExitInfo)
6912{
6913 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
6914 if (rcStrict == VINF_SUCCESS)
6915 {
6916 iemVmxVmreadSuccess(pVCpu, cbInstr);
6917 return VINF_SUCCESS;
6918 }
6919
6920 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6921 return rcStrict;
6922}
6923
6924
6925/**
6926 * VMREAD (32-bit register) instruction execution worker.
6927 *
6928 * @returns Strict VBox status code.
6929 * @param pVCpu The cross context virtual CPU structure.
6930 * @param cbInstr The instruction length in bytes.
6931 * @param pu32Dst Where to store the VMCS field's value.
6932 * @param u32FieldEnc The VMCS field encoding.
6933 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
6934 * be NULL.
6935 */
6936IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg32(PVMCPU pVCpu, uint8_t cbInstr, uint32_t *pu32Dst, uint64_t u32FieldEnc,
6937 PCVMXVEXITINFO pExitInfo)
6938{
6939 uint64_t u64Dst;
6940 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u32FieldEnc, pExitInfo);
6941 if (rcStrict == VINF_SUCCESS)
6942 {
6943 *pu32Dst = u64Dst;
6944 iemVmxVmreadSuccess(pVCpu, cbInstr);
6945 return VINF_SUCCESS;
6946 }
6947
6948 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
6949 return rcStrict;
6950}
6951
6952
6953/**
6954 * VMREAD (memory) instruction execution worker.
6955 *
6956 * @returns Strict VBox status code.
6957 * @param pVCpu The cross context virtual CPU structure.
6958 * @param cbInstr The instruction length in bytes.
 * @param   iEffSeg         The effective segment register to use with @a GCPtrDst.
 * @param   enmEffAddrMode  The effective addressing mode.
 * @param   GCPtrDst        The guest linear address at which to store the VMCS
 *                          field's value.
6965 * @param u64FieldEnc The VMCS field encoding.
6966 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
6967 * be NULL.
6968 */
6969IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode,
6970 RTGCPTR GCPtrDst, uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
6971{
6972 uint64_t u64Dst;
6973 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u64FieldEnc, pExitInfo);
6974 if (rcStrict == VINF_SUCCESS)
6975 {
6976 /*
6977 * Write the VMCS field's value to the location specified in guest-memory.
6978 *
6979 * The pointer size depends on the address size (address-size prefix allowed).
6980 * The operand size depends on IA-32e mode (operand-size prefix not allowed).
6981 */
6982 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
6983 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
6984 GCPtrDst &= s_auAddrSizeMasks[enmEffAddrMode];
6985
6986 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6987 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
6988 else
6989 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
6990 if (rcStrict == VINF_SUCCESS)
6991 {
6992 iemVmxVmreadSuccess(pVCpu, cbInstr);
6993 return VINF_SUCCESS;
6994 }
6995
6996 Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
6997 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrMap;
6998 return rcStrict;
6999 }
7000
7001 Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7002 return rcStrict;
7003}
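
/*
 * Note! The address-size masking above means only the low 16 or 32 bits of the
 * computed effective address are used when a 16-bit or 32-bit address size is in
 * effect, mirroring how the CPU truncates effective addresses. For example, with
 * a hypothetical computed address (illustrative sketch only):
 */
#if 0 /* Illustrative sketch only. */
static void iemVmxSketchAddrSizeMasking(void)
{
    RTGCPTR GCPtrEff = UINT64_C(0xffffffff80001234);    /* Hypothetical computed effective address. */
    GCPtrEff &= UINT64_C(0xffffffff);                   /* 32-bit address size -> keep low 32 bits. */
    Assert(GCPtrEff == UINT32_C(0x80001234));
}
#endif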
7004
7005
7006/**
7007 * VMWRITE instruction execution worker.
7008 *
7009 * @returns Strict VBox status code.
7010 * @param pVCpu The cross context virtual CPU structure.
7011 * @param cbInstr The instruction length in bytes.
7012 * @param iEffSeg The effective segment register to use with @a u64Val.
7013 * Pass UINT8_MAX if it is a register access.
7014 * @param enmEffAddrMode The effective addressing mode (only used with memory
7015 * operand).
 * @param   u64Val          The value to write (or the guest linear address of
 *                          the value); @a iEffSeg indicates whether it is a
 *                          memory operand.
7019 * @param u64FieldEnc The VMCS field encoding.
7020 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7021 * be NULL.
7022 */
7023IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode, uint64_t u64Val,
7024 uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
7025{
7026 /* Nested-guest intercept. */
7027 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7028 && iemVmxIsVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMWRITE, u64FieldEnc))
7029 {
7030 if (pExitInfo)
7031 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7032 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMWRITE, VMXINSTRID_VMWRITE, cbInstr);
7033 }
7034
7035 /* CPL. */
7036 if (pVCpu->iem.s.uCpl > 0)
7037 {
7038 Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7039 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_Cpl;
7040 return iemRaiseGeneralProtectionFault0(pVCpu);
7041 }
7042
7043 /* VMCS pointer in root mode. */
7044 if ( IEM_VMX_IS_ROOT_MODE(pVCpu)
7045 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
7046 {
7047 Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7048 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrInvalid;
7049 iemVmxVmFailInvalid(pVCpu);
7050 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7051 return VINF_SUCCESS;
7052 }
7053
7054 /* VMCS-link pointer in non-root mode. */
7055 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7056 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
7057 {
7058 Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
7059 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_LinkPtrInvalid;
7060 iemVmxVmFailInvalid(pVCpu);
7061 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7062 return VINF_SUCCESS;
7063 }
7064
7065 /* If the VMWRITE instruction references memory, access the specified memory operand. */
7066 bool const fIsRegOperand = iEffSeg == UINT8_MAX;
7067 if (!fIsRegOperand)
7068 {
7069 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
7070 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
7071 RTGCPTR const GCPtrVal = u64Val & s_auAddrSizeMasks[enmEffAddrMode];
7072
7073 /* Read the value from the specified guest memory location. */
7074 VBOXSTRICTRC rcStrict;
7075 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7076 rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
7077 else
7078 {
7079 uint32_t u32Val;
7080 rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iEffSeg, GCPtrVal);
7081 u64Val = u32Val;
7082 }
7083 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7084 {
7085 Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
7086 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrMap;
7087 return rcStrict;
7088 }
7089 }
7090 else
7091 Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
7092
7093 /* Supported VMCS field. */
7094 if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
7095 {
7096 Log(("vmwrite: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
7097 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldInvalid;
7098 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
7099 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7100 return VINF_SUCCESS;
7101 }
7102
7103 /* Read-only VMCS field. */
7104 bool const fIsFieldReadOnly = HMVmxIsVmcsFieldReadOnly(u64FieldEnc);
7105 if ( fIsFieldReadOnly
7106 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
7107 {
7108 Log(("vmwrite: Write to read-only VMCS component %#RX64 -> VMFail\n", u64FieldEnc));
7109 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldRo;
7110 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
7111 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7112 return VINF_SUCCESS;
7113 }
7114
7115 /*
7116 * Setup writing to the current or shadow VMCS.
7117 */
7118 uint8_t *pbVmcs;
7119 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7120 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
7121 else
7122 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7123 Assert(pbVmcs);
7124
7125 VMXVMCSFIELDENC FieldEnc;
7126 FieldEnc.u = RT_LO_U32(u64FieldEnc);
7127 uint8_t const uWidth = FieldEnc.n.u2Width;
7128 uint8_t const uType = FieldEnc.n.u2Type;
7129 uint8_t const uWidthType = (uWidth << 2) | uType;
7130 uint8_t const uIndex = FieldEnc.n.u8Index;
7131 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
7132 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
7133
7134 /*
7135 * Write the VMCS component based on the field's effective width.
7136 *
     * The effective width is the field width adjusted down to 32 bits for 64-bit
     * fields when the access-type indicates the high part (little endian).
7139 */
7140 uint8_t *pbField = pbVmcs + offField;
7141 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
7142 switch (uEffWidth)
7143 {
7144 case VMX_VMCS_ENC_WIDTH_64BIT:
7145 case VMX_VMCS_ENC_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
7146 case VMX_VMCS_ENC_WIDTH_32BIT: *(uint32_t *)pbField = u64Val; break;
7147 case VMX_VMCS_ENC_WIDTH_16BIT: *(uint16_t *)pbField = u64Val; break;
7148 }
7149
7150 iemVmxVmSucceed(pVCpu);
7151 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7152 return VINF_SUCCESS;
7153}
7154
7155
7156/**
7157 * VMCLEAR instruction execution worker.
7158 *
7159 * @returns Strict VBox status code.
7160 * @param pVCpu The cross context virtual CPU structure.
7161 * @param cbInstr The instruction length in bytes.
7162 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
7163 * @param GCPtrVmcs The linear address of the VMCS pointer.
7164 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7165 * be NULL.
7166 *
 * @remarks Common VMX instruction checks are already expected to have been done
 *          by the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7169 */
7170IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
7171 PCVMXVEXITINFO pExitInfo)
7172{
7173 /* Nested-guest intercept. */
7174 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7175 {
7176 if (pExitInfo)
7177 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7178 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMCLEAR, VMXINSTRID_NONE, cbInstr);
7179 }
7180
7181 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7182
7183 /* CPL. */
7184 if (pVCpu->iem.s.uCpl > 0)
7185 {
7186 Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7187 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_Cpl;
7188 return iemRaiseGeneralProtectionFault0(pVCpu);
7189 }
7190
7191 /* Get the VMCS pointer from the location specified by the source memory operand. */
7192 RTGCPHYS GCPhysVmcs;
7193 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
7194 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7195 {
7196 Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
7197 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrMap;
7198 return rcStrict;
7199 }
7200
7201 /* VMCS pointer alignment. */
7202 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
7203 {
7204 Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
7205 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAlign;
7206 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
7207 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7208 return VINF_SUCCESS;
7209 }
7210
7211 /* VMCS physical-address width limits. */
7212 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
7213 {
7214 Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
7215 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrWidth;
7216 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
7217 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7218 return VINF_SUCCESS;
7219 }
7220
7221 /* VMCS is not the VMXON region. */
7222 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
7223 {
7224 Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
7225 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrVmxon;
7226 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
7227 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7228 return VINF_SUCCESS;
7229 }
7230
7231 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
7232 restriction imposed by our implementation. */
7233 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
7234 {
7235 Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
7236 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAbnormal;
7237 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
7238 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7239 return VINF_SUCCESS;
7240 }
7241
7242 /*
7243 * VMCLEAR allows committing and clearing any valid VMCS pointer.
7244 *
7245 * If the current VMCS is the one being cleared, set its state to 'clear' and commit
7246 * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
7247 * to 'clear'.
7248 */
7249 uint8_t const fVmcsStateClear = VMX_V_VMCS_STATE_CLEAR;
7250 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
7251 {
7252 Assert(GCPhysVmcs != NIL_RTGCPHYS); /* Paranoia. */
7253 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
7254 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsStateClear;
7255 iemVmxCommitCurrentVmcsToMemory(pVCpu);
7256 Assert(!IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
7257 }
7258 else
7259 {
        rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_UOFFSETOF(VMXVVMCS, fVmcsState),
7261 (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
7262 }
7263
7264 iemVmxVmSucceed(pVCpu);
7265 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7266 return rcStrict;
7267}
7268
7269
7270/**
7271 * VMPTRST instruction execution worker.
7272 *
7273 * @returns Strict VBox status code.
7274 * @param pVCpu The cross context virtual CPU structure.
7275 * @param cbInstr The instruction length in bytes.
7276 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
7277 * @param GCPtrVmcs The linear address of where to store the current VMCS
7278 * pointer.
7279 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7280 * be NULL.
7281 *
 * @remarks Common VMX instruction checks are already expected to have been done
 *          by the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7284 */
7285IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
7286 PCVMXVEXITINFO pExitInfo)
7287{
7288 /* Nested-guest intercept. */
7289 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7290 {
7291 if (pExitInfo)
7292 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7293 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRST, VMXINSTRID_NONE, cbInstr);
7294 }
7295
7296 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7297
7298 /* CPL. */
7299 if (pVCpu->iem.s.uCpl > 0)
7300 {
7301 Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7302 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_Cpl;
7303 return iemRaiseGeneralProtectionFault0(pVCpu);
7304 }
7305
7306 /* Set the VMCS pointer to the location specified by the destination memory operand. */
7307 AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
7308 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
7309 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7310 {
7311 iemVmxVmSucceed(pVCpu);
7312 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7313 return rcStrict;
7314 }
7315
7316 Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand %#Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7317 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_PtrMap;
7318 return rcStrict;
7319}
7320
7321
7322/**
7323 * VMPTRLD instruction execution worker.
7324 *
7325 * @returns Strict VBox status code.
7326 * @param pVCpu The cross context virtual CPU structure.
7327 * @param cbInstr The instruction length in bytes.
 * @param   iEffSeg         The effective segment register to use with @a GCPtrVmcs.
 * @param   GCPtrVmcs       The linear address of the current VMCS pointer.
7329 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7330 * be NULL.
7331 *
 * @remarks Common VMX instruction checks are already expected to have been done
 *          by the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7334 */
7335IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
7336 PCVMXVEXITINFO pExitInfo)
7337{
7338 /* Nested-guest intercept. */
7339 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7340 {
7341 if (pExitInfo)
7342 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7343 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRLD, VMXINSTRID_NONE, cbInstr);
7344 }
7345
7346 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7347
7348 /* CPL. */
7349 if (pVCpu->iem.s.uCpl > 0)
7350 {
7351 Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7352 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_Cpl;
7353 return iemRaiseGeneralProtectionFault0(pVCpu);
7354 }
7355
7356 /* Get the VMCS pointer from the location specified by the source memory operand. */
7357 RTGCPHYS GCPhysVmcs;
7358 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
7359 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7360 {
7361 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
7362 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrMap;
7363 return rcStrict;
7364 }
7365
7366 /* VMCS pointer alignment. */
7367 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
7368 {
7369 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
7370 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAlign;
7371 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
7372 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7373 return VINF_SUCCESS;
7374 }
7375
7376 /* VMCS physical-address width limits. */
    if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
    {
        Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrWidth;
        iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
        return VINF_SUCCESS;
    }

    /* VMCS is not the VMXON region. */
    if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
    {
        Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrVmxon;
        iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
        return VINF_SUCCESS;
    }

    /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
       restriction imposed by our implementation. */
    if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
    {
        Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAbnormal;
        iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
        return VINF_SUCCESS;
    }

    /* Read the VMCS revision ID from the VMCS. */
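    /* Only the leading 32-bit dword of the candidate VMCS is needed here: bits
       30:0 hold the revision ID and bit 31 the shadow-VMCS indicator. */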
    VMXVMCSREVID VmcsRevId;
    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
    if (RT_FAILURE(rc))
    {
        Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrReadPhys;
        return rc;
    }

    /* Verify that the VMCS revision specified by the guest matches what we reported
       to the guest, and check the VMCS shadowing feature. */
    if (   VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
        || (   VmcsRevId.n.fIsShadowVmcs
            && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
    {
        if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
        {
            Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFail()\n", VMX_V_VMCS_REVISION_ID,
                 VmcsRevId.n.u31RevisionId));
            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_VmcsRevId;
            iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
            return VINF_SUCCESS;
        }

        Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_ShadowVmcs;
        iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
        return VINF_SUCCESS;
    }
    /*
     * We only maintain the current VMCS in our virtual CPU context (CPUMCTX). Therefore,
     * VMPTRLD shall always flush any existing current VMCS back to guest memory before loading
     * a new VMCS as current.
     */
    if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
    {
        iemVmxCommitCurrentVmcsToMemory(pVCpu);
        IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
    }

    iemVmxVmSucceed(pVCpu);
    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    return VINF_SUCCESS;
}
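
/*
 * Illustrative guest-side VMPTRLD sequence (a hedged sketch, not part of the
 * build; region and helper names are hypothetical). A hypervisor stamps the
 * revision ID reported via IA32_VMX_BASIC into a 4K-aligned region in normal
 * RAM before loading it, matching the checks performed by the worker above:
 *
 *     uint32_t *pu32Vmcs = (uint32_t *)g_abMyVmcsRegion;   // 4K-aligned region
 *     pu32Vmcs[0] = MyReadVmxBasicRevisionId();            // bits 30:0; bit 31 must be clear
 *     uint64_t const uVmcsPhysAddr = MyVirtToPhys(pu32Vmcs);
 *     __asm__ __volatile__("vmptrld %0" : : "m" (uVmcsPhysAddr));
 *     // RFLAGS.CF/ZF then reflect VMfailInvalid/VMfailValid, as set here by
 *     // iemVmxVmFail/iemVmxVmSucceed.
 */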


/**
 * VMXON instruction execution worker.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cbInstr     The instruction length in bytes.
 * @param   iEffSeg     The effective segment register to use with @a GCPtrVmxon.
 * @param   GCPtrVmxon  The linear address of the VMXON pointer.
 * @param   pExitInfo   Pointer to the VM-exit instruction information struct.
 *                      Optional, can be NULL.
 *
 * @remarks Common VMX instruction checks are already expected to have been done
 *          by the caller, i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
 */
IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmxon,
                                    PCVMXVEXITINFO pExitInfo)
{
#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
    RT_NOREF5(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
    return VINF_EM_RAW_EMULATE_INSTR;
#else
    if (!IEM_VMX_IS_ROOT_MODE(pVCpu))
    {
        /* CPL. */
        if (pVCpu->iem.s.uCpl > 0)
        {
            Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cpl;
            return iemRaiseGeneralProtectionFault0(pVCpu);
        }

        /* A20M (A20 Masked) mode. */
        if (!PGMPhysIsA20Enabled(pVCpu))
        {
            Log(("vmxon: A20M mode -> #GP(0)\n"));
            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_A20M;
            return iemRaiseGeneralProtectionFault0(pVCpu);
        }

        /* CR0. */
        {
            /* CR0 MB1 bits. */
            uint64_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
            if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
            {
                Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
                pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed0;
                return iemRaiseGeneralProtectionFault0(pVCpu);
            }

            /* CR0 MBZ bits. */
            uint64_t const uCr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
            if (pVCpu->cpum.GstCtx.cr0 & ~uCr0Fixed1)
            {
                Log(("vmxon: CR0 fixed1 bits set -> #GP(0)\n"));
                pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed1;
                return iemRaiseGeneralProtectionFault0(pVCpu);
            }
        }

        /* CR4. */
        {
            /* CR4 MB1 bits. */
            uint64_t const uCr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
            if ((pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0) != uCr4Fixed0)
            {
                Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
                pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed0;
                return iemRaiseGeneralProtectionFault0(pVCpu);
            }

            /* CR4 MBZ bits. */
            uint64_t const uCr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
            if (pVCpu->cpum.GstCtx.cr4 & ~uCr4Fixed1)
            {
                Log(("vmxon: CR4 fixed1 bits set -> #GP(0)\n"));
                pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed1;
                return iemRaiseGeneralProtectionFault0(pVCpu);
            }
        }

        /* Feature control MSR's LOCK and VMXON bits. */
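        /* Per the Intel SDM, VMXON #GPs unless both the lock bit and the
           VMXON-outside-SMX bit are set in IA32_FEATURE_CONTROL. */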
        uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
        if (   (uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
            != (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
        {
            Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_MsrFeatCtl;
            return iemRaiseGeneralProtectionFault0(pVCpu);
        }

        /* Get the VMXON pointer from the location specified by the source memory operand. */
        RTGCPHYS GCPhysVmxon;
        VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
        if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
        {
            Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrMap;
            return rcStrict;
        }

        /* VMXON region pointer alignment. */
        if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
        {
            Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAlign;
            iemVmxVmFailInvalid(pVCpu);
            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
            return VINF_SUCCESS;
        }

        /* VMXON physical-address width limits. */
        if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
        {
            Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrWidth;
            iemVmxVmFailInvalid(pVCpu);
            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
            return VINF_SUCCESS;
        }

        /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
           restriction imposed by our implementation. */
        if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
        {
            Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAbnormal;
            iemVmxVmFailInvalid(pVCpu);
            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
            return VINF_SUCCESS;
        }

        /* Read the VMCS revision ID from the VMXON region. */
        VMXVMCSREVID VmcsRevId;
        int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
        if (RT_FAILURE(rc))
        {
            Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrReadPhys;
            return rc;
        }

        /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
        if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
        {
            /* Revision ID mismatch. */
            if (!VmcsRevId.n.fIsShadowVmcs)
            {
                Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
                     VmcsRevId.n.u31RevisionId));
                pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmcsRevId;
                iemVmxVmFailInvalid(pVCpu);
                iemRegAddToRipAndClearRF(pVCpu, cbInstr);
                return VINF_SUCCESS;
            }

            /* Shadow VMCS disallowed. */
            Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_ShadowVmcs;
            iemVmxVmFailInvalid(pVCpu);
            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
            return VINF_SUCCESS;
        }

        /*
         * Record that we're in VMX operation; INIT signals are blocked and A20M is disabled.
         */
        pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
        IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
        pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;

        /* Clear address-range monitoring. */
        EMMonitorWaitClear(pVCpu);
        /** @todo NSTVMX: Intel PT. */

        iemVmxVmSucceed(pVCpu);
        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
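        /* With nested hardware virtualization only implemented in IEM, force all
           further guest execution through the interpreter while in VMX operation. */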
# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
        return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
# else
        return VINF_SUCCESS;
# endif
    }
    else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
    {
        /* Nested-guest intercept. */
        if (pExitInfo)
            return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
        return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMXON, VMXINSTRID_NONE, cbInstr);
    }

    Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));

    /* CPL. */
    if (pVCpu->iem.s.uCpl > 0)
    {
        Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxRootCpl;
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }

    /* VMXON when already in VMX root mode. */
    iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
    pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxAlreadyRoot;
    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    return VINF_SUCCESS;
#endif
}
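
/*
 * Illustrative guest-side VMXON sequence (a hedged sketch, not part of the
 * build; helper and region names are hypothetical), mirroring the checks done
 * by the worker above: CR4.VMXE and the fixed CR0/CR4 bits must be in shape,
 * IA32_FEATURE_CONTROL must permit VMXON, and the 4K-aligned VMXON region must
 * be stamped with the VMCS revision ID before executing the instruction:
 *
 *     MyWriteCr4(MyReadCr4() | X86_CR4_VMXE);              // enable VMX in CR4
 *     uint32_t *pu32VmxonRegion = (uint32_t *)g_abMyVmxonRegion;
 *     pu32VmxonRegion[0] = MyReadVmxBasicRevisionId();     // revision ID, bit 31 clear
 *     uint64_t const uVmxonPhysAddr = MyVirtToPhys(pu32VmxonRegion);
 *     __asm__ __volatile__("vmxon %0" : : "m" (uVmxonPhysAddr));
 */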


/**
 * Implements 'VMXOFF'.
 *
 * @remarks Common VMX instruction checks are already expected to have been done
 *          by the caller, i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
 */
IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
{
# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
    RT_NOREF2(pVCpu, cbInstr);
    return VINF_EM_RAW_EMULATE_INSTR;
# else
    /* Nested-guest intercept. */
    if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
        return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMXOFF, cbInstr);

    /* CPL. */
    if (pVCpu->iem.s.uCpl > 0)
    {
        Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxoff_Cpl;
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }

    /* Dual monitor treatment of SMIs and SMM. */
    uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
    if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
    {
        iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
        return VINF_SUCCESS;
    }

    /* Record that we're no longer in VMX root operation; INIT is unblocked and A20M handling is restored. */
    pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
    Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);

    if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
    { /** @todo NSTVMX: Unblock SMI. */ }

    EMMonitorWaitClear(pVCpu);
    /** @todo NSTVMX: Unblock and enable A20M. */

    iemVmxVmSucceed(pVCpu);
    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
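    /* Lift the IEM-only execution policy again now that the guest has left VMX operation. */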
# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
    return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
# else
    return VINF_SUCCESS;
# endif
# endif
}


/**
 * Implements 'VMXON'.
 */
IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
{
    return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
}


/**
 * Implements 'VMLAUNCH'.
 */
IEM_CIMPL_DEF_0(iemCImpl_vmlaunch)
{
    return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMLAUNCH, NULL /* pExitInfo */);
}


/**
 * Implements 'VMRESUME'.
 */
IEM_CIMPL_DEF_0(iemCImpl_vmresume)
{
    return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMRESUME, NULL /* pExitInfo */);
}


/**
 * Implements 'VMPTRLD'.
 */
IEM_CIMPL_DEF_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
{
    return iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
}


/**
 * Implements 'VMPTRST'.
 */
IEM_CIMPL_DEF_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
{
    return iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
}


/**
 * Implements 'VMCLEAR'.
 */
IEM_CIMPL_DEF_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
{
    return iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
}

/**
 * Implements 'VMWRITE' register.
 */
IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64FieldEnc)
{
    return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, IEMMODE_64BIT /* N/A */, u64Val, u64FieldEnc,
                         NULL /* pExitInfo */);
}


/**
 * Implements 'VMWRITE' memory.
 */
IEM_CIMPL_DEF_4(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrVal, uint64_t, u64FieldEnc)
{
    return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrVal, u64FieldEnc, NULL /* pExitInfo */);
}


/**
 * Implements 'VMREAD' 64-bit register.
 */
IEM_CIMPL_DEF_2(iemCImpl_vmread64_reg, uint64_t *, pu64Dst, uint64_t, u64FieldEnc)
{
    return iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, NULL /* pExitInfo */);
}


/**
 * Implements 'VMREAD' 32-bit register.
 */
IEM_CIMPL_DEF_2(iemCImpl_vmread32_reg, uint32_t *, pu32Dst, uint32_t, u32FieldEnc)
{
    return iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u32FieldEnc, NULL /* pExitInfo */);
}

/**
 * Implements 'VMREAD' memory.
 */
IEM_CIMPL_DEF_4(iemCImpl_vmread_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrDst, uint64_t, u64FieldEnc)
{
    return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, u64FieldEnc, NULL /* pExitInfo */);
}


/**
 * Implements the VMX variant of 'PAUSE'.
 */
IEM_CIMPL_DEF_0(iemCImpl_vmx_pause)
{
    if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
    {
        VBOXSTRICTRC rcStrict = iemVmxVmexitInstrPause(pVCpu, cbInstr);
        if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
            return rcStrict;
    }

    /*
     * Outside VMX non-root operation or if the PAUSE instruction does not cause
     * a VM-exit, the instruction operates normally.
     */
    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    return VINF_SUCCESS;
}

#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */


/**
 * Implements 'VMCALL'.
 */
IEM_CIMPL_DEF_0(iemCImpl_vmcall)
{
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    /* Nested-guest intercept. */
    if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
        return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMCALL, cbInstr);
#endif

    /* Join forces with vmmcall. */
    return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
}