VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h@ 75510

Last change on this file was in revision 75510, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 Try to unify signalling of TPR/EOI/Self-IPI virtualization/APIC-write emulation operations. Also remove
the duplicate handling of virtualization of x2APIC MSR accesses (as distinct from MSR intercepts). We currently handle it in CPUM,
and since IEM calls into CPUM it is already covered there, so there is no need to handle it in IEM as well.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 316.1 KB
1/* $Id: IEMAllCImplVmxInstr.cpp.h 75510 2018-11-16 08:36:57Z vboxsync $ */
2/** @file
3 * IEM - VT-x instruction implementation.
4 */
5
6/*
7 * Copyright (C) 2011-2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
20/** @todo NSTVMX: The following VM-exit intercepts are pending:
21 * VMX_EXIT_IO_SMI
22 * VMX_EXIT_SMI
23 * VMX_EXIT_INT_WINDOW
24 * VMX_EXIT_NMI_WINDOW
25 * VMX_EXIT_GETSEC
26 * VMX_EXIT_RSM
27 * VMX_EXIT_MTF
28 * VMX_EXIT_MONITOR (APIC access VM-exit caused by MONITOR pending)
29 * VMX_EXIT_ERR_MACHINE_CHECK
30 * VMX_EXIT_TPR_BELOW_THRESHOLD
31 * VMX_EXIT_APIC_ACCESS
32 * VMX_EXIT_VIRTUALIZED_EOI
33 * VMX_EXIT_EPT_VIOLATION
34 * VMX_EXIT_EPT_MISCONFIG
35 * VMX_EXIT_INVEPT
36 * VMX_EXIT_PREEMPT_TIMER
37 * VMX_EXIT_INVVPID
38 * VMX_EXIT_APIC_WRITE
39 * VMX_EXIT_RDRAND
40 * VMX_EXIT_VMFUNC
41 * VMX_EXIT_ENCLS
42 * VMX_EXIT_RDSEED
43 * VMX_EXIT_PML_FULL
44 * VMX_EXIT_XSAVES
45 * VMX_EXIT_XRSTORS
46 */
47
48/**
49 * Map of VMCS field encodings to their virtual-VMCS structure offsets.
50 *
51 * The first array dimension is the VMCS field encoding's Width OR'ed with its Type,
52 * and the second dimension is the field's Index; see VMXVMCSFIELDENC.
53 */
54uint16_t const g_aoffVmcsMap[16][VMX_V_VMCS_MAX_INDEX + 1] =
55{
56 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
57 {
58 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u16Vpid),
59 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u16PostIntNotifyVector),
60 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u16EptpIndex),
61 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
62 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
63 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
64 },
65 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
66 {
67 /* 0-7 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
68 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
69 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
70 /* 24-25 */ UINT16_MAX, UINT16_MAX
71 },
72 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
73 {
74 /* 0 */ RT_UOFFSETOF(VMXVVMCS, GuestEs),
75 /* 1 */ RT_UOFFSETOF(VMXVVMCS, GuestCs),
76 /* 2 */ RT_UOFFSETOF(VMXVVMCS, GuestSs),
77 /* 3 */ RT_UOFFSETOF(VMXVVMCS, GuestDs),
78 /* 4 */ RT_UOFFSETOF(VMXVVMCS, GuestFs),
79 /* 5 */ RT_UOFFSETOF(VMXVVMCS, GuestGs),
80 /* 6 */ RT_UOFFSETOF(VMXVVMCS, GuestLdtr),
81 /* 7 */ RT_UOFFSETOF(VMXVVMCS, GuestTr),
82 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u16GuestIntStatus),
83 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u16PmlIndex),
84 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
85 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
86 },
87 /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
88 {
89 /* 0 */ RT_UOFFSETOF(VMXVVMCS, HostEs),
90 /* 1 */ RT_UOFFSETOF(VMXVVMCS, HostCs),
91 /* 2 */ RT_UOFFSETOF(VMXVVMCS, HostSs),
92 /* 3 */ RT_UOFFSETOF(VMXVVMCS, HostDs),
93 /* 4 */ RT_UOFFSETOF(VMXVVMCS, HostFs),
94 /* 5 */ RT_UOFFSETOF(VMXVVMCS, HostGs),
95 /* 6 */ RT_UOFFSETOF(VMXVVMCS, HostTr),
96 /* 7-14 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
97 /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
98 /* 23-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX
99 },
100 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
101 {
102 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64AddrIoBitmapA),
103 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64AddrIoBitmapB),
104 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64AddrMsrBitmap),
105 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64AddrExitMsrStore),
106 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64AddrExitMsrLoad),
107 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEntryMsrLoad),
108 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64ExecVmcsPtr),
109 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64AddrPml),
110 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64TscOffset),
111 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVirtApic),
112 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64AddrApicAccess),
113 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64AddrPostedIntDesc),
114 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u64VmFuncCtls),
115 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u64EptpPtr),
116 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap0),
117 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap1),
118 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap2),
119 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u64EoiExitBitmap3),
120 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEptpList),
121 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVmreadBitmap),
122 /* 20 */ RT_UOFFSETOF(VMXVVMCS, u64AddrVmwriteBitmap),
123 /* 21 */ RT_UOFFSETOF(VMXVVMCS, u64AddrXcptVeInfo),
124 /* 22 */ RT_UOFFSETOF(VMXVVMCS, u64XssBitmap),
125 /* 23 */ RT_UOFFSETOF(VMXVVMCS, u64AddrEnclsBitmap),
126 /* 24 */ UINT16_MAX,
127 /* 25 */ RT_UOFFSETOF(VMXVVMCS, u64TscMultiplier)
128 },
129 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
130 {
131 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64RoGuestPhysAddr),
132 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
133 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
134 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
135 /* 25 */ UINT16_MAX
136 },
137 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
138 {
139 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64VmcsLinkPtr),
140 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDebugCtlMsr),
141 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPatMsr),
142 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64GuestEferMsr),
143 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
144 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte0),
145 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte1),
146 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte2),
147 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPdpte3),
148 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64GuestBndcfgsMsr),
149 /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
150 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
151 },
152 /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
153 {
154 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64HostPatMsr),
155 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64HostEferMsr),
156 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64HostPerfGlobalCtlMsr),
157 /* 3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
158 /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
159 /* 19-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
160 },
161 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
162 {
163 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32PinCtls),
164 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32ProcCtls),
165 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32XcptBitmap),
166 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32XcptPFMask),
167 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32XcptPFMatch),
168 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32Cr3TargetCount),
169 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32ExitCtls),
170 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32ExitMsrStoreCount),
171 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u32ExitMsrLoadCount),
172 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u32EntryCtls),
173 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u32EntryMsrLoadCount),
174 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u32EntryIntInfo),
175 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u32EntryXcptErrCode),
176 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u32EntryInstrLen),
177 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u32TprThreshold),
178 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u32ProcCtls2),
179 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u32PleGap),
180 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u32PleWindow),
181 /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
182 },
183 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
184 {
185 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32RoVmInstrError),
186 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitReason),
187 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitIntInfo),
188 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitIntErrCode),
189 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
190 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
191 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitInstrLen),
192 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32RoExitInstrInfo),
193 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
194 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
195 /* 24-25 */ UINT16_MAX, UINT16_MAX
196 },
197 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
198 {
199 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32GuestEsLimit),
200 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u32GuestCsLimit),
201 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSsLimit),
202 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u32GuestDsLimit),
203 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u32GuestFsLimit),
204 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGsLimit),
205 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u32GuestLdtrLimit),
206 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u32GuestTrLimit),
207 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGdtrLimit),
208 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u32GuestIdtrLimit),
209 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u32GuestEsAttr),
210 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u32GuestCsAttr),
211 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSsAttr),
212 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u32GuestDsAttr),
213 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u32GuestFsAttr),
214 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u32GuestGsAttr),
215 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u32GuestLdtrAttr),
216 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u32GuestTrAttr),
217 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u32GuestIntrState),
218 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u32GuestActivityState),
219 /* 20 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSmBase),
220 /* 21 */ RT_UOFFSETOF(VMXVVMCS, u32GuestSysenterCS),
221 /* 22 */ UINT16_MAX,
222 /* 23 */ RT_UOFFSETOF(VMXVVMCS, u32PreemptTimer),
223 /* 24-25 */ UINT16_MAX, UINT16_MAX
224 },
225 /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
226 {
227 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u32HostSysenterCs),
228 /* 1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
229 /* 9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
230 /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
231 /* 25 */ UINT16_MAX
232 },
233 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_CONTROL: */
234 {
235 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64Cr0Mask),
236 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64Cr4Mask),
237 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64Cr0ReadShadow),
238 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64Cr4ReadShadow),
239 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target0),
240 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target1),
241 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target2),
242 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64Cr3Target3),
243 /* 8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
244 /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
245 /* 24-25 */ UINT16_MAX, UINT16_MAX
246 },
247 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
248 {
249 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64RoExitQual),
250 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRcx),
251 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRsi),
252 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRdi),
253 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64RoIoRip),
254 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64RoGuestLinearAddr),
255 /* 6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
256 /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
257 /* 22-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
258 },
259 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
260 {
261 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr0),
262 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr3),
263 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCr4),
264 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64GuestEsBase),
265 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64GuestCsBase),
266 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSsBase),
267 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDsBase),
268 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64GuestFsBase),
269 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64GuestGsBase),
270 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64GuestLdtrBase),
271 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64GuestTrBase),
272 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64GuestGdtrBase),
273 /* 12 */ RT_UOFFSETOF(VMXVVMCS, u64GuestIdtrBase),
274 /* 13 */ RT_UOFFSETOF(VMXVVMCS, u64GuestDr7),
275 /* 14 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRsp),
276 /* 15 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRip),
277 /* 16 */ RT_UOFFSETOF(VMXVVMCS, u64GuestRFlags),
278 /* 17 */ RT_UOFFSETOF(VMXVVMCS, u64GuestPendingDbgXcpt),
279 /* 18 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSysenterEsp),
280 /* 19 */ RT_UOFFSETOF(VMXVVMCS, u64GuestSysenterEip),
281 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
282 },
283 /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_HOST_STATE: */
284 {
285 /* 0 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr0),
286 /* 1 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr3),
287 /* 2 */ RT_UOFFSETOF(VMXVVMCS, u64HostCr4),
288 /* 3 */ RT_UOFFSETOF(VMXVVMCS, u64HostFsBase),
289 /* 4 */ RT_UOFFSETOF(VMXVVMCS, u64HostGsBase),
290 /* 5 */ RT_UOFFSETOF(VMXVVMCS, u64HostTrBase),
291 /* 6 */ RT_UOFFSETOF(VMXVVMCS, u64HostGdtrBase),
292 /* 7 */ RT_UOFFSETOF(VMXVVMCS, u64HostIdtrBase),
293 /* 8 */ RT_UOFFSETOF(VMXVVMCS, u64HostSysenterEsp),
294 /* 9 */ RT_UOFFSETOF(VMXVVMCS, u64HostSysenterEip),
295 /* 10 */ RT_UOFFSETOF(VMXVVMCS, u64HostRsp),
296 /* 11 */ RT_UOFFSETOF(VMXVVMCS, u64HostRip),
297 /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
298 /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
299 }
300};
301
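The table above is indexed by decomposing a VMCS field encoding into its width, type and index bit-fields; a slot holding UINT16_MAX means the field is not supported by the emulation. A minimal lookup sketch (not part of the original file), assuming the VMX_BF_VMCS_ENC_WIDTH and VMX_BF_VMCS_ENC_TYPE bit-field macros exist alongside the VMX_BF_VMCS_ENC_INDEX macro used later in this file:

/* Editorial sketch: how a VMCS field encoding selects a slot in g_aoffVmcsMap.
 * Assumes VMX_BF_VMCS_ENC_WIDTH/TYPE exist alongside VMX_BF_VMCS_ENC_INDEX. */
DECLINLINE(uint16_t) iemVmxSketchGetVmcsFieldOffset(uint32_t uFieldEnc)
{
    uint8_t const uWidth     = RT_BF_GET(uFieldEnc, VMX_BF_VMCS_ENC_WIDTH);   /* bits 14:13 */
    uint8_t const uType      = RT_BF_GET(uFieldEnc, VMX_BF_VMCS_ENC_TYPE);    /* bits 11:10 */
    uint8_t const uIndex     = RT_BF_GET(uFieldEnc, VMX_BF_VMCS_ENC_INDEX);   /* bits 9:1 */
    uint8_t const uWidthType = (uWidth << 2) | uType;                         /* first array dimension */
    AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, UINT16_MAX);                 /* second array dimension */
    return g_aoffVmcsMap[uWidthType][uIndex];                                 /* UINT16_MAX => unsupported field */
}

For example, VMX_VMCS16_VPID (encoding 0x0000) decodes to the 16-bit width, control type and index 0, i.e. the u16Vpid slot in the first row of the table.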
302
303/**
304 * Gets the ModR/M, SIB and displacement byte(s) from decoded opcodes given their
305 * relative offsets.
306 */
307# ifdef IEM_WITH_CODE_TLB
308# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) do { } while (0)
309# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) do { } while (0)
310# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
311# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) do { } while (0)
312# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
313# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) do { } while (0)
314# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
315# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) do { } while (0)
316# error "Implement me: Getting ModR/M, SIB, displacement needs to work even when instruction crosses a page boundary."
317# else /* !IEM_WITH_CODE_TLB */
318# define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
319 do \
320 { \
321 Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
322 (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
323 } while (0)
324
325# define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib) IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
326
327# define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
328 do \
329 { \
330 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
331 uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
332 uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
333 (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
334 } while (0)
335
336# define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
337 do \
338 { \
339 Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
340 (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
341 } while (0)
342
343# define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
344 do \
345 { \
346 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
347 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
348 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
349 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
350 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
351 (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
352 } while (0)
353
354# define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
355 do \
356 { \
357 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
358 (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
359 } while (0)
360
361# define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
362 do \
363 { \
364 Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
365 (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
366 } while (0)
367
368# define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
369 do \
370 { \
371 Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
372 uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
373 uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
374 uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
375 uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
376 (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
377 } while (0)
378# endif /* !IEM_WITH_CODE_TLB */
379
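A short usage sketch (not part of the original file, with a hypothetical helper name) of the accessors above, as used by iemVmxGetExitInstrInfo() further down: the ModR/M byte sits at pVCpu->iem.s.offModRm, an optional SIB byte immediately follows it, and any displacement follows that:

/* Editorial sketch: fetching ModR/M, SIB and a disp8 for a memory operand. */
DECLINLINE(void) iemVmxSketchGetModRmSibDisp8(PVMCPU pVCpu, uint8_t *pbRm, uint8_t *pbSib, uint32_t *pu32Disp)
{
    uint8_t const offModRm = pVCpu->iem.s.offModRm;
    IEM_MODRM_GET_U8(pVCpu, *pbRm, offModRm);                 /* ModR/M byte */
    IEM_SIB_GET_U8(pVCpu, *pbSib, offModRm + 1);              /* SIB byte (only when mod != 3 && rm == 4) */
    IEM_DISP_GET_S8_SX_U32(pVCpu, *pu32Disp, offModRm + 2);   /* disp8, sign-extended (only when mod == 1) */
}
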
380/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
381#define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)
382
383/** Whether a shadow VMCS is present for the given VCPU. */
384#define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
385
386/** Gets the guest-physical address of the VMXON region for the given VCPU. */
387#define IEM_VMX_GET_VMXON_PTR(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
388
389/** Gets the guest-physical address of the current VMCS for the given VCPU. */
390#define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
391
392/** Whether a current VMCS is present for the given VCPU. */
393#define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
394
395/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
396#define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
397 do \
398 { \
399 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
400 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
401 } while (0)
402
403/** Clears any current VMCS for the given VCPU. */
404#define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
405 do \
406 { \
407 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
408 } while (0)
409
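A brief usage sketch (not part of the original file, with a hypothetical helper name): VMPTRLD emulation would record the new current VMCS with the setter above, after which other code can test for and fetch it:

/* Editorial sketch: recording and querying the current VMCS. */
DECLINLINE(void) iemVmxSketchSetAndLogCurrentVmcs(PVMCPU pVCpu, RTGCPHYS GCPhysVmcs)
{
    IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);    /* e.g. done by VMPTRLD emulation */
    if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
        Log(("Current VMCS at %#RGp\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
}
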
410/** Checks that the CPU is in VMX operation for VMX instructions that require it, raising #UD otherwise.
411 * @note Any changes here may require updating IEMOP_HLP_IN_VMX_OPERATION as well. */
412#define IEM_VMX_IN_VMX_OPERATION(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
413 do \
414 { \
415 if (IEM_VMX_IS_ROOT_MODE(a_pVCpu)) \
416 { /* likely */ } \
417 else \
418 { \
419 Log((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
420 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
421 return iemRaiseUndefinedOpcode(a_pVCpu); \
422 } \
423 } while (0)
424
425/** Marks a VM-entry failure with a diagnostic reason, logs and returns. */
426#define IEM_VMX_VMENTRY_FAILED_RET(a_pVCpu, a_pszInstr, a_pszFailure, a_VmxDiag) \
427 do \
428 { \
429 Log(("%s: VM-entry failed! enmDiag=%u (%s) -> %s\n", (a_pszInstr), (a_VmxDiag), \
430 HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
431 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
432 return VERR_VMX_VMENTRY_FAILED; \
433 } while (0)
434
435/** Marks a VM-exit failure with a diagnostic reason, logs and returns. */
436#define IEM_VMX_VMEXIT_FAILED_RET(a_pVCpu, a_uExitReason, a_pszFailure, a_VmxDiag) \
437 do \
438 { \
439 Log(("VM-exit failed! uExitReason=%u enmDiag=%u (%s) -> %s\n", (a_uExitReason), (a_VmxDiag), \
440 HMVmxGetDiagDesc(a_VmxDiag), (a_pszFailure))); \
441 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmDiag = (a_VmxDiag); \
442 return VERR_VMX_VMEXIT_FAILED; \
443 } while (0)
444
445
446/**
447 * Returns whether the given VMCS field is valid and supported by our emulation.
448 *
449 * @param pVCpu The cross context virtual CPU structure.
450 * @param u64FieldEnc The VMCS field encoding.
451 *
452 * @remarks This takes into account the CPU features exposed to the guest.
453 */
454IEM_STATIC bool iemVmxIsVmcsFieldValid(PVMCPU pVCpu, uint64_t u64FieldEnc)
455{
456 uint32_t const uFieldEncHi = RT_HI_U32(u64FieldEnc);
457 uint32_t const uFieldEncLo = RT_LO_U32(u64FieldEnc);
458 if (!uFieldEncHi)
459 { /* likely */ }
460 else
461 return false;
462
463 PCCPUMFEATURES pFeat = IEM_GET_GUEST_CPU_FEATURES(pVCpu);
464 switch (uFieldEncLo)
465 {
466 /*
467 * 16-bit fields.
468 */
469 /* Control fields. */
470 case VMX_VMCS16_VPID: return pFeat->fVmxVpid;
471 case VMX_VMCS16_POSTED_INT_NOTIFY_VECTOR: return pFeat->fVmxPostedInt;
472 case VMX_VMCS16_EPTP_INDEX: return pFeat->fVmxEptXcptVe;
473
474 /* Guest-state fields. */
475 case VMX_VMCS16_GUEST_ES_SEL:
476 case VMX_VMCS16_GUEST_CS_SEL:
477 case VMX_VMCS16_GUEST_SS_SEL:
478 case VMX_VMCS16_GUEST_DS_SEL:
479 case VMX_VMCS16_GUEST_FS_SEL:
480 case VMX_VMCS16_GUEST_GS_SEL:
481 case VMX_VMCS16_GUEST_LDTR_SEL:
482 case VMX_VMCS16_GUEST_TR_SEL:
483 case VMX_VMCS16_GUEST_INTR_STATUS: return pFeat->fVmxVirtIntDelivery;
484 case VMX_VMCS16_GUEST_PML_INDEX: return pFeat->fVmxPml;
485
486 /* Host-state fields. */
487 case VMX_VMCS16_HOST_ES_SEL:
488 case VMX_VMCS16_HOST_CS_SEL:
489 case VMX_VMCS16_HOST_SS_SEL:
490 case VMX_VMCS16_HOST_DS_SEL:
491 case VMX_VMCS16_HOST_FS_SEL:
492 case VMX_VMCS16_HOST_GS_SEL:
493 case VMX_VMCS16_HOST_TR_SEL: return true;
494
495 /*
496 * 64-bit fields.
497 */
498 /* Control fields. */
499 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
500 case VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH:
501 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
502 case VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH: return pFeat->fVmxUseIoBitmaps;
503 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
504 case VMX_VMCS64_CTRL_MSR_BITMAP_HIGH: return pFeat->fVmxUseMsrBitmaps;
505 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
506 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH:
507 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
508 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH:
509 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
510 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH:
511 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
512 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH: return true;
513 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_FULL:
514 case VMX_VMCS64_CTRL_EXEC_PML_ADDR_HIGH: return pFeat->fVmxPml;
515 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
516 case VMX_VMCS64_CTRL_TSC_OFFSET_HIGH: return true;
517 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_FULL:
518 case VMX_VMCS64_CTRL_VIRT_APIC_PAGEADDR_HIGH: return pFeat->fVmxUseTprShadow;
519 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
520 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH: return pFeat->fVmxVirtApicAccess;
521 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_FULL:
522 case VMX_VMCS64_CTRL_POSTED_INTR_DESC_HIGH: return pFeat->fVmxPostedInt;
523 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
524 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH: return pFeat->fVmxVmFunc;
525 case VMX_VMCS64_CTRL_EPTP_FULL:
526 case VMX_VMCS64_CTRL_EPTP_HIGH: return pFeat->fVmxEpt;
527 case VMX_VMCS64_CTRL_EOI_BITMAP_0_FULL:
528 case VMX_VMCS64_CTRL_EOI_BITMAP_0_HIGH:
529 case VMX_VMCS64_CTRL_EOI_BITMAP_1_FULL:
530 case VMX_VMCS64_CTRL_EOI_BITMAP_1_HIGH:
531 case VMX_VMCS64_CTRL_EOI_BITMAP_2_FULL:
532 case VMX_VMCS64_CTRL_EOI_BITMAP_2_HIGH:
533 case VMX_VMCS64_CTRL_EOI_BITMAP_3_FULL:
534 case VMX_VMCS64_CTRL_EOI_BITMAP_3_HIGH: return pFeat->fVmxVirtIntDelivery;
535 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
536 case VMX_VMCS64_CTRL_EPTP_LIST_HIGH:
537 {
538 uint64_t const uVmFuncMsr = CPUMGetGuestIa32VmxVmFunc(pVCpu);
539 return RT_BOOL(RT_BF_GET(uVmFuncMsr, VMX_BF_VMFUNC_EPTP_SWITCHING));
540 }
541 case VMX_VMCS64_CTRL_VMREAD_BITMAP_FULL:
542 case VMX_VMCS64_CTRL_VMREAD_BITMAP_HIGH:
543 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_FULL:
544 case VMX_VMCS64_CTRL_VMWRITE_BITMAP_HIGH: return pFeat->fVmxVmcsShadowing;
545 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_FULL:
546 case VMX_VMCS64_CTRL_VIRTXCPT_INFO_ADDR_HIGH: return pFeat->fVmxEptXcptVe;
547 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_FULL:
548 case VMX_VMCS64_CTRL_XSS_EXITING_BITMAP_HIGH: return pFeat->fVmxXsavesXrstors;
549 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_FULL:
550 case VMX_VMCS64_CTRL_ENCLS_EXITING_BITMAP_HIGH: return false;
551 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_FULL:
552 case VMX_VMCS64_CTRL_TSC_MULTIPLIER_HIGH: return pFeat->fVmxUseTscScaling;
553
554 /* Read-only data fields. */
555 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_FULL:
556 case VMX_VMCS64_RO_GUEST_PHYS_ADDR_HIGH: return pFeat->fVmxEpt;
557
558 /* Guest-state fields. */
559 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
560 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH:
561 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
562 case VMX_VMCS64_GUEST_DEBUGCTL_HIGH: return true;
563 case VMX_VMCS64_GUEST_PAT_FULL:
564 case VMX_VMCS64_GUEST_PAT_HIGH: return pFeat->fVmxEntryLoadPatMsr || pFeat->fVmxExitSavePatMsr;
565 case VMX_VMCS64_GUEST_EFER_FULL:
566 case VMX_VMCS64_GUEST_EFER_HIGH: return pFeat->fVmxEntryLoadEferMsr || pFeat->fVmxExitSaveEferMsr;
567 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
568 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH: return false;
569 case VMX_VMCS64_GUEST_PDPTE0_FULL:
570 case VMX_VMCS64_GUEST_PDPTE0_HIGH:
571 case VMX_VMCS64_GUEST_PDPTE1_FULL:
572 case VMX_VMCS64_GUEST_PDPTE1_HIGH:
573 case VMX_VMCS64_GUEST_PDPTE2_FULL:
574 case VMX_VMCS64_GUEST_PDPTE2_HIGH:
575 case VMX_VMCS64_GUEST_PDPTE3_FULL:
576 case VMX_VMCS64_GUEST_PDPTE3_HIGH: return pFeat->fVmxEpt;
577 case VMX_VMCS64_GUEST_BNDCFGS_FULL:
578 case VMX_VMCS64_GUEST_BNDCFGS_HIGH: return false;
579
580 /* Host-state fields. */
581 case VMX_VMCS64_HOST_PAT_FULL:
582 case VMX_VMCS64_HOST_PAT_HIGH: return pFeat->fVmxExitLoadPatMsr;
583 case VMX_VMCS64_HOST_EFER_FULL:
584 case VMX_VMCS64_HOST_EFER_HIGH: return pFeat->fVmxExitLoadEferMsr;
585 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
586 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH: return false;
587
588 /*
589 * 32-bit fields.
590 */
591 /* Control fields. */
592 case VMX_VMCS32_CTRL_PIN_EXEC:
593 case VMX_VMCS32_CTRL_PROC_EXEC:
594 case VMX_VMCS32_CTRL_EXCEPTION_BITMAP:
595 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK:
596 case VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH:
597 case VMX_VMCS32_CTRL_CR3_TARGET_COUNT:
598 case VMX_VMCS32_CTRL_EXIT:
599 case VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT:
600 case VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT:
601 case VMX_VMCS32_CTRL_ENTRY:
602 case VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT:
603 case VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO:
604 case VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE:
605 case VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH: return true;
606 case VMX_VMCS32_CTRL_TPR_THRESHOLD: return pFeat->fVmxUseTprShadow;
607 case VMX_VMCS32_CTRL_PROC_EXEC2: return pFeat->fVmxSecondaryExecCtls;
608 case VMX_VMCS32_CTRL_PLE_GAP:
609 case VMX_VMCS32_CTRL_PLE_WINDOW: return pFeat->fVmxPauseLoopExit;
610
611 /* Read-only data fields. */
612 case VMX_VMCS32_RO_VM_INSTR_ERROR:
613 case VMX_VMCS32_RO_EXIT_REASON:
614 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
615 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE:
616 case VMX_VMCS32_RO_IDT_VECTORING_INFO:
617 case VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE:
618 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
619 case VMX_VMCS32_RO_EXIT_INSTR_INFO: return true;
620
621 /* Guest-state fields. */
622 case VMX_VMCS32_GUEST_ES_LIMIT:
623 case VMX_VMCS32_GUEST_CS_LIMIT:
624 case VMX_VMCS32_GUEST_SS_LIMIT:
625 case VMX_VMCS32_GUEST_DS_LIMIT:
626 case VMX_VMCS32_GUEST_FS_LIMIT:
627 case VMX_VMCS32_GUEST_GS_LIMIT:
628 case VMX_VMCS32_GUEST_LDTR_LIMIT:
629 case VMX_VMCS32_GUEST_TR_LIMIT:
630 case VMX_VMCS32_GUEST_GDTR_LIMIT:
631 case VMX_VMCS32_GUEST_IDTR_LIMIT:
632 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
633 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
634 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
635 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
636 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
637 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
638 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
639 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
640 case VMX_VMCS32_GUEST_INT_STATE:
641 case VMX_VMCS32_GUEST_ACTIVITY_STATE:
642 case VMX_VMCS32_GUEST_SMBASE:
643 case VMX_VMCS32_GUEST_SYSENTER_CS: return true;
644 case VMX_VMCS32_PREEMPT_TIMER_VALUE: return pFeat->fVmxPreemptTimer;
645
646 /* Host-state fields. */
647 case VMX_VMCS32_HOST_SYSENTER_CS: return true;
648
649 /*
650 * Natural-width fields.
651 */
652 /* Control fields. */
653 case VMX_VMCS_CTRL_CR0_MASK:
654 case VMX_VMCS_CTRL_CR4_MASK:
655 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
656 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
657 case VMX_VMCS_CTRL_CR3_TARGET_VAL0:
658 case VMX_VMCS_CTRL_CR3_TARGET_VAL1:
659 case VMX_VMCS_CTRL_CR3_TARGET_VAL2:
660 case VMX_VMCS_CTRL_CR3_TARGET_VAL3: return true;
661
662 /* Read-only data fields. */
663 case VMX_VMCS_RO_EXIT_QUALIFICATION:
664 case VMX_VMCS_RO_IO_RCX:
665 case VMX_VMCS_RO_IO_RSI:
666 case VMX_VMCS_RO_IO_RDI:
667 case VMX_VMCS_RO_IO_RIP:
668 case VMX_VMCS_RO_GUEST_LINEAR_ADDR: return true;
669
670 /* Guest-state fields. */
671 case VMX_VMCS_GUEST_CR0:
672 case VMX_VMCS_GUEST_CR3:
673 case VMX_VMCS_GUEST_CR4:
674 case VMX_VMCS_GUEST_ES_BASE:
675 case VMX_VMCS_GUEST_CS_BASE:
676 case VMX_VMCS_GUEST_SS_BASE:
677 case VMX_VMCS_GUEST_DS_BASE:
678 case VMX_VMCS_GUEST_FS_BASE:
679 case VMX_VMCS_GUEST_GS_BASE:
680 case VMX_VMCS_GUEST_LDTR_BASE:
681 case VMX_VMCS_GUEST_TR_BASE:
682 case VMX_VMCS_GUEST_GDTR_BASE:
683 case VMX_VMCS_GUEST_IDTR_BASE:
684 case VMX_VMCS_GUEST_DR7:
685 case VMX_VMCS_GUEST_RSP:
686 case VMX_VMCS_GUEST_RIP:
687 case VMX_VMCS_GUEST_RFLAGS:
688 case VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS:
689 case VMX_VMCS_GUEST_SYSENTER_ESP:
690 case VMX_VMCS_GUEST_SYSENTER_EIP: return true;
691
692 /* Host-state fields. */
693 case VMX_VMCS_HOST_CR0:
694 case VMX_VMCS_HOST_CR3:
695 case VMX_VMCS_HOST_CR4:
696 case VMX_VMCS_HOST_FS_BASE:
697 case VMX_VMCS_HOST_GS_BASE:
698 case VMX_VMCS_HOST_TR_BASE:
699 case VMX_VMCS_HOST_GDTR_BASE:
700 case VMX_VMCS_HOST_IDTR_BASE:
701 case VMX_VMCS_HOST_SYSENTER_ESP:
702 case VMX_VMCS_HOST_SYSENTER_EIP:
703 case VMX_VMCS_HOST_RSP:
704 case VMX_VMCS_HOST_RIP: return true;
705 }
706
707 return false;
708}
709
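The VMREAD/VMWRITE emulation later in this file is expected to consult this predicate before touching the virtual VMCS; a minimal sketch (not part of the original file):

/* Editorial sketch: rejecting VMCS fields that the emulation does not expose. */
if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
{
    /* ... record the appropriate VM-instruction error (unsupported VMCS component)
     *     via iemVmxVmcsSetVmInstrErr() and fail the VMREAD/VMWRITE ... */
}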
710
711/**
712 * Gets a host selector from the VMCS.
713 *
714 * @param pVmcs Pointer to the virtual VMCS.
715 * @param iSegReg The index of the segment register (X86_SREG_XXX).
716 */
717DECLINLINE(RTSEL) iemVmxVmcsGetHostSelReg(PCVMXVVMCS pVmcs, uint8_t iSegReg)
718{
719 Assert(iSegReg < X86_SREG_COUNT);
720 RTSEL HostSel;
721 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
722 uint8_t const uType = VMX_VMCS_ENC_TYPE_HOST_STATE;
723 uint8_t const uWidthType = (uWidth << 2) | uType;
724 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_HOST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
725 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
726 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
727 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
728 uint8_t const *pbField = pbVmcs + offField;
729 HostSel = *(uint16_t *)pbField;
730 return HostSel;
731}
732
733
734/**
735 * Sets a guest segment register in the VMCS.
736 *
737 * @param pVmcs Pointer to the virtual VMCS.
738 * @param iSegReg The index of the segment register (X86_SREG_XXX).
739 * @param pSelReg Pointer to the segment register.
740 */
741IEM_STATIC void iemVmxVmcsSetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCCPUMSELREG pSelReg)
742{
743 Assert(pSelReg);
744 Assert(iSegReg < X86_SREG_COUNT);
745
746 /* Selector. */
747 {
748 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
749 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
750 uint8_t const uWidthType = (uWidth << 2) | uType;
751 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
752 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
753 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
754 uint8_t *pbVmcs = (uint8_t *)pVmcs;
755 uint8_t *pbField = pbVmcs + offField;
756 *(uint16_t *)pbField = pSelReg->Sel;
757 }
758
759 /* Limit. */
760 {
761 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
762 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
763 uint8_t const uWidthType = (uWidth << 2) | uType;
764 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
765 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
766 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
767 uint8_t *pbVmcs = (uint8_t *)pVmcs;
768 uint8_t *pbField = pbVmcs + offField;
769 *(uint32_t *)pbField = pSelReg->u32Limit;
770 }
771
772 /* Base. */
773 {
774 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
775 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
776 uint8_t const uWidthType = (uWidth << 2) | uType;
777 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
778 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
779 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
780 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
781 uint8_t const *pbField = pbVmcs + offField;
782 *(uint64_t *)pbField = pSelReg->u64Base;
783 }
784
785 /* Attributes. */
786 {
787 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
788 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
789 | X86DESCATTR_UNUSABLE;
790 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
791 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
792 uint8_t const uWidthType = (uWidth << 2) | uType;
793 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
794 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
795 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
796 uint8_t *pbVmcs = (uint8_t *)pVmcs;
797 uint8_t *pbField = pbVmcs + offField;
798 *(uint32_t *)pbField = pSelReg->Attr.u & fValidAttrMask;
799 }
800}
801
802
803/**
804 * Gets a guest segment register from the VMCS.
805 *
806 * @returns VBox status code.
807 * @param pVmcs Pointer to the virtual VMCS.
808 * @param iSegReg The index of the segment register (X86_SREG_XXX).
809 * @param pSelReg Where to store the segment register (only updated when
810 * VINF_SUCCESS is returned).
811 *
812 * @remarks Warning! This does not validate the contents of the retrieved segment
813 * register.
814 */
815IEM_STATIC int iemVmxVmcsGetGuestSegReg(PCVMXVVMCS pVmcs, uint8_t iSegReg, PCPUMSELREG pSelReg)
816{
817 Assert(pSelReg);
818 Assert(iSegReg < X86_SREG_COUNT);
819
820 /* Selector. */
821 uint16_t u16Sel;
822 {
823 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_16BIT;
824 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
825 uint8_t const uWidthType = (uWidth << 2) | uType;
826 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS16_GUEST_ES_SEL, VMX_BF_VMCS_ENC_INDEX);
827 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
828 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
829 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
830 uint8_t const *pbField = pbVmcs + offField;
831 u16Sel = *(uint16_t *)pbField;
832 }
833
834 /* Limit. */
835 uint32_t u32Limit;
836 {
837 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
838 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
839 uint8_t const uWidthType = (uWidth << 2) | uType;
840 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_LIMIT, VMX_BF_VMCS_ENC_INDEX);
841 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
842 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
843 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
844 uint8_t const *pbField = pbVmcs + offField;
845 u32Limit = *(uint32_t *)pbField;
846 }
847
848 /* Base. */
849 uint64_t u64Base;
850 {
851 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
852 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
853 uint8_t const uWidthType = (uWidth << 2) | uType;
854 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS_GUEST_ES_BASE, VMX_BF_VMCS_ENC_INDEX);
855 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
856 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
857 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
858 uint8_t const *pbField = pbVmcs + offField;
859 u64Base = *(uint64_t *)pbField;
860 /** @todo NSTVMX: Should we zero out high bits here for 32-bit virtual CPUs? */
861 }
862
863 /* Attributes. */
864 uint32_t u32Attr;
865 {
866 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_32BIT;
867 uint8_t const uType = VMX_VMCS_ENC_TYPE_GUEST_STATE;
868 uint8_t const uWidthType = (uWidth << 2) | uType;
869 uint8_t const uIndex = iSegReg + RT_BF_GET(VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, VMX_BF_VMCS_ENC_INDEX);
870 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_3);
871 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
872 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
873 uint8_t const *pbField = pbVmcs + offField;
874 u32Attr = *(uint32_t *)pbField;
875 }
876
877 pSelReg->Sel = u16Sel;
878 pSelReg->ValidSel = u16Sel;
879 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
880 pSelReg->u32Limit = u32Limit;
881 pSelReg->u64Base = u64Base;
882 pSelReg->Attr.u = u32Attr;
883 return VINF_SUCCESS;
884}
885
886
887/**
888 * Gets a CR3 target value from the VMCS.
889 *
890 * @returns The CR3-target value.
891 * @param pVmcs Pointer to the virtual VMCS.
892 * @param idxCr3Target The index of the CR3-target value to retrieve.
894 */
895DECLINLINE(uint64_t) iemVmxVmcsGetCr3TargetValue(PCVMXVVMCS pVmcs, uint8_t idxCr3Target)
896{
897 Assert(idxCr3Target < VMX_V_CR3_TARGET_COUNT);
898 uint8_t const uWidth = VMX_VMCS_ENC_WIDTH_NATURAL;
899 uint8_t const uType = VMX_VMCS_ENC_TYPE_CONTROL;
900 uint8_t const uWidthType = (uWidth << 2) | uType;
901 uint8_t const uIndex = idxCr3Target + RT_BF_GET(VMX_VMCS_CTRL_CR3_TARGET_VAL0, VMX_BF_VMCS_ENC_INDEX);
902 Assert(uIndex <= VMX_V_VMCS_MAX_INDEX);
903 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
904 uint8_t const *pbVmcs = (uint8_t *)pVmcs;
905 uint8_t const *pbField = pbVmcs + offField;
906 uint64_t const uCr3TargetValue = *(uint64_t *)pbField;
907
908 return uCr3TargetValue;
909}
910
911
912/**
913 * Signals that a virtual-APIC action needs to be performed at a later time (post
914 * instruction execution).
915 *
916 * @param pVCpu The cross context virtual CPU structure.
917 * @param offApic The virtual-APIC page offset that was updated pertaining to
918 * the event.
919 */
920DECLINLINE(void) iemVmxVirtApicSignalAction(PVMCPU pVCpu, uint16_t offApic)
921{
922 Assert(offApic < XAPIC_OFF_END + 4);
923
924 /*
925 * Record the currently updated APIC offset, as we need this later for figuring
926 * out whether to perform TPR, EOI or self-IPI virtualization, as well as for
927 * supplying the exit qualification when causing an APIC-write VM-exit.
928 */
929 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
930
931 /*
932 * Signal that we need to perform a virtual-APIC action (TPR/PPR/EOI/Self-IPI
933 * virtualization or APIC-write emulation).
934 */
935 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_UPDATE_VAPIC))
936 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_UPDATE_VAPIC);
937}
938
939
940/**
941 * Applies the CR0/CR4 guest/host mask and read shadow to the given nested-guest
942 * CR0/CR4 value, yielding the value the nested-guest sees on a CR0/CR4 read.
943 *
944 * @returns The masked CR0/CR4.
945 * @param pVCpu The cross context virtual CPU structure.
946 * @param iCrReg The control register (either CR0 or CR4).
947 * @param uGuestCrX The current guest CR0 or guest CR4.
948 */
949IEM_STATIC uint64_t iemVmxMaskCr0CR4(PVMCPU pVCpu, uint8_t iCrReg, uint64_t uGuestCrX)
950{
951 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
952 Assert(iCrReg == 0 || iCrReg == 4);
953
954 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
955 Assert(pVmcs);
956
957 /*
958 * For each CR0 or CR4 bit owned by the host, the corresponding bit is loaded from the
959 * CR0 read shadow or CR4 read shadow. For each CR0 or CR4 bit that is not owned by the
960 * host, the corresponding bit from the guest CR0 or guest CR4 is loaded.
961 *
962 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
963 */
964 uint64_t fGstHostMask;
965 uint64_t fReadShadow;
966 if (iCrReg == 0)
967 {
968 fGstHostMask = pVmcs->u64Cr0Mask.u;
969 fReadShadow = pVmcs->u64Cr0ReadShadow.u;
970 }
971 else
972 {
973 fGstHostMask = pVmcs->u64Cr4Mask.u;
974 fReadShadow = pVmcs->u64Cr4ReadShadow.u;
975 }
976
977 uint64_t const fMaskedCrX = (fReadShadow & fGstHostMask) | (uGuestCrX & ~fGstHostMask);
978 return fMaskedCrX;
979}
980
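A concrete instance of the rule above (not part of the original file): if the guest/host mask owns CR0.PE and CR0.PG, a nested-guest CR0 read sees those two bits from the read shadow and every other bit (here CR0.NE) from the real guest CR0:

/* Editorial sketch: the same formula as iemVmxMaskCr0CR4 with fixed inputs. */
static uint64_t iemVmxSketchMaskedCr0(void)
{
    uint64_t const fGstHostMask = X86_CR0_PE | X86_CR0_PG;  /* bits owned by the host */
    uint64_t const fReadShadow  = X86_CR0_PE | X86_CR0_PG;  /* what the guest is shown for owned bits */
    uint64_t const uGuestCr0    = X86_CR0_PE | X86_CR0_NE;  /* the actual guest CR0 */
    return (fReadShadow & fGstHostMask) | (uGuestCr0 & ~fGstHostMask); /* == PE | PG | NE */
}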
981
982/**
983 * Gets VM-exit instruction information along with any displacement for an
984 * instruction VM-exit.
985 *
986 * @returns The VM-exit instruction information.
987 * @param pVCpu The cross context virtual CPU structure.
988 * @param uExitReason The VM-exit reason.
989 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_XXX).
990 * @param pGCPtrDisp Where to store the displacement field. Optional, can be
991 * NULL.
992 */
993IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, PRTGCPTR pGCPtrDisp)
994{
995 RTGCPTR GCPtrDisp;
996 VMXEXITINSTRINFO ExitInstrInfo;
997 ExitInstrInfo.u = 0;
998
999 /*
1000 * Get and parse the ModR/M byte from our decoded opcodes.
1001 */
1002 uint8_t bRm;
1003 uint8_t const offModRm = pVCpu->iem.s.offModRm;
1004 IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
1005 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
1006 {
1007 /*
1008 * ModR/M indicates register addressing.
1009 *
1010 * The primary/secondary register operands are reported in the iReg1 or iReg2
1011 * fields depending on whether it is a read/write form.
1012 */
1013 uint8_t idxReg1;
1014 uint8_t idxReg2;
1015 if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
1016 {
1017 idxReg1 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
1018 idxReg2 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
1019 }
1020 else
1021 {
1022 idxReg1 = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
1023 idxReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
1024 }
1025 ExitInstrInfo.All.u2Scaling = 0;
1026 ExitInstrInfo.All.iReg1 = idxReg1;
1027 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
1028 ExitInstrInfo.All.fIsRegOperand = 1;
1029 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
1030 ExitInstrInfo.All.iSegReg = 0;
1031 ExitInstrInfo.All.iIdxReg = 0;
1032 ExitInstrInfo.All.fIdxRegInvalid = 1;
1033 ExitInstrInfo.All.iBaseReg = 0;
1034 ExitInstrInfo.All.fBaseRegInvalid = 1;
1035 ExitInstrInfo.All.iReg2 = idxReg2;
1036
1037 /* Displacement not applicable for register addressing. */
1038 GCPtrDisp = 0;
1039 }
1040 else
1041 {
1042 /*
1043 * ModR/M indicates memory addressing.
1044 */
1045 uint8_t uScale = 0;
1046 bool fBaseRegValid = false;
1047 bool fIdxRegValid = false;
1048 uint8_t iBaseReg = 0;
1049 uint8_t iIdxReg = 0;
1050 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
1051 {
1052 /*
1053 * Parse the ModR/M, displacement for 16-bit addressing mode.
1054 * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
1055 */
1056 uint16_t u16Disp = 0;
1057 uint8_t const offDisp = offModRm + sizeof(bRm);
1058 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
1059 {
1060 /* Displacement without any registers. */
1061 IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
1062 }
1063 else
1064 {
1065 /* Register (index and base). */
1066 switch (bRm & X86_MODRM_RM_MASK)
1067 {
1068 case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
1069 case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
1070 case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
1071 case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
1072 case 4: fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
1073 case 5: fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
1074 case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
1075 case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
1076 }
1077
1078 /* Register + displacement. */
1079 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
1080 {
1081 case 0: break;
1082 case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
1083 case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp); break;
1084 default:
1085 {
1086 /* Register addressing, handled at the beginning. */
1087 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
1088 break;
1089 }
1090 }
1091 }
1092
1093 Assert(!uScale); /* There's no scaling/SIB byte for 16-bit addressing. */
1094 GCPtrDisp = (int16_t)u16Disp; /* Sign-extend the displacement. */
1095 }
1096 else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
1097 {
1098 /*
1099 * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
1100 * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
1101 */
1102 uint32_t u32Disp = 0;
1103 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
1104 {
1105 /* Displacement without any registers. */
1106 uint8_t const offDisp = offModRm + sizeof(bRm);
1107 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
1108 }
1109 else
1110 {
1111 /* Register (and perhaps scale, index and base). */
1112 uint8_t offDisp = offModRm + sizeof(bRm);
1113 iBaseReg = (bRm & X86_MODRM_RM_MASK);
1114 if (iBaseReg == 4)
1115 {
1116 /* An SIB byte follows the ModR/M byte, parse it. */
1117 uint8_t bSib;
1118 uint8_t const offSib = offModRm + sizeof(bRm);
1119 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
1120
1121 /* A displacement may follow SIB, update its offset. */
1122 offDisp += sizeof(bSib);
1123
1124 /* Get the scale. */
1125 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
1126
1127 /* Get the index register. */
1128 iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
1129 fIdxRegValid = RT_BOOL(iIdxReg != 4);
1130
1131 /* Get the base register. */
1132 iBaseReg = bSib & X86_SIB_BASE_MASK;
1133 fBaseRegValid = true;
1134 if (iBaseReg == 5)
1135 {
1136 if ((bRm & X86_MODRM_MOD_MASK) == 0)
1137 {
1138 /* Mod is 0 implies a 32-bit displacement with no base. */
1139 fBaseRegValid = false;
1140 IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
1141 }
1142 else
1143 {
1144 /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
1145 iBaseReg = X86_GREG_xBP;
1146 }
1147 }
1148 }
1149
1150 /* Register + displacement. */
1151 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
1152 {
1153 case 0: /* Handled above */ break;
1154 case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
1155 case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp); break;
1156 default:
1157 {
1158 /* Register addressing, handled at the beginning. */
1159 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
1160 break;
1161 }
1162 }
1163 }
1164
1165 GCPtrDisp = (int32_t)u32Disp; /* Sign-extend the displacement. */
1166 }
1167 else
1168 {
1169 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
1170
1171 /*
1172 * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
1173 * See Intel instruction spec. 2.2 "IA-32e Mode".
1174 */
1175 uint64_t u64Disp = 0;
1176 bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
1177 if (fRipRelativeAddr)
1178 {
1179 /*
1180 * RIP-relative addressing mode.
1181 *
1182 * The displacement is 32-bit signed implying an offset range of +/-2G.
1183 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
1184 */
1185 uint8_t const offDisp = offModRm + sizeof(bRm);
1186 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
1187 }
1188 else
1189 {
1190 uint8_t offDisp = offModRm + sizeof(bRm);
1191
1192 /*
1193 * Register (and perhaps scale, index and base).
1194 *
1195 * REX.B extends the most-significant bit of the base register. However, REX.B
1196 * is ignored while determining whether an SIB follows the opcode. Hence, we
1197 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
1198 *
1199 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
1200 */
1201 iBaseReg = (bRm & X86_MODRM_RM_MASK);
1202 if (iBaseReg == 4)
1203 {
1204 /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
1205 uint8_t bSib;
1206 uint8_t const offSib = offModRm + sizeof(bRm);
1207 IEM_SIB_GET_U8(pVCpu, bSib, offSib);
1208
1209 /* Displacement may follow SIB, update its offset. */
1210 offDisp += sizeof(bSib);
1211
1212 /* Get the scale. */
1213 uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
1214
1215 /* Get the index. */
1216 iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
1217 fIdxRegValid = RT_BOOL(iIdxReg != 4); /* R12 -can- be used as an index register. */
1218
1219 /* Get the base. */
1220 iBaseReg = (bSib & X86_SIB_BASE_MASK);
1221 fBaseRegValid = true;
1222 if (iBaseReg == 5)
1223 {
1224 if ((bRm & X86_MODRM_MOD_MASK) == 0)
1225 {
1226 /* Mod is 0 implies a signed 32-bit displacement with no base. */
1227 IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
1228 }
1229 else
1230 {
1231 /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
1232 iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
1233 }
1234 }
1235 }
1236 iBaseReg |= pVCpu->iem.s.uRexB;
1237
1238 /* Register + displacement. */
1239 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
1240 {
1241 case 0: /* Handled above */ break;
1242 case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp); break;
1243 case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
1244 default:
1245 {
1246 /* Register addressing, handled at the beginning. */
1247 AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
1248 break;
1249 }
1250 }
1251 }
1252
1253 GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
1254 }
1255
1256 /*
1257 * The primary or secondary register operand is reported in iReg2 depending
1258 * on whether the primary operand is in read/write form.
1259 */
1260 uint8_t idxReg2;
1261 if (!VMXINSTRID_IS_MODRM_PRIMARY_OP_W(uInstrId))
1262 {
1263 idxReg2 = bRm & X86_MODRM_RM_MASK;
1264 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
1265 idxReg2 |= pVCpu->iem.s.uRexB;
1266 }
1267 else
1268 {
1269 idxReg2 = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
1270 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
1271 idxReg2 |= pVCpu->iem.s.uRexReg;
1272 }
1273 ExitInstrInfo.All.u2Scaling = uScale;
1274 ExitInstrInfo.All.iReg1 = 0; /* Not applicable for memory addressing. */
1275 ExitInstrInfo.All.u3AddrSize = pVCpu->iem.s.enmEffAddrMode;
1276 ExitInstrInfo.All.fIsRegOperand = 0;
1277 ExitInstrInfo.All.uOperandSize = pVCpu->iem.s.enmEffOpSize;
1278 ExitInstrInfo.All.iSegReg = pVCpu->iem.s.iEffSeg;
1279 ExitInstrInfo.All.iIdxReg = iIdxReg;
1280 ExitInstrInfo.All.fIdxRegInvalid = !fIdxRegValid;
1281 ExitInstrInfo.All.iBaseReg = iBaseReg;
1282 ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
1283 ExitInstrInfo.All.iReg2 = idxReg2;
1284 }
1285
1286 /*
1287 * Handle exceptions to the norm for certain instructions.
1288 * (e.g. some instructions convey an instruction identity in place of iReg2).
1289 */
1290 switch (uExitReason)
1291 {
1292 case VMX_EXIT_GDTR_IDTR_ACCESS:
1293 {
1294 Assert(VMXINSTRID_IS_VALID(uInstrId));
1295 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
1296 ExitInstrInfo.GdtIdt.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
1297 ExitInstrInfo.GdtIdt.u2Undef0 = 0;
1298 break;
1299 }
1300
1301 case VMX_EXIT_LDTR_TR_ACCESS:
1302 {
1303 Assert(VMXINSTRID_IS_VALID(uInstrId));
1304 Assert(VMXINSTRID_GET_ID(uInstrId) == (uInstrId & 0x3));
1305 ExitInstrInfo.LdtTr.u2InstrId = VMXINSTRID_GET_ID(uInstrId);
1306 ExitInstrInfo.LdtTr.u2Undef0 = 0;
1307 break;
1308 }
1309
1310 case VMX_EXIT_RDRAND:
1311 case VMX_EXIT_RDSEED:
1312 {
1313 Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
1314 break;
1315 }
1316 }
1317
1318 /* Update displacement and return the constructed VM-exit instruction information field. */
1319 if (pGCPtrDisp)
1320 *pGCPtrDisp = GCPtrDisp;
1321
1322 return ExitInstrInfo.u;
1323}
1324
1325
1326/**
1327 * Converts an IEM exception event type to a VMX event type.
1328 *
1329 * @returns The VMX event type.
1330 * @param uVector The interrupt / exception vector.
1331 * @param fFlags The IEM event flag (see IEM_XCPT_FLAGS_XXX).
1332 */
1333DECLINLINE(uint8_t) iemVmxGetEventType(uint32_t uVector, uint32_t fFlags)
1334{
1335 /* Paranoia (callers may use these interchangeably). */
1336 AssertCompile(VMX_EXIT_INT_INFO_TYPE_NMI == VMX_IDT_VECTORING_INFO_TYPE_NMI);
1337 AssertCompile(VMX_EXIT_INT_INFO_TYPE_HW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT);
1338 AssertCompile(VMX_EXIT_INT_INFO_TYPE_EXT_INT == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT);
1339 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT);
1340 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_INT == VMX_IDT_VECTORING_INFO_TYPE_SW_INT);
1341 AssertCompile(VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
1342 AssertCompile(VMX_EXIT_INT_INFO_TYPE_NMI == VMX_ENTRY_INT_INFO_TYPE_NMI);
1343 AssertCompile(VMX_EXIT_INT_INFO_TYPE_HW_XCPT == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT);
1344 AssertCompile(VMX_EXIT_INT_INFO_TYPE_EXT_INT == VMX_ENTRY_INT_INFO_TYPE_EXT_INT);
1345 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_XCPT == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT);
1346 AssertCompile(VMX_EXIT_INT_INFO_TYPE_SW_INT == VMX_ENTRY_INT_INFO_TYPE_SW_INT);
1347 AssertCompile(VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT);
1348
1349 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1350 {
1351 if (uVector == X86_XCPT_NMI)
1352 return VMX_EXIT_INT_INFO_TYPE_NMI;
1353 return VMX_EXIT_INT_INFO_TYPE_HW_XCPT;
1354 }
1355
1356 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1357 {
1358 if (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR))
1359 return VMX_EXIT_INT_INFO_TYPE_SW_XCPT;
1360 if (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
1361 return VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT;
1362 return VMX_EXIT_INT_INFO_TYPE_SW_INT;
1363 }
1364
1365 Assert(fFlags & IEM_XCPT_FLAGS_T_EXT_INT);
1366 return VMX_EXIT_INT_INFO_TYPE_EXT_INT;
1367}
1368
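Two concrete classifications (not part of the original file, wrapped in a hypothetical helper), assuming the caller flags an INT3-raised #BP as a software interrupt with IEM_XCPT_FLAGS_BP_INSTR:

/* Editorial sketch: event-type classification for INT3 (#BP) and for NMI. */
DECLINLINE(void) iemVmxSketchEventTypeExamples(void)
{
    Assert(   iemVmxGetEventType(X86_XCPT_BP,  IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)
           == VMX_EXIT_INT_INFO_TYPE_SW_XCPT);
    Assert(   iemVmxGetEventType(X86_XCPT_NMI, IEM_XCPT_FLAGS_T_CPU_XCPT)
           == VMX_EXIT_INT_INFO_TYPE_NMI);
}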
1369
1370/**
1371 * Sets the VM-instruction error VMCS field.
1372 *
1373 * @param pVCpu The cross context virtual CPU structure.
1374 * @param enmInsErr The VM-instruction error.
1375 */
1376DECL_FORCE_INLINE(void) iemVmxVmcsSetVmInstrErr(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1377{
1378 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1379 pVmcs->u32RoVmInstrError = enmInsErr;
1380}
1381
1382
1383/**
1384 * Sets the VM-exit qualification VMCS field.
1385 *
1386 * @param pVCpu The cross context virtual CPU structure.
1387 * @param uExitQual The VM-exit qualification.
1388 */
1389DECL_FORCE_INLINE(void) iemVmxVmcsSetExitQual(PVMCPU pVCpu, uint64_t uExitQual)
1390{
1391 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1392 pVmcs->u64RoExitQual.u = uExitQual;
1393}
1394
1395
1396/**
1397 * Sets the VM-exit interruption information field.
1398 *
1399 * @param pVCpu The cross context virtual CPU structure.
1400 * @param uExitIntInfo The VM-exit interruption information.
1401 */
1402DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntInfo(PVMCPU pVCpu, uint32_t uExitIntInfo)
1403{
1404 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1405 pVmcs->u32RoExitIntInfo = uExitIntInfo;
1406}
1407
1408
1409/**
1410 * Sets the VM-exit interruption error code.
1411 *
1412 * @param pVCpu The cross context virtual CPU structure.
1413 * @param uErrCode The error code.
1414 */
1415DECL_FORCE_INLINE(void) iemVmxVmcsSetExitIntErrCode(PVMCPU pVCpu, uint32_t uErrCode)
1416{
1417 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1418 pVmcs->u32RoExitIntErrCode = uErrCode;
1419}
1420
1421
1422/**
1423 * Sets the IDT-vectoring information field.
1424 *
1425 * @param pVCpu The cross context virtual CPU structure.
1426 * @param uIdtVectorInfo The IDT-vectoring information.
1427 */
1428DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringInfo(PVMCPU pVCpu, uint32_t uIdtVectorInfo)
1429{
1430 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1431 pVmcs->u32RoIdtVectoringInfo = uIdtVectorInfo;
1432}
1433
1434
1435/**
1436 * Sets the IDT-vectoring error code field.
1437 *
1438 * @param pVCpu The cross context virtual CPU structure.
1439 * @param uErrCode The error code.
1440 */
1441DECL_FORCE_INLINE(void) iemVmxVmcsSetIdtVectoringErrCode(PVMCPU pVCpu, uint32_t uErrCode)
1442{
1443 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1444 pVmcs->u32RoIdtVectoringErrCode = uErrCode;
1445}
1446
1447
1448/**
1449 * Sets the VM-exit guest-linear address VMCS field.
1450 *
1451 * @param pVCpu The cross context virtual CPU structure.
1452 * @param uGuestLinearAddr The VM-exit guest-linear address.
1453 */
1454DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestLinearAddr(PVMCPU pVCpu, uint64_t uGuestLinearAddr)
1455{
1456 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1457 pVmcs->u64RoGuestLinearAddr.u = uGuestLinearAddr;
1458}
1459
1460
1461/**
1462 * Sets the VM-exit guest-physical address VMCS field.
1463 *
1464 * @param pVCpu The cross context virtual CPU structure.
1465 * @param uGuestPhysAddr The VM-exit guest-physical address.
1466 */
1467DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestPhysAddr(PVMCPU pVCpu, uint64_t uGuestPhysAddr)
1468{
1469 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1470 pVmcs->u64RoGuestPhysAddr.u = uGuestPhysAddr;
1471}
1472
1473
1474/**
1475 * Sets the VM-exit instruction length VMCS field.
1476 *
1477 * @param pVCpu The cross context virtual CPU structure.
1478 * @param cbInstr The VM-exit instruction length in bytes.
1479 *
1480 * @remarks Callers may clear this field to 0. Hence, this function does not check
1481 * the validity of the instruction length.
1482 */
1483DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrLen(PVMCPU pVCpu, uint32_t cbInstr)
1484{
1485 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1486 pVmcs->u32RoExitInstrLen = cbInstr;
1487}
1488
1489
1490/**
1491 * Sets the VM-exit instruction info. VMCS field.
1492 *
1493 * @param pVCpu The cross context virtual CPU structure.
1494 * @param uExitInstrInfo The VM-exit instruction information.
1495 */
1496DECL_FORCE_INLINE(void) iemVmxVmcsSetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitInstrInfo)
1497{
1498 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1499 pVmcs->u32RoExitInstrInfo = uExitInstrInfo;
1500}
1501
1502
1503/**
1504 * Implements VMSucceed for VMX instruction success.
1505 *
1506 * @param pVCpu The cross context virtual CPU structure.
1507 */
1508DECL_FORCE_INLINE(void) iemVmxVmSucceed(PVMCPU pVCpu)
1509{
1510 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1511}
1512
1513
1514/**
1515 * Implements VMFailInvalid for VMX instruction failure.
1516 *
1517 * @param pVCpu The cross context virtual CPU structure.
1518 */
1519DECL_FORCE_INLINE(void) iemVmxVmFailInvalid(PVMCPU pVCpu)
1520{
1521 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1522 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_CF;
1523}
1524
1525
1526/**
1527 * Implements VMFailValid for VMX instruction failure.
1528 *
1529 * @param pVCpu The cross context virtual CPU structure.
1530 * @param enmInsErr The VM instruction error.
1531 */
1532DECL_FORCE_INLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1533{
1534 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1535 {
1536 pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
1537 pVCpu->cpum.GstCtx.eflags.u32 |= X86_EFL_ZF;
1538 iemVmxVmcsSetVmInstrErr(pVCpu, enmInsErr);
1539 }
1540}
1541
1542
1543/**
1544 * Implements VMFail for VMX instruction failure.
1545 *
1546 * @param pVCpu The cross context virtual CPU structure.
1547 * @param enmInsErr The VM instruction error.
1548 */
1549DECL_FORCE_INLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
1550{
1551 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
1552 iemVmxVmFailValid(pVCpu, enmInsErr);
1553 else
1554 iemVmxVmFailInvalid(pVCpu);
1555}
1556
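/*
 * The three helpers above implement the RFLAGS conventions of the Intel spec's VMX
 * instruction reference ("Conventions" section):
 *   VMsucceed:     CF, PF, AF, ZF, SF and OF all cleared.
 *   VMfailInvalid: CF set, the rest cleared (there is no current VMCS to record an error in).
 *   VMfailValid:   ZF set, the rest cleared, error recorded in the VM-instruction error field.
 *
 * A minimal usage sketch (illustration only, not taken from the original sources; enmInsErr
 * stands for whichever VMXINSTRERR_XXX value applies, and a failed VMX instruction still
 * advances RIP):
 *
 *     iemVmxVmFail(pVCpu, enmInsErr);
 *     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
 *     return VINF_SUCCESS;
 */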
1557
1558/**
1559 * Checks if the given auto-load/store MSR area count is valid for the
1560 * implementation.
1561 *
1562 * @returns @c true if it's within the valid limit, @c false otherwise.
1563 * @param pVCpu The cross context virtual CPU structure.
1564 * @param uMsrCount The MSR area count to check.
1565 */
1566DECL_FORCE_INLINE(bool) iemVmxIsAutoMsrCountValid(PVMCPU pVCpu, uint32_t uMsrCount)
1567{
1568 uint64_t const u64VmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
1569 uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(u64VmxMiscMsr);
1570 Assert(cMaxSupportedMsrs <= VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
1571 if (uMsrCount <= cMaxSupportedMsrs)
1572 return true;
1573 return false;
1574}
1575
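/*
 * Note (assumption, per the Intel spec): IA32_VMX_MISC bits 27:25 encode a value N such
 * that the recommended maximum number of MSRs in the auto-load/store lists is 512 * (N + 1);
 * VMX_MISC_MAX_MSRS() is assumed to perform that decoding. The assertion above merely
 * ensures the advertised maximum also fits within our backing VMXAUTOMSR area.
 */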
1576
1577/**
1578 * Flushes the current VMCS contents back to guest memory.
1579 *
1580 * @returns VBox status code.
1581 * @param pVCpu The cross context virtual CPU structure.
1582 */
1583DECL_FORCE_INLINE(int) iemVmxCommitCurrentVmcsToMemory(PVMCPU pVCpu)
1584{
1585 Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
1586 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
1587 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), sizeof(VMXVVMCS));
1588 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
1589 return rc;
1590}
1591
1592
1593/**
1594 * Implements VMSucceed for the VMREAD instruction and increments the guest RIP.
1595 *
1596 * @param pVCpu The cross context virtual CPU structure.
1597 */
1598DECL_FORCE_INLINE(void) iemVmxVmreadSuccess(PVMCPU pVCpu, uint8_t cbInstr)
1599{
1600 iemVmxVmSucceed(pVCpu);
1601 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
1602}
1603
1604
1605/**
1606 * Gets the instruction diagnostic for segment base checks during VM-entry of a
1607 * nested-guest.
1608 *
1609 * @param iSegReg The segment index (X86_SREG_XXX).
1610 */
1611IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBase(unsigned iSegReg)
1612{
1613 switch (iSegReg)
1614 {
1615 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseCs;
1616 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseDs;
1617 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseEs;
1618 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseFs;
1619 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseGs;
1620 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseSs;
1621 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_1);
1622 }
1623}
1624
1625
1626/**
1627 * Gets the instruction diagnostic for segment base checks during VM-entry of a
1628 * nested-guest that is in Virtual-8086 mode.
1629 *
1630 * @param iSegReg The segment index (X86_SREG_XXX).
1631 */
1632IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegBaseV86(unsigned iSegReg)
1633{
1634 switch (iSegReg)
1635 {
1636 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegBaseV86Cs;
1637 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ds;
1638 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegBaseV86Es;
1639 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegBaseV86Fs;
1640 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegBaseV86Gs;
1641 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegBaseV86Ss;
1642 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_2);
1643 }
1644}
1645
1646
1647/**
1648 * Gets the instruction diagnostic for segment limit checks during VM-entry of a
1649 * nested-guest that is in Virtual-8086 mode.
1650 *
1651 * @param iSegReg The segment index (X86_SREG_XXX).
1652 */
1653IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegLimitV86(unsigned iSegReg)
1654{
1655 switch (iSegReg)
1656 {
1657 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegLimitV86Cs;
1658 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ds;
1659 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegLimitV86Es;
1660 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegLimitV86Fs;
1661 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegLimitV86Gs;
1662 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegLimitV86Ss;
1663 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_3);
1664 }
1665}
1666
1667
1668/**
1669 * Gets the instruction diagnostic for segment attribute checks during VM-entry of a
1670 * nested-guest that is in Virtual-8086 mode.
1671 *
1672 * @param iSegReg The segment index (X86_SREG_XXX).
1673 */
1674IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrV86(unsigned iSegReg)
1675{
1676 switch (iSegReg)
1677 {
1678 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrV86Cs;
1679 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ds;
1680 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrV86Es;
1681 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrV86Fs;
1682 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrV86Gs;
1683 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrV86Ss;
1684 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_4);
1685 }
1686}
1687
1688
1689/**
1690 * Gets the instruction diagnostic for segment attributes reserved bits failure
1691 * during VM-entry of a nested-guest.
1692 *
1693 * @param iSegReg The segment index (X86_SREG_XXX).
1694 */
1695IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrRsvd(unsigned iSegReg)
1696{
1697 switch (iSegReg)
1698 {
1699 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdCs;
1700 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdDs;
1701 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrRsvdEs;
1702 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdFs;
1703 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdGs;
1704 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrRsvdSs;
1705 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_5);
1706 }
1707}
1708
1709
1710/**
1711 * Gets the instruction diagnostic for segment attributes descriptor-type
1712 * (code/segment or system) failure during VM-entry of a nested-guest.
1713 *
1714 * @param iSegReg The segment index (X86_SREG_XXX).
1715 */
1716IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDescType(unsigned iSegReg)
1717{
1718 switch (iSegReg)
1719 {
1720 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeCs;
1721 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeDs;
1722 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeEs;
1723 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeFs;
1724 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeGs;
1725 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDescTypeSs;
1726 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_6);
1727 }
1728}
1729
1730
1731/**
1732 * Gets the instruction diagnostic for segment attribute presence (P bit) failure
1733 * during VM-entry of a nested-guest.
1734 *
1735 * @param iSegReg The segment index (X86_SREG_XXX).
1736 */
1737IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrPresent(unsigned iSegReg)
1738{
1739 switch (iSegReg)
1740 {
1741 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrPresentCs;
1742 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrPresentDs;
1743 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrPresentEs;
1744 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrPresentFs;
1745 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrPresentGs;
1746 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrPresentSs;
1747 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_7);
1748 }
1749}
1750
1751
1752/**
1753 * Gets the instruction diagnostic for segment attribute granularity failure during
1754 * VM-entry of a nested-guest.
1755 *
1756 * @param iSegReg The segment index (X86_SREG_XXX).
1757 */
1758IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrGran(unsigned iSegReg)
1759{
1760 switch (iSegReg)
1761 {
1762 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrGranCs;
1763 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrGranDs;
1764 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrGranEs;
1765 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrGranFs;
1766 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrGranGs;
1767 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrGranSs;
1768 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_8);
1769 }
1770}
1771
1772/**
1773 * Gets the instruction diagnostic for segment attribute DPL/RPL failure during
1774 * VM-entry of a nested-guest.
1775 *
1776 * @param iSegReg The segment index (X86_SREG_XXX).
1777 */
1778IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrDplRpl(unsigned iSegReg)
1779{
1780 switch (iSegReg)
1781 {
1782 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplCs;
1783 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplDs;
1784 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrDplRplEs;
1785 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplFs;
1786 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplGs;
1787 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrDplRplSs;
1788 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_9);
1789 }
1790}
1791
1792
1793/**
1794 * Gets the instruction diagnostic for segment attribute type accessed failure
1795 * during VM-entry of a nested-guest.
1796 *
1797 * @param iSegReg The segment index (X86_SREG_XXX).
1798 */
1799IEM_STATIC VMXVDIAG iemVmxGetDiagVmentrySegAttrTypeAcc(unsigned iSegReg)
1800{
1801 switch (iSegReg)
1802 {
1803 case X86_SREG_CS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccCs;
1804 case X86_SREG_DS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccDs;
1805 case X86_SREG_ES: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccEs;
1806 case X86_SREG_FS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccFs;
1807 case X86_SREG_GS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccGs;
1808 case X86_SREG_SS: return kVmxVDiag_Vmentry_GuestSegAttrTypeAccSs;
1809 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_10);
1810 }
1811}
1812
1813
1814/**
1815 * Gets the instruction diagnostic for guest CR3 referenced PDPTE reserved bits
1816 * failure during VM-entry of a nested-guest.
1817 *
1818 * @param iPdpte The PDPTE entry index.
1819 */
1820IEM_STATIC VMXVDIAG iemVmxGetDiagVmentryPdpteRsvd(unsigned iPdpte)
1821{
1822 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
1823 switch (iPdpte)
1824 {
1825 case 0: return kVmxVDiag_Vmentry_GuestPdpte0Rsvd;
1826 case 1: return kVmxVDiag_Vmentry_GuestPdpte1Rsvd;
1827 case 2: return kVmxVDiag_Vmentry_GuestPdpte2Rsvd;
1828 case 3: return kVmxVDiag_Vmentry_GuestPdpte3Rsvd;
1829 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_11);
1830 }
1831}
1832
1833
1834/**
1835 * Gets the instruction diagnostic for host CR3 referenced PDPTE reserved bits
1836 * failure during VM-exit of a nested-guest.
1837 *
1838 * @param iPdpte The PDPTE entry index.
1839 */
1840IEM_STATIC VMXVDIAG iemVmxGetDiagVmexitPdpteRsvd(unsigned iPdpte)
1841{
1842 Assert(iPdpte < X86_PG_PAE_PDPE_ENTRIES);
1843 switch (iPdpte)
1844 {
1845 case 0: return kVmxVDiag_Vmexit_HostPdpte0Rsvd;
1846 case 1: return kVmxVDiag_Vmexit_HostPdpte1Rsvd;
1847 case 2: return kVmxVDiag_Vmexit_HostPdpte2Rsvd;
1848 case 3: return kVmxVDiag_Vmexit_HostPdpte3Rsvd;
1849 IEM_NOT_REACHED_DEFAULT_CASE_RET2(kVmxVDiag_Ipe_12);
1850 }
1851}
1852
1853
1854/**
1855 * Saves the guest control registers, debug registers and some MSRs as part of
1856 * VM-exit.
1857 *
1858 * @param pVCpu The cross context virtual CPU structure.
1859 */
1860IEM_STATIC void iemVmxVmexitSaveGuestControlRegsMsrs(PVMCPU pVCpu)
1861{
1862 /*
1863 * Saves the guest control registers, debug registers and some MSRs.
1864 * See Intel spec. 27.3.1 "Saving Control Registers, Debug Registers and MSRs".
1865 */
1866 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1867
1868 /* Save control registers. */
1869 pVmcs->u64GuestCr0.u = pVCpu->cpum.GstCtx.cr0;
1870 pVmcs->u64GuestCr3.u = pVCpu->cpum.GstCtx.cr3;
1871 pVmcs->u64GuestCr4.u = pVCpu->cpum.GstCtx.cr4;
1872
1873 /* Save SYSENTER CS, ESP, EIP. */
1874 pVmcs->u32GuestSysenterCS = pVCpu->cpum.GstCtx.SysEnter.cs;
1875 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
1876 {
1877 pVmcs->u64GuestSysenterEsp.u = pVCpu->cpum.GstCtx.SysEnter.esp;
1878 pVmcs->u64GuestSysenterEip.u = pVCpu->cpum.GstCtx.SysEnter.eip;
1879 }
1880 else
1881 {
1882 pVmcs->u64GuestSysenterEsp.s.Lo = pVCpu->cpum.GstCtx.SysEnter.esp;
1883 pVmcs->u64GuestSysenterEip.s.Lo = pVCpu->cpum.GstCtx.SysEnter.eip;
1884 }
1885
1886 /* Save debug registers (DR7 and IA32_DEBUGCTL MSR). */
1887 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_DEBUG)
1888 {
1889 pVmcs->u64GuestDr7.u = pVCpu->cpum.GstCtx.dr[7];
1890 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
1891 }
1892
1893 /* Save PAT MSR. */
1894 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PAT_MSR)
1895 pVmcs->u64GuestPatMsr.u = pVCpu->cpum.GstCtx.msrPAT;
1896
1897 /* Save EFER MSR. */
1898 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_EFER_MSR)
1899 pVmcs->u64GuestEferMsr.u = pVCpu->cpum.GstCtx.msrEFER;
1900
1901 /* We don't support clearing IA32_BNDCFGS MSR yet. */
1902 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_CLEAR_BNDCFGS_MSR));
1903
1904 /* Nothing to do for SMBASE register - We don't support SMM yet. */
1905}
1906
1907
1908/**
1909 * Saves the guest force-flags in preparation of entering the nested-guest.
1910 *
1911 * @param pVCpu The cross context virtual CPU structure.
1912 */
1913IEM_STATIC void iemVmxVmentrySaveForceFlags(PVMCPU pVCpu)
1914{
1915 /* We shouldn't be called multiple times during VM-entry. */
1916 Assert(pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions == 0);
1917
1918 /* MTF should not be set outside VMX non-root mode. */
1919 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
1920
1921 /*
1922 * Preserve the required force-flags.
1923 *
1924 * We cache and clear force-flags that would affect the execution of the
1925 * nested-guest. Cached flags are then restored while returning to the guest
1926 * if necessary.
1927 *
1928 * - VMCPU_FF_INHIBIT_INTERRUPTS need not be cached as it only affects
1929 * interrupts until the completion of the current VMLAUNCH/VMRESUME
1930 * instruction. Interrupt inhibition for any nested-guest instruction
1931 * will be set later while loading the guest-interruptibility state.
1932 *
1933 * - VMCPU_FF_BLOCK_NMIS needs to be cached as VM-exits caused before
1934 * successful VM-entry needs to continue blocking NMIs if it was in effect
1935 * during VM-entry.
1936 *
1937 * - MTF need not be preserved as it's used only in VMX non-root mode and
1938 * is supplied on VM-entry through the VM-execution controls.
1939 *
1940 * The remaining FFs (e.g. timers, APIC updates) must stay in place so that
1941 * we will be able to generate interrupts that may cause VM-exits for
1942 * the nested-guest.
1943 */
1944 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
1945
1946 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_BLOCK_NMIS))
1947 VMCPU_FF_CLEAR_MASK(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_BLOCK_NMIS);
1948}
1949
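/*
 * Note: the force-flags cached here are put back by iemVmxVmexitRestoreForceFlags() while
 * loading the host state on VM-exit, so e.g. NMI blocking that was in effect at
 * VMLAUNCH/VMRESUME time is re-established once we leave VMX non-root mode.
 */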
1950
1951/**
1952 * Restores the guest force-flags in preparation of exiting the nested-guest.
1953 *
1954 * @param pVCpu The cross context virtual CPU structure.
1955 */
1956IEM_STATIC void iemVmxVmexitRestoreForceFlags(PVMCPU pVCpu)
1957{
1958 if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
1959 {
1960 VMCPU_FF_SET_MASK(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions);
1961 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0;
1962 }
1963}
1964
1965
1966/**
1967 * Performs a VMX transition, updating PGM, IEM and CPUM.
1968 *
1969 * @param pVCpu The cross context virtual CPU structure.
1970 */
1971IEM_STATIC int iemVmxWorldSwitch(PVMCPU pVCpu)
1972{
1973 /*
1974 * Inform PGM about paging mode changes.
1975 * We include X86_CR0_PE because PGM doesn't handle paged-real mode yet,
1976 * see comment in iemMemPageTranslateAndCheckAccess().
1977 */
1978 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0 | X86_CR0_PE, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER);
1979# ifdef IN_RING3
1980 Assert(rc != VINF_PGM_CHANGE_MODE);
1981# endif
1982 AssertRCReturn(rc, rc);
1983
1984 /* Inform CPUM (recompiler), can later be removed. */
1985 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1986
1987 /*
1988 * Flush the TLB with new CR3. This is required in case the PGM mode change
1989 * above doesn't actually change anything.
1990 */
1991 if (rc == VINF_SUCCESS)
1992 {
1993 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, true);
1994 AssertRCReturn(rc, rc);
1995 }
1996
1997 /* Re-initialize IEM cache/state after the drastic mode switch. */
1998 iemReInitExec(pVCpu);
1999 return rc;
2000}
2001
2002
2003/**
2004 * Saves guest segment registers, GDTR, IDTR, LDTR, TR as part of VM-exit.
2005 *
2006 * @param pVCpu The cross context virtual CPU structure.
2007 */
2008IEM_STATIC void iemVmxVmexitSaveGuestSegRegs(PVMCPU pVCpu)
2009{
2010 /*
2011 * Save guest segment registers, GDTR, IDTR, LDTR, TR.
2012 * See Intel spec 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
2013 */
2014 /* CS, SS, ES, DS, FS, GS. */
2015 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2016 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
2017 {
2018 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
2019 if (!pSelReg->Attr.n.u1Unusable)
2020 iemVmxVmcsSetGuestSegReg(pVmcs, iSegReg, pSelReg);
2021 else
2022 {
2023 /*
2024 * For unusable segments the attributes are undefined except for CS and SS.
2025 * For the rest we don't bother preserving anything but the unusable bit.
2026 */
2027 switch (iSegReg)
2028 {
2029 case X86_SREG_CS:
2030 pVmcs->GuestCs = pSelReg->Sel;
2031 pVmcs->u64GuestCsBase.u = pSelReg->u64Base;
2032 pVmcs->u32GuestCsLimit = pSelReg->u32Limit;
2033 pVmcs->u32GuestCsAttr = pSelReg->Attr.u & ( X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
2034 | X86DESCATTR_UNUSABLE);
2035 break;
2036
2037 case X86_SREG_SS:
2038 pVmcs->GuestSs = pSelReg->Sel;
2039 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2040 pVmcs->u64GuestSsBase.u &= UINT32_C(0xffffffff);
2041 pVmcs->u32GuestSsAttr = pSelReg->Attr.u & (X86DESCATTR_DPL | X86DESCATTR_UNUSABLE);
2042 break;
2043
2044 case X86_SREG_DS:
2045 pVmcs->GuestDs = pSelReg->Sel;
2046 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2047 pVmcs->u64GuestDsBase.u &= UINT32_C(0xffffffff);
2048 pVmcs->u32GuestDsAttr = X86DESCATTR_UNUSABLE;
2049 break;
2050
2051 case X86_SREG_ES:
2052 pVmcs->GuestEs = pSelReg->Sel;
2053 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2054 pVmcs->u64GuestEsBase.u &= UINT32_C(0xffffffff);
2055 pVmcs->u32GuestEsAttr = X86DESCATTR_UNUSABLE;
2056 break;
2057
2058 case X86_SREG_FS:
2059 pVmcs->GuestFs = pSelReg->Sel;
2060 pVmcs->u64GuestFsBase.u = pSelReg->u64Base;
2061 pVmcs->u32GuestFsAttr = X86DESCATTR_UNUSABLE;
2062 break;
2063
2064 case X86_SREG_GS:
2065 pVmcs->GuestGs = pSelReg->Sel;
2066 pVmcs->u64GuestGsBase.u = pSelReg->u64Base;
2067 pVmcs->u32GuestGsAttr = X86DESCATTR_UNUSABLE;
2068 break;
2069 }
2070 }
2071 }
2072
2073 /* Segment attribute bits 31:17 and 11:8 MBZ. */
2074 uint32_t const fValidAttrMask = X86DESCATTR_TYPE | X86DESCATTR_DT | X86DESCATTR_DPL | X86DESCATTR_P
2075 | X86DESCATTR_AVL | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G | X86DESCATTR_UNUSABLE;
2076 /* LDTR. */
2077 {
2078 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.ldtr;
2079 pVmcs->GuestLdtr = pSelReg->Sel;
2080 pVmcs->u64GuestLdtrBase.u = pSelReg->u64Base;
2081 Assert(X86_IS_CANONICAL(pSelReg->u64Base));
2082 pVmcs->u32GuestLdtrLimit = pSelReg->u32Limit;
2083 pVmcs->u32GuestLdtrAttr = pSelReg->Attr.u & fValidAttrMask;
2084 }
2085
2086 /* TR. */
2087 {
2088 PCCPUMSELREG pSelReg = &pVCpu->cpum.GstCtx.tr;
2089 pVmcs->GuestTr = pSelReg->Sel;
2090 pVmcs->u64GuestTrBase.u = pSelReg->u64Base;
2091 pVmcs->u32GuestTrLimit = pSelReg->u32Limit;
2092 pVmcs->u32GuestTrAttr = pSelReg->Attr.u & fValidAttrMask;
2093 }
2094
2095 /* GDTR. */
2096 pVmcs->u64GuestGdtrBase.u = pVCpu->cpum.GstCtx.gdtr.pGdt;
2097 pVmcs->u32GuestGdtrLimit = pVCpu->cpum.GstCtx.gdtr.cbGdt;
2098
2099 /* IDTR. */
2100 pVmcs->u64GuestIdtrBase.u = pVCpu->cpum.GstCtx.idtr.pIdt;
2101 pVmcs->u32GuestIdtrLimit = pVCpu->cpum.GstCtx.idtr.cbIdt;
2102}
2103
2104
2105/**
2106 * Saves guest non-register state as part of VM-exit.
2107 *
2108 * @param pVCpu The cross context virtual CPU structure.
2109 * @param uExitReason The VM-exit reason.
2110 */
2111IEM_STATIC void iemVmxVmexitSaveGuestNonRegState(PVMCPU pVCpu, uint32_t uExitReason)
2112{
2113 /*
2114 * Save guest non-register state.
2115 * See Intel spec. 27.3.4 "Saving Non-Register State".
2116 */
2117 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2118
2119 /* Activity-state: VM-exits occur before changing the activity state, nothing further to do */
2120
2121 /* Interruptibility-state. */
2122 pVmcs->u32GuestIntrState = 0;
2123 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
2124 { /** @todo NSTVMX: Virtual-NMI blocking. */ }
2125 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2126 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
2127
2128 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2129 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
2130 {
2131 /** @todo NSTVMX: We can't distinguish between blocking-by-MovSS and blocking-by-STI
2132 * currently. */
2133 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
2134 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2135 }
2136 /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */
2137
2138 /* Pending debug exceptions. */
2139 if ( uExitReason != VMX_EXIT_INIT_SIGNAL
2140 && uExitReason != VMX_EXIT_SMI
2141 && uExitReason != VMX_EXIT_ERR_MACHINE_CHECK
2142 && !HMVmxIsTrapLikeVmexit(uExitReason))
2143 {
2144 /** @todo NSTVMX: also must exclude VM-exits caused by debug exceptions when
2145 * block-by-MovSS is in effect. */
2146 pVmcs->u64GuestPendingDbgXcpt.u = 0;
2147 }
2148
2149 /* Save VMX-preemption timer value. */
2150 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER)
2151 {
2152 uint32_t uPreemptTimer;
2153 if (uExitReason == VMX_EXIT_PREEMPT_TIMER)
2154 uPreemptTimer = 0;
2155 else
2156 {
2157 /*
2158 * Assume the following:
2159 * RT_BIT(PreemptTimerShift) = 10000 (a round figure for illustration; real values are powers of two)
2160 * VmcsPreemptTimer = 2 (i.e. need to decrement by 1 every 2 * 10000 = 20000 TSC ticks)
2161 * VmentryTick = 50000 (TSC at time of VM-entry)
2162 *
2163 * CurTick Delta PreemptTimerVal
2164 * ----------------------------------
2165 * 60000 10000 2
2166 * 80000 30000 1
2167 * 90000 40000 0 -> VM-exit.
2168 *
2169 * If Delta >= VmcsPreemptTimer * RT_BIT(PreemptTimerShift), a VMX-preemption timer VM-exit occurs.
2170 *
2171 * The saved VMX-preemption timer value is calculated as follows:
2172 * PreemptTimerVal = VmcsPreemptTimer - (Delta / (VmcsPreemptTimer * RT_BIT(PreemptTimerShift)))
2173 * E.g. (integer division, as the code below does):
2174 * Delta = 10000
2175 * Tmp = 10000 / (2 * 10000) = 0 (0.5 rounded down)
2176 * NewPt = 2 - 0 = 2
2177 * Delta = 30000
2178 * Tmp = 30000 / (2 * 10000) = 1 (1.5 rounded down)
2179 * NewPt = 2 - 1 = 1
2180 * Delta = 40000
2181 * Tmp = 40000 / 20000 = 2
2182 * NewPt = 2 - 2 = 0
2183 */
2184 uint64_t const uCurTick = TMCpuTickGetNoCheck(pVCpu);
2185 uint64_t const uVmentryTick = pVCpu->cpum.GstCtx.hwvirt.vmx.uVmentryTick;
2186 uint64_t const uDelta = uCurTick - uVmentryTick;
2187 uint32_t const uVmcsPreemptVal = pVmcs->u32PreemptTimer;
2188 uPreemptTimer = uVmcsPreemptVal - ASMDivU64ByU32RetU32(uDelta, uVmcsPreemptVal * RT_BIT(VMX_V_PREEMPT_TIMER_SHIFT));
2189 }
2190
2191 pVmcs->u32PreemptTimer = uPreemptTimer;
2192 }
2193
2194
2195 /* PDPTEs. */
2196 /* We don't support EPT yet. */
2197 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
2198 pVmcs->u64GuestPdpte0.u = 0;
2199 pVmcs->u64GuestPdpte1.u = 0;
2200 pVmcs->u64GuestPdpte2.u = 0;
2201 pVmcs->u64GuestPdpte3.u = 0;
2202}
2203
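/*
 * Example for the interruptibility-state handling above: if the nested-guest executed STI
 * immediately before the intercepted instruction, VMCPU_FF_INHIBIT_INTERRUPTS is still
 * pending for the current RIP, so the code records VMX_VMCS_GUEST_INT_STATE_BLOCK_STI in
 * the saved interruptibility-state and clears the force-flag (the distinction from
 * blocking by MOV SS is not made yet, see the todo above).
 */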
2204
2205/**
2206 * Saves the guest-state as part of VM-exit.
2207 *
2208 * @returns VBox status code.
2209 * @param pVCpu The cross context virtual CPU structure.
2210 * @param uExitReason The VM-exit reason.
2211 */
2212IEM_STATIC void iemVmxVmexitSaveGuestState(PVMCPU pVCpu, uint32_t uExitReason)
2213{
2214 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2215 Assert(pVmcs);
2216
2217 iemVmxVmexitSaveGuestControlRegsMsrs(pVCpu);
2218 iemVmxVmexitSaveGuestSegRegs(pVCpu);
2219
2220 /*
2221 * Save guest RIP, RSP and RFLAGS.
2222 * See Intel spec. 27.3.3 "Saving RIP, RSP and RFLAGS".
2223 *
2224 * For trap-like VM-exits we must advance the RIP by the length of the instruction.
2225 * Callers must pass the instruction length in the VM-exit instruction length
2226 * field though it is undefined for such VM-exits. After updating RIP here, we clear
2227 * the VM-exit instruction length field.
2228 *
2229 * See Intel spec. 27.1 "Architectural State Before A VM Exit"
2230 */
2231 if (HMVmxIsTrapLikeVmexit(uExitReason))
2232 {
2233 uint8_t const cbInstr = pVmcs->u32RoExitInstrLen;
2234 AssertMsg(cbInstr >= 1 && cbInstr <= 15, ("uReason=%u cbInstr=%u\n", uExitReason, cbInstr));
2235 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
2236 iemVmxVmcsSetExitInstrLen(pVCpu, 0 /* cbInstr */);
2237 }
2238
2239 /* We don't support enclave mode yet. */
2240 pVmcs->u64GuestRip.u = pVCpu->cpum.GstCtx.rip;
2241 pVmcs->u64GuestRsp.u = pVCpu->cpum.GstCtx.rsp;
2242 pVmcs->u64GuestRFlags.u = pVCpu->cpum.GstCtx.rflags.u; /** @todo NSTVMX: Check RFLAGS.RF handling. */
2243
2244 iemVmxVmexitSaveGuestNonRegState(pVCpu, uExitReason);
2245}
2246
2247
2248/**
2249 * Saves the guest MSRs into the VM-exit auto-store MSRs area as part of VM-exit.
2250 *
2251 * @returns VBox status code.
2252 * @param pVCpu The cross context virtual CPU structure.
2253 * @param uExitReason The VM-exit reason (for diagnostic purposes).
2254 */
2255IEM_STATIC int iemVmxVmexitSaveGuestAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
2256{
2257 /*
2258 * Save guest MSRs.
2259 * See Intel spec. 27.4 "Saving MSRs".
2260 */
2261 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2262 const char *const pszFailure = "VMX-abort";
2263
2264 /*
2265 * The VM-exit MSR-store area address need not be a valid guest-physical address if the
2266 * VM-exit MSR-store count is 0. If this is the case, bail early without reading it.
2267 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
2268 */
2269 uint32_t const cMsrs = pVmcs->u32ExitMsrStoreCount;
2270 if (!cMsrs)
2271 return VINF_SUCCESS;
2272
2273 /*
2274 * Verify the MSR auto-store count. Physical CPUs can behave unpredictably if the count
2275 * is exceeded including possibly raising #MC exceptions during VMX transition. Our
2276 * implementation causes a VMX-abort followed by a triple-fault.
2277 */
2278 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
2279 if (fIsMsrCountValid)
2280 { /* likely */ }
2281 else
2282 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreCount);
2283
2284 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
2285 Assert(pMsr);
2286 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
2287 {
2288 if ( !pMsr->u32Reserved
2289 && pMsr->u32Msr != MSR_IA32_SMBASE
2290 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
2291 {
2292 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, pMsr->u32Msr, &pMsr->u64Value);
2293 if (rcStrict == VINF_SUCCESS)
2294 continue;
2295
2296 /*
2297 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue VM-exit.
2298 * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VMX-abort
2299 * recording the MSR index in the auxiliary info. field and indicated further by our
2300 * own, specific diagnostic code. Later, we can try implement handling of the MSR in ring-0
2301 * if possible, or come up with a better, generic solution.
2302 */
2303 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2304 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_READ
2305 ? kVmxVDiag_Vmexit_MsrStoreRing3
2306 : kVmxVDiag_Vmexit_MsrStore;
2307 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2308 }
2309 else
2310 {
2311 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2312 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStoreRsvd);
2313 }
2314 }
2315
2316 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrStore.u;
2317 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysAutoMsrArea,
2318 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea), VMX_V_AUTOMSR_AREA_SIZE);
2319 if (RT_SUCCESS(rc))
2320 { /* likely */ }
2321 else
2322 {
2323 AssertMsgFailed(("VM-exit: Failed to write MSR auto-store area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
2324 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrStorePtrWritePhys);
2325 }
2326
2327 NOREF(uExitReason);
2328 NOREF(pszFailure);
2329 return VINF_SUCCESS;
2330}
2331
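/*
 * Layout note: each entry in the MSR-store area is a VMXAUTOMSR, i.e. { u32Msr, u32Reserved,
 * u64Value } as accessed in the loop above. For example, an entry with u32Msr = MSR_K6_STAR
 * gets its u64Value filled with the guest's current STAR value before the whole area is
 * written back to the guest-physical address in u64AddrExitMsrStore.
 */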
2332
2333/**
2334 * Performs a VMX abort (due to a fatal error during VM-exit).
2335 *
2336 * @returns Strict VBox status code.
2337 * @param pVCpu The cross context virtual CPU structure.
2338 * @param enmAbort The VMX abort reason.
2339 */
2340IEM_STATIC VBOXSTRICTRC iemVmxAbort(PVMCPU pVCpu, VMXABORT enmAbort)
2341{
2342 /*
2343 * Perform the VMX abort.
2344 * See Intel spec. 27.7 "VMX Aborts".
2345 */
2346 LogFunc(("enmAbort=%u (%s) -> RESET\n", enmAbort, HMVmxGetAbortDesc(enmAbort)));
2347
2348 /* We don't support SMX yet. */
2349 pVCpu->cpum.GstCtx.hwvirt.vmx.enmAbort = enmAbort;
2350 if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
2351 {
2352 RTGCPHYS const GCPhysVmcs = IEM_VMX_GET_CURRENT_VMCS(pVCpu);
2353 uint32_t const offVmxAbort = RT_UOFFSETOF(VMXVVMCS, u32VmxAbortId);
2354 PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + offVmxAbort, &enmAbort, sizeof(enmAbort));
2355 }
2356
2357 return VINF_EM_TRIPLE_FAULT;
2358}
2359
2360
2361/**
2362 * Loads host control registers, debug registers and MSRs as part of VM-exit.
2363 *
2364 * @param pVCpu The cross context virtual CPU structure.
2365 */
2366IEM_STATIC void iemVmxVmexitLoadHostControlRegsMsrs(PVMCPU pVCpu)
2367{
2368 /*
2369 * Load host control registers, debug registers and MSRs.
2370 * See Intel spec. 27.5.1 "Loading Host Control Registers, Debug Registers, MSRs".
2371 */
2372 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2373 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2374
2375 /* CR0. */
2376 {
2377 /* Bits 63:32, 28:19, 17, 15:6, ET, CD, NW and CR0 MB1 bits are not modified. */
2378 uint64_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
2379 uint64_t const fCr0IgnMask = UINT64_C(0xffffffff1ffaffc0) | X86_CR0_ET | X86_CR0_CD | X86_CR0_NW | uCr0Fixed0;
2380 uint64_t const uHostCr0 = pVmcs->u64HostCr0.u;
2381 uint64_t const uGuestCr0 = pVCpu->cpum.GstCtx.cr0;
2382 uint64_t const uValidCr0 = (uHostCr0 & ~fCr0IgnMask) | (uGuestCr0 & fCr0IgnMask);
2383 CPUMSetGuestCR0(pVCpu, uValidCr0);
2384 }
2385
2386 /* CR4. */
2387 {
2388 /* CR4 MB1 bits are not modified. */
2389 uint64_t const fCr4IgnMask = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
2390 uint64_t const uHostCr4 = pVmcs->u64HostCr4.u;
2391 uint64_t const uGuestCr4 = pVCpu->cpum.GstCtx.cr4;
2392 uint64_t uValidCr4 = (uHostCr4 & ~fCr4IgnMask) | (uGuestCr4 & fCr4IgnMask);
2393 if (fHostInLongMode)
2394 uValidCr4 |= X86_CR4_PAE;
2395 else
2396 uValidCr4 &= ~X86_CR4_PCIDE;
2397 CPUMSetGuestCR4(pVCpu, uValidCr4);
2398 }
2399
2400 /* CR3 (host value validated while checking host-state during VM-entry). */
2401 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64HostCr3.u;
2402
2403 /* DR7. */
2404 pVCpu->cpum.GstCtx.dr[7] = X86_DR7_INIT_VAL;
2405
2406 /** @todo NSTVMX: Support IA32_DEBUGCTL MSR */
2407
2408 /* Save SYSENTER CS, ESP, EIP (host value validated while checking host-state during VM-entry). */
2409 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64HostSysenterEip.u;
2410 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64HostSysenterEsp.u;
2411 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32HostSysenterCs;
2412
2413 /* FS, GS bases are loaded later while we load host segment registers. */
2414
2415 /* EFER MSR (host value validated while checking host-state during VM-entry). */
2416 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
2417 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64HostEferMsr.u;
2418 else if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
2419 {
2420 if (fHostInLongMode)
2421 pVCpu->cpum.GstCtx.msrEFER |= (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
2422 else
2423 pVCpu->cpum.GstCtx.msrEFER &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
2424 }
2425
2426 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
2427
2428 /* PAT MSR (host value is validated while checking host-state during VM-entry). */
2429 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
2430 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64HostPatMsr.u;
2431
2432 /* We don't support IA32_BNDCFGS MSR yet. */
2433}
2434
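/*
 * Worked example for the CR0 merge above (illustration only): fCr0IgnMask includes
 * X86_CR0_CD, so if the guest was running with CD=1 while the VMCS host-state CR0 field
 * has CD=0, the result of
 *     uValidCr0 = (uHostCr0 & ~fCr0IgnMask) | (uGuestCr0 & fCr0IgnMask)
 * still has CD=1: bits in the ignore-mask are taken from the current (guest) value,
 * everything else comes from the host-state area of the VMCS.
 */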
2435
2436/**
2437 * Loads host segment registers, GDTR, IDTR, LDTR and TR as part of VM-exit.
2438 *
2439 * @param pVCpu The cross context virtual CPU structure.
2440 */
2441IEM_STATIC void iemVmxVmexitLoadHostSegRegs(PVMCPU pVCpu)
2442{
2443 /*
2444 * Load host segment registers, GDTR, IDTR, LDTR and TR.
2445 * See Intel spec. 27.5.2 "Loading Host Segment and Descriptor-Table Registers".
2446 *
2447 * Warning! Be careful to not touch fields that are reserved by VT-x,
2448 * e.g. segment limit high bits stored in segment attributes (in bits 11:8).
2449 */
2450 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2451 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2452
2453 /* CS, SS, ES, DS, FS, GS. */
2454 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
2455 {
2456 RTSEL const HostSel = iemVmxVmcsGetHostSelReg(pVmcs, iSegReg);
2457 bool const fUnusable = RT_BOOL(HostSel == 0);
2458
2459 /* Selector. */
2460 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel = HostSel;
2461 pVCpu->cpum.GstCtx.aSRegs[iSegReg].ValidSel = HostSel;
2462 pVCpu->cpum.GstCtx.aSRegs[iSegReg].fFlags = CPUMSELREG_FLAGS_VALID;
2463
2464 /* Limit. */
2465 pVCpu->cpum.GstCtx.aSRegs[iSegReg].u32Limit = 0xffffffff;
2466
2467 /* Base and Attributes. */
2468 switch (iSegReg)
2469 {
2470 case X86_SREG_CS:
2471 {
2472 pVCpu->cpum.GstCtx.cs.u64Base = 0;
2473 pVCpu->cpum.GstCtx.cs.Attr.n.u4Type = X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED;
2474 pVCpu->cpum.GstCtx.cs.Attr.n.u1DescType = 1;
2475 pVCpu->cpum.GstCtx.cs.Attr.n.u2Dpl = 0;
2476 pVCpu->cpum.GstCtx.cs.Attr.n.u1Present = 1;
2477 pVCpu->cpum.GstCtx.cs.Attr.n.u1Long = fHostInLongMode;
2478 pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig = !fHostInLongMode;
2479 pVCpu->cpum.GstCtx.cs.Attr.n.u1Granularity = 1;
2480 Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Unusable);
2481 Assert(!fUnusable);
2482 break;
2483 }
2484
2485 case X86_SREG_SS:
2486 case X86_SREG_ES:
2487 case X86_SREG_DS:
2488 {
2489 pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base = 0;
2490 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2491 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DescType = 1;
2492 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u2Dpl = 0;
2493 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Present = 1;
2494 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1DefBig = 1;
2495 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Granularity = 1;
2496 pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable = fUnusable;
2497 break;
2498 }
2499
2500 case X86_SREG_FS:
2501 {
2502 Assert(X86_IS_CANONICAL(pVmcs->u64HostFsBase.u));
2503 pVCpu->cpum.GstCtx.fs.u64Base = !fUnusable ? pVmcs->u64HostFsBase.u : 0;
2504 pVCpu->cpum.GstCtx.fs.Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2505 pVCpu->cpum.GstCtx.fs.Attr.n.u1DescType = 1;
2506 pVCpu->cpum.GstCtx.fs.Attr.n.u2Dpl = 0;
2507 pVCpu->cpum.GstCtx.fs.Attr.n.u1Present = 1;
2508 pVCpu->cpum.GstCtx.fs.Attr.n.u1DefBig = 1;
2509 pVCpu->cpum.GstCtx.fs.Attr.n.u1Granularity = 1;
2510 pVCpu->cpum.GstCtx.fs.Attr.n.u1Unusable = fUnusable;
2511 break;
2512 }
2513
2514 case X86_SREG_GS:
2515 {
2516 Assert(X86_IS_CANONICAL(pVmcs->u64HostGsBase.u));
2517 pVCpu->cpum.GstCtx.gs.u64Base = !fUnusable ? pVmcs->u64HostGsBase.u : 0;
2518 pVCpu->cpum.GstCtx.gs.Attr.n.u4Type = X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED;
2519 pVCpu->cpum.GstCtx.gs.Attr.n.u1DescType = 1;
2520 pVCpu->cpum.GstCtx.gs.Attr.n.u2Dpl = 0;
2521 pVCpu->cpum.GstCtx.gs.Attr.n.u1Present = 1;
2522 pVCpu->cpum.GstCtx.gs.Attr.n.u1DefBig = 1;
2523 pVCpu->cpum.GstCtx.gs.Attr.n.u1Granularity = 1;
2524 pVCpu->cpum.GstCtx.gs.Attr.n.u1Unusable = fUnusable;
2525 break;
2526 }
2527 }
2528 }
2529
2530 /* TR. */
2531 Assert(X86_IS_CANONICAL(pVmcs->u64HostTrBase.u));
2532 Assert(!pVCpu->cpum.GstCtx.tr.Attr.n.u1Unusable);
2533 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->HostTr;
2534 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->HostTr;
2535 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2536 pVCpu->cpum.GstCtx.tr.u32Limit = X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN;
2537 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64HostTrBase.u;
2538 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
2539 pVCpu->cpum.GstCtx.tr.Attr.n.u1DescType = 0;
2540 pVCpu->cpum.GstCtx.tr.Attr.n.u2Dpl = 0;
2541 pVCpu->cpum.GstCtx.tr.Attr.n.u1Present = 1;
2542 pVCpu->cpum.GstCtx.tr.Attr.n.u1DefBig = 0;
2543 pVCpu->cpum.GstCtx.tr.Attr.n.u1Granularity = 0;
2544
2545 /* LDTR. */
2546 pVCpu->cpum.GstCtx.ldtr.Sel = 0;
2547 pVCpu->cpum.GstCtx.ldtr.ValidSel = 0;
2548 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2549 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
2550 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
2551 pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Unusable = 1;
2552
2553 /* GDTR. */
2554 Assert(X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u));
2555 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64HostGdtrBase.u;
2556 pVCpu->cpum.GstCtx.gdtr.cbGdt = 0xffff;
2557
2558 /* IDTR.*/
2559 Assert(X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u));
2560 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64HostIdtrBase.u;
2561 pVCpu->cpum.GstCtx.idtr.cbIdt = 0xffff;
2562}
2563
2564
2565/**
2566 * Checks host PDPTEs as part of VM-exit.
2567 *
2568 * @param pVCpu The cross context virtual CPU structure.
2569 * @param uExitReason The VM-exit reason (for logging purposes).
2570 */
2571IEM_STATIC int iemVmxVmexitCheckHostPdptes(PVMCPU pVCpu, uint32_t uExitReason)
2572{
2573 /*
2574 * Check host PDPTEs.
2575 * See Intel spec. 27.5.4 "Checking and Loading Host Page-Directory-Pointer-Table Entries".
2576 */
2577 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2578 const char *const pszFailure = "VMX-abort";
2579 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2580
2581 if ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
2582 && !fHostInLongMode)
2583 {
2584 uint64_t const uHostCr3 = pVCpu->cpum.GstCtx.cr3 & X86_CR3_PAE_PAGE_MASK;
2585 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
2586 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uHostCr3, sizeof(aPdptes));
2587 if (RT_SUCCESS(rc))
2588 {
2589 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
2590 {
2591 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
2592 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
2593 { /* likely */ }
2594 else
2595 {
2596 VMXVDIAG const enmDiag = iemVmxGetDiagVmexitPdpteRsvd(iPdpte);
2597 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2598 }
2599 }
2600 }
2601 else
2602 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_HostPdpteCr3ReadPhys);
2603 }
2604
2605 NOREF(pszFailure);
2606 NOREF(uExitReason);
2607 return VINF_SUCCESS;
2608}
2609
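/*
 * Note on the check above: a PDPTE is only validated when its present bit (X86_PDPE_P) is
 * set; a present PDPTE with any bit of X86_PDPE_PAE_MBZ_MASK set fails the VM-exit
 * (IEM_VMX_VMEXIT_FAILED_RET), which the caller turns into a VMX-abort.
 */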
2610
2611/**
2612 * Loads the host MSRs from the VM-exit auto-load MSRs area as part of VM-exit.
2613 *
2614 * @returns VBox status code.
2615 * @param pVCpu The cross context virtual CPU structure.
2616 * @param uExitReason The VM-exit reason (for diagnostic purposes).
2617 */
2618IEM_STATIC int iemVmxVmexitLoadHostAutoMsrs(PVMCPU pVCpu, uint32_t uExitReason)
2619{
2620 /*
2621 * Load host MSRs.
2622 * See Intel spec. 27.6 "Loading MSRs".
2623 */
2624 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2625 const char *const pszFailure = "VMX-abort";
2626
2627 /*
2628 * The VM-exit MSR-load area address need not be a valid guest-physical address if the
2629 * VM-exit MSR load count is 0. If this is the case, bail early without reading it.
2630 * See Intel spec. 24.7.2 "VM-Exit Controls for MSRs".
2631 */
2632 uint32_t const cMsrs = pVmcs->u32ExitMsrLoadCount;
2633 if (!cMsrs)
2634 return VINF_SUCCESS;
2635
2636 /*
2637 * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count
2638 * is exceeded including possibly raising #MC exceptions during VMX transition. Our
2639 * implementation causes a VMX-abort followed by a triple-fault.
2640 */
2641 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
2642 if (fIsMsrCountValid)
2643 { /* likely */ }
2644 else
2645 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadCount);
2646
2647 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrExitMsrLoad.u;
2648 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
2649 GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
2650 if (RT_SUCCESS(rc))
2651 {
2652 PCVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
2653 Assert(pMsr);
2654 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
2655 {
2656 if ( !pMsr->u32Reserved
2657 && pMsr->u32Msr != MSR_K8_FS_BASE
2658 && pMsr->u32Msr != MSR_K8_GS_BASE
2659 && pMsr->u32Msr != MSR_K6_EFER
2660 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
2661 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
2662 {
2663 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
2664 if (rcStrict == VINF_SUCCESS)
2665 continue;
2666
2667 /*
2668 * If we're in ring-0, we cannot handle returns to ring-3 at this point and continue VM-exit.
2669 * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VMX-abort
2670 * recording the MSR index in the auxiliary info. field and indicated further by our
2671 * own, specific diagnostic code. Later, we can try implement handling of the MSR in ring-0
2672 * if possible, or come up with a better, generic solution.
2673 */
2674 pVCpu->cpum.GstCtx.hwvirt.vmx.uAbortAux = pMsr->u32Msr;
2675 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
2676 ? kVmxVDiag_Vmexit_MsrLoadRing3
2677 : kVmxVDiag_Vmexit_MsrLoad;
2678 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, enmDiag);
2679 }
2680 else
2681 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadRsvd);
2682 }
2683 }
2684 else
2685 {
2686 AssertMsgFailed(("VM-exit: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", GCPhysAutoMsrArea, rc));
2687 IEM_VMX_VMEXIT_FAILED_RET(pVCpu, uExitReason, pszFailure, kVmxVDiag_Vmexit_MsrLoadPtrReadPhys);
2688 }
2689
2690 NOREF(uExitReason);
2691 NOREF(pszFailure);
2692 return VINF_SUCCESS;
2693}
2694
2695
2696/**
2697 * Loads the host state as part of VM-exit.
2698 *
2699 * @returns Strict VBox status code.
2700 * @param pVCpu The cross context virtual CPU structure.
2701 * @param uExitReason The VM-exit reason (for logging purposes).
2702 */
2703IEM_STATIC VBOXSTRICTRC iemVmxVmexitLoadHostState(PVMCPU pVCpu, uint32_t uExitReason)
2704{
2705 /*
2706 * Load host state.
2707 * See Intel spec. 27.5 "Loading Host State".
2708 */
2709 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2710 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
2711 bool const fVirtApicAccess = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
2712
2713 /* We cannot return from a long-mode guest to a host that is not in long mode. */
2714 if ( CPUMIsGuestInLongMode(pVCpu)
2715 && !fHostInLongMode)
2716 {
2717 Log(("VM-exit from long-mode guest to host not in long-mode -> VMX-Abort\n"));
2718 return iemVmxAbort(pVCpu, VMXABORT_HOST_NOT_IN_LONG_MODE);
2719 }
2720
2721 iemVmxVmexitLoadHostControlRegsMsrs(pVCpu);
2722 iemVmxVmexitLoadHostSegRegs(pVCpu);
2723
2724 /*
2725 * Load host RIP, RSP and RFLAGS.
2726 * See Intel spec. 27.5.3 "Loading Host RIP, RSP and RFLAGS"
2727 */
2728 pVCpu->cpum.GstCtx.rip = pVmcs->u64HostRip.u;
2729 pVCpu->cpum.GstCtx.rsp = pVmcs->u64HostRsp.u;
2730 pVCpu->cpum.GstCtx.rflags.u = X86_EFL_1;
2731
2732 /* Update non-register state. */
2733 iemVmxVmexitRestoreForceFlags(pVCpu);
2734
2735 /* Clear address range monitoring. */
2736 EMMonitorWaitClear(pVCpu);
2737
2738 /* De-register the handler for the APIC-access page. */
2739 if (fVirtApicAccess)
2740 {
2741 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
2742 int rc = CPUMVmxApicAccessPageDeregister(pVCpu, GCPhysApicAccess);
2743 if (RT_FAILURE(rc))
2744 return rc;
2745 }
2746
2747 /* Perform the VMX transition (PGM updates). */
2748 VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
2749 if (rcStrict == VINF_SUCCESS)
2750 {
2751 /* Check host PDPTEs (only when we've fully switched page tables). */
2752 /** @todo r=ramshankar: I don't know if PGM does this for us already or not... */
2753 int rc = iemVmxVmexitCheckHostPdptes(pVCpu, uExitReason);
2754 if (RT_FAILURE(rc))
2755 {
2756 Log(("VM-exit failed while restoring host PDPTEs -> VMX-Abort\n"));
2757 return iemVmxAbort(pVCpu, VMXABORT_HOST_PDPTE);
2758 }
2759 }
2760 else if (RT_SUCCESS(rcStrict))
2761 {
2762 Log3(("VM-exit: iemVmxWorldSwitch returns %Rrc (uExitReason=%u) -> Setting passup status\n", VBOXSTRICTRC_VAL(rcStrict),
2763 uExitReason));
2764 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2765 }
2766 else
2767 {
2768 Log3(("VM-exit: iemVmxWorldSwitch failed! rc=%Rrc (uExitReason=%u)\n", VBOXSTRICTRC_VAL(rcStrict), uExitReason));
2769 return VBOXSTRICTRC_VAL(rcStrict);
2770 }
2771
2772 Assert(rcStrict == VINF_SUCCESS);
2773
2774 /* Load MSRs from the VM-exit auto-load MSR area. */
2775 int rc = iemVmxVmexitLoadHostAutoMsrs(pVCpu, uExitReason);
2776 if (RT_FAILURE(rc))
2777 {
2778 Log(("VM-exit failed while loading host MSRs -> VMX-Abort\n"));
2779 return iemVmxAbort(pVCpu, VMXABORT_LOAD_HOST_MSR);
2780 }
2781
2782 return rcStrict;
2783}
2784
2785
2786/**
2787 * VMX VM-exit handler.
2788 *
2789 * @returns Strict VBox status code.
2790 * @retval VINF_VMX_VMEXIT when the VM-exit is successful.
2791 * @retval VINF_EM_TRIPLE_FAULT when VM-exit is unsuccessful and leads to a
2792 * triple-fault.
2793 *
2794 * @param pVCpu The cross context virtual CPU structure.
2795 * @param uExitReason The VM-exit reason.
2796 *
2797 * @remarks Make sure VM-exit qualification is updated before calling this
2798 * function!
2799 */
2800IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason)
2801{
2802 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
2803 Assert(pVmcs);
2804
2805 pVmcs->u32RoExitReason = uExitReason;
2806
2807 /** @todo NSTVMX: IEMGetCurrentXcpt will be VM-exit interruption info. */
2808 /** @todo NSTVMX: The source event should be recorded in IDT-vectoring info
2809 * during injection. */
2810
2811 /*
2812 * Save the guest state back into the VMCS.
2813 * We only need to save the state when the VM-entry was successful.
2814 */
2815 bool const fVmentryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
2816 if (!fVmentryFailed)
2817 {
2818 iemVmxVmexitSaveGuestState(pVCpu, uExitReason);
2819 int rc = iemVmxVmexitSaveGuestAutoMsrs(pVCpu, uExitReason);
2820 if (RT_SUCCESS(rc))
2821 { /* likely */ }
2822 else
2823 return iemVmxAbort(pVCpu, VMXABORT_SAVE_GUEST_MSRS);
2824 }
2825
2826 /*
2827 * The high bits of the VM-exit reason are only relevant when the VM-exit occurs in
2828 * enclave mode/SMM which we don't support yet. If we ever add support for it, we can
2829 * pass just the lower bits, till then an assert should suffice.
2830 */
2831 Assert(!RT_HI_U16(uExitReason));
2832
2833 VBOXSTRICTRC rcStrict = iemVmxVmexitLoadHostState(pVCpu, uExitReason);
2834 if (RT_FAILURE(rcStrict))
2835 LogFunc(("Loading host-state failed. uExitReason=%u rc=%Rrc\n", uExitReason, VBOXSTRICTRC_VAL(rcStrict)));
2836
2837 /* We're no longer in nested-guest execution mode. */
2838 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = false;
2839
2840 Assert(rcStrict == VINF_SUCCESS);
2841 return VINF_VMX_VMEXIT;
2842}
2843
2844
2845/**
2846 * VMX VM-exit handler for VM-exits due to instruction execution.
2847 *
2848 * This is intended for instructions where the caller provides all the relevant
2849 * VM-exit information.
2850 *
2851 * @returns Strict VBox status code.
2852 * @param pVCpu The cross context virtual CPU structure.
2853 * @param pExitInfo Pointer to the VM-exit instruction information struct.
2854 */
2855DECLINLINE(VBOXSTRICTRC) iemVmxVmexitInstrWithInfo(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
2856{
2857 /*
2858 * For instructions where any of the following fields are not applicable:
2859 * - VM-exit instruction info. is undefined.
2860 * - VM-exit qualification must be cleared.
2861 * - VM-exit guest-linear address is undefined.
2862 * - VM-exit guest-physical address is undefined.
2863 *
2864 * The VM-exit instruction length is mandatory for all VM-exits that are caused by
2865 * instruction execution. For VM-exits that are not due to instruction execution this
2866 * field is undefined.
2867 *
2868 * In our implementation in IEM, all undefined fields are generally cleared. However,
2869 * if the caller supplies information (from say the physical CPU directly) it is
2870 * then possible that the undefined fields are not cleared.
2871 *
2872 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2873 * See Intel spec. 27.2.4 "Information for VM Exits Due to Instruction Execution".
2874 */
2875 Assert(pExitInfo);
2876 AssertMsg(pExitInfo->uReason <= VMX_EXIT_MAX, ("uReason=%u\n", pExitInfo->uReason));
2877 AssertMsg(pExitInfo->cbInstr >= 1 && pExitInfo->cbInstr <= 15,
2878 ("uReason=%u cbInstr=%u\n", pExitInfo->uReason, pExitInfo->cbInstr));
2879
2880 /* Update all the relevant fields from the VM-exit instruction information struct. */
2881 iemVmxVmcsSetExitInstrInfo(pVCpu, pExitInfo->InstrInfo.u);
2882 iemVmxVmcsSetExitQual(pVCpu, pExitInfo->u64Qual);
2883 iemVmxVmcsSetExitGuestLinearAddr(pVCpu, pExitInfo->u64GuestLinearAddr);
2884 iemVmxVmcsSetExitGuestPhysAddr(pVCpu, pExitInfo->u64GuestPhysAddr);
2885 iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
2886
2887 /* Perform the VM-exit. */
2888 return iemVmxVmexit(pVCpu, pExitInfo->uReason);
2889}
2890
2891
2892/**
2893 * VMX VM-exit handler for VM-exits due to instruction execution.
2894 *
2895 * This is intended for instructions that only provide the VM-exit instruction
2896 * length.
2897 *
2898 * @param pVCpu The cross context virtual CPU structure.
2899 * @param uExitReason The VM-exit reason.
2900 * @param cbInstr The instruction length in bytes.
2901 */
2902IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstr(PVMCPU pVCpu, uint32_t uExitReason, uint8_t cbInstr)
2903{
2904 VMXVEXITINFO ExitInfo;
2905 RT_ZERO(ExitInfo);
2906 ExitInfo.uReason = uExitReason;
2907 ExitInfo.cbInstr = cbInstr;
2908
2909#ifdef VBOX_STRICT
2910 /* To prevent us from shooting ourselves in the foot. Maybe remove later. */
2911 switch (uExitReason)
2912 {
2913 case VMX_EXIT_INVEPT:
2914 case VMX_EXIT_INVPCID:
2915 case VMX_EXIT_LDTR_TR_ACCESS:
2916 case VMX_EXIT_GDTR_IDTR_ACCESS:
2917 case VMX_EXIT_VMCLEAR:
2918 case VMX_EXIT_VMPTRLD:
2919 case VMX_EXIT_VMPTRST:
2920 case VMX_EXIT_VMREAD:
2921 case VMX_EXIT_VMWRITE:
2922 case VMX_EXIT_VMXON:
2923 case VMX_EXIT_XRSTORS:
2924 case VMX_EXIT_XSAVES:
2925 case VMX_EXIT_RDRAND:
2926 case VMX_EXIT_RDSEED:
2927 case VMX_EXIT_IO_INSTR:
2928 AssertMsgFailedReturn(("Use iemVmxVmexitInstrNeedsInfo for uExitReason=%u\n", uExitReason), VERR_IEM_IPE_5);
2929 break;
2930 }
2931#endif
2932
2933 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2934}
2935
2936
2937/**
2938 * VMX VM-exit handler for VM-exits due to instruction execution.
2939 *
2940 * This is intended for instructions that have a ModR/M byte and update the VM-exit
2941 * instruction information and VM-exit qualification fields.
2942 *
2943 * @param pVCpu The cross context virtual CPU structure.
2944 * @param uExitReason The VM-exit reason.
2945 * @param   uInstrId        The instruction identity (VMXINSTRID_XXX).
2946 * @param cbInstr The instruction length in bytes.
2947 *
2948 * @remarks Do not use this for INS/OUTS instructions.
2949 */
2950IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrNeedsInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr)
2951{
2952 VMXVEXITINFO ExitInfo;
2953 RT_ZERO(ExitInfo);
2954 ExitInfo.uReason = uExitReason;
2955 ExitInfo.cbInstr = cbInstr;
2956
2957 /*
2958 * Update the VM-exit qualification field with displacement bytes.
2959 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
2960 */
2961 switch (uExitReason)
2962 {
2963 case VMX_EXIT_INVEPT:
2964 case VMX_EXIT_INVPCID:
2965 case VMX_EXIT_LDTR_TR_ACCESS:
2966 case VMX_EXIT_GDTR_IDTR_ACCESS:
2967 case VMX_EXIT_VMCLEAR:
2968 case VMX_EXIT_VMPTRLD:
2969 case VMX_EXIT_VMPTRST:
2970 case VMX_EXIT_VMREAD:
2971 case VMX_EXIT_VMWRITE:
2972 case VMX_EXIT_VMXON:
2973 case VMX_EXIT_XRSTORS:
2974 case VMX_EXIT_XSAVES:
2975 case VMX_EXIT_RDRAND:
2976 case VMX_EXIT_RDSEED:
2977 {
2978 /* Construct the VM-exit instruction information. */
2979 RTGCPTR GCPtrDisp;
2980 uint32_t const uInstrInfo = iemVmxGetExitInstrInfo(pVCpu, uExitReason, uInstrId, &GCPtrDisp);
2981
2982 /* Update the VM-exit instruction information. */
2983 ExitInfo.InstrInfo.u = uInstrInfo;
2984
2985 /* Update the VM-exit qualification. */
2986 ExitInfo.u64Qual = GCPtrDisp;
2987 break;
2988 }
2989
2990 default:
2991 AssertMsgFailedReturn(("Use instruction-specific handler\n"), VERR_IEM_IPE_5);
2992 break;
2993 }
2994
2995 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
2996}
2997
2998
2999/**
3000 * Checks whether an I/O instruction for the given port is intercepted (causes a
3001 * VM-exit) or not.
3002 *
3003 * @returns @c true if the instruction is intercepted, @c false otherwise.
3004 * @param pVCpu The cross context virtual CPU structure.
3005 * @param u16Port The I/O port being accessed by the instruction.
3006 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3007 */
3008IEM_STATIC bool iemVmxIsIoInterceptSet(PVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess)
3009{
3010 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3011 Assert(pVmcs);
3012
3013 /*
3014 * Check whether the I/O instruction must cause a VM-exit or not.
3015 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3016 */
3017 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_UNCOND_IO_EXIT)
3018 return true;
3019
3020 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
3021 {
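        /* I/O bitmap A covers ports 0x0000-0x7fff and I/O bitmap B covers ports 0x8000-0xffff;
           a set bit for any byte-port touched by the access causes a VM-exit.
           See Intel spec. 24.6.4 "I/O-Bitmap Addresses". */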
3022 uint8_t const *pbIoBitmapA = (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvIoBitmap);
3023 uint8_t const *pbIoBitmapB = (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvIoBitmap) + VMX_V_IO_BITMAP_A_SIZE;
3024 Assert(pbIoBitmapA);
3025 Assert(pbIoBitmapB);
3026 return CPUMVmxGetIoBitmapPermission(pbIoBitmapA, pbIoBitmapB, u16Port, cbAccess);
3027 }
3028
3029 return false;
3030}
3031
3032
3033/**
3034 * VMX VM-exit handler for VM-exits due to INVLPG.
3035 *
3036 * @param pVCpu The cross context virtual CPU structure.
3037 * @param GCPtrPage The guest-linear address of the page being invalidated.
3038 * @param cbInstr The instruction length in bytes.
3039 */
3040IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrInvlpg(PVMCPU pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr)
3041{
3042 VMXVEXITINFO ExitInfo;
3043 RT_ZERO(ExitInfo);
3044 ExitInfo.uReason = VMX_EXIT_INVLPG;
3045 ExitInfo.cbInstr = cbInstr;
3046 ExitInfo.u64Qual = GCPtrPage;
3047 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(ExitInfo.u64Qual));
3048
3049 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3050}
3051
3052
3053/**
3054 * VMX VM-exit handler for VM-exits due to LMSW.
3055 *
3056 * @returns Strict VBox status code.
3057 * @param pVCpu The cross context virtual CPU structure.
3058 * @param uGuestCr0 The current guest CR0.
3059 * @param pu16NewMsw The machine-status word specified in LMSW's source
3060 * operand. This will be updated depending on the VMX
3061 * guest/host CR0 mask if LMSW is not intercepted.
3062 * @param GCPtrEffDst The guest-linear address of the source operand in case
3063 * of a memory operand. For register operand, pass
3064 * NIL_RTGCPTR.
3065 * @param cbInstr The instruction length in bytes.
3066 */
3067IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrLmsw(PVMCPU pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw, RTGCPTR GCPtrEffDst,
3068 uint8_t cbInstr)
3069{
3070 /*
3071 * LMSW VM-exits are subject to the CR0 guest/host mask and the CR0 read shadow.
3072 *
3073 * See Intel spec. 24.6.6 "Guest/Host Masks and Read Shadows for CR0 and CR4".
3074 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3075 */
3076 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3077 Assert(pVmcs);
3078 Assert(pu16NewMsw);
3079
3080 bool fIntercept = false;
3081 uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
3082 uint32_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u;
3083
3084 /*
3085 * LMSW can never clear CR0.PE but it may set it. Hence, we handle the
3086 * CR0.PE case first, before the rest of the bits in the MSW.
3087 *
3088 * If CR0.PE is owned by the host and CR0.PE differs between the
3089 * MSW (source operand) and the read-shadow, we must cause a VM-exit.
3090 */
3091 if ( (fGstHostMask & X86_CR0_PE)
3092 && (*pu16NewMsw & X86_CR0_PE)
3093 && !(fReadShadow & X86_CR0_PE))
3094 fIntercept = true;
3095
3096 /*
3097 * If CR0.MP, CR0.EM or CR0.TS is owned by the host, and the corresponding
3098 * bits differ between the MSW (source operand) and the read-shadow, we must
3099 * cause a VM-exit.
3100 */
3101 uint32_t fGstHostLmswMask = fGstHostMask & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3102 if ((fReadShadow & fGstHostLmswMask) != (*pu16NewMsw & fGstHostLmswMask))
3103 fIntercept = true;
3104
3105 if (fIntercept)
3106 {
3107 Log2(("lmsw: Guest intercept -> VM-exit\n"));
3108
3109 VMXVEXITINFO ExitInfo;
3110 RT_ZERO(ExitInfo);
3111 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3112 ExitInfo.cbInstr = cbInstr;
3113
3114 bool const fMemOperand = RT_BOOL(GCPtrEffDst != NIL_RTGCPTR);
3115 if (fMemOperand)
3116 {
3117 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode || !RT_HI_U32(GCPtrEffDst));
3118 ExitInfo.u64GuestLinearAddr = GCPtrEffDst;
3119 }
3120
3121 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
3122 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_LMSW)
3123 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_OP, fMemOperand)
3124 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_LMSW_DATA, *pu16NewMsw);
3125
3126 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3127 }
3128
3129 /*
3130     * If LMSW did not cause a VM-exit, any CR0 bits in the range 0:3 that are set in the
3131 * CR0 guest/host mask must be left unmodified.
3132 *
3133 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3134 */
3135 fGstHostLmswMask = fGstHostMask & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3136 *pu16NewMsw = (uGuestCr0 & fGstHostLmswMask) | (*pu16NewMsw & ~fGstHostLmswMask);
3137
3138 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3139}
3140
3141
3142/**
3143 * VMX VM-exit handler for VM-exits due to CLTS.
3144 *
3145 * @returns Strict VBox status code.
3146 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the CLTS instruction did not cause a
3147 * VM-exit but must not modify the guest CR0.TS bit.
3148 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the CLTS instruction did not cause a
3149 * VM-exit and modification to the guest CR0.TS bit is allowed (subject to
3150 * CR0 fixed bits in VMX operation).
3151 * @param pVCpu The cross context virtual CPU structure.
3152 * @param cbInstr The instruction length in bytes.
3153 */
3154IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrClts(PVMCPU pVCpu, uint8_t cbInstr)
3155{
3156 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3157 Assert(pVmcs);
3158
3159 uint32_t const fGstHostMask = pVmcs->u64Cr0Mask.u;
3160 uint32_t const fReadShadow = pVmcs->u64Cr0ReadShadow.u;
3161
3162 /*
3163 * If CR0.TS is owned by the host:
3164 * - If CR0.TS is set in the read-shadow, we must cause a VM-exit.
3165 * - If CR0.TS is cleared in the read-shadow, no VM-exit is caused and the
3166 * CLTS instruction completes without clearing CR0.TS.
3167 *
3168 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3169 */
3170 if (fGstHostMask & X86_CR0_TS)
3171 {
3172 if (fReadShadow & X86_CR0_TS)
3173 {
3174 Log2(("clts: Guest intercept -> VM-exit\n"));
3175
3176 VMXVEXITINFO ExitInfo;
3177 RT_ZERO(ExitInfo);
3178 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3179 ExitInfo.cbInstr = cbInstr;
3180
3181 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 0) /* CR0 */
3182 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_CLTS);
3183 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3184 }
3185
3186 return VINF_VMX_MODIFIES_BEHAVIOR;
3187 }
3188
3189 /*
3190     * If CR0.TS is not owned by the host, the CLTS instruction operates normally
3191 * and may modify CR0.TS (subject to CR0 fixed bits in VMX operation).
3192 */
3193 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3194}
3195
3196
3197/**
3198 * VMX VM-exit handler for VM-exits due to 'Mov CR0,GReg' and 'Mov CR4,GReg'
3199 * (CR0/CR4 write).
3200 *
3201 * @returns Strict VBox status code.
3202 * @param pVCpu The cross context virtual CPU structure.
3203 * @param iCrReg The control register (either CR0 or CR4).
3205 * @param puNewCrX Pointer to the new CR0/CR4 value. Will be updated
3206 * if no VM-exit is caused.
3207 * @param iGReg The general register from which the CR0/CR4 value is
3208 * being loaded.
3209 * @param cbInstr The instruction length in bytes.
3210 */
3211IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr0Cr4(PVMCPU pVCpu, uint8_t iCrReg, uint64_t *puNewCrX, uint8_t iGReg,
3212 uint8_t cbInstr)
3213{
3214 Assert(puNewCrX);
3215 Assert(iCrReg == 0 || iCrReg == 4);
3216
3217 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3218 Assert(pVmcs);
3219
3220 uint64_t uGuestCrX;
3221 uint64_t fGstHostMask;
3222 uint64_t fReadShadow;
3223 if (iCrReg == 0)
3224 {
3225 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);
3226 uGuestCrX = pVCpu->cpum.GstCtx.cr0;
3227 fGstHostMask = pVmcs->u64Cr0Mask.u;
3228 fReadShadow = pVmcs->u64Cr0ReadShadow.u;
3229 }
3230 else
3231 {
3232 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4);
3233 uGuestCrX = pVCpu->cpum.GstCtx.cr4;
3234 fGstHostMask = pVmcs->u64Cr4Mask.u;
3235 fReadShadow = pVmcs->u64Cr4ReadShadow.u;
3236 }
3237
3238 /*
3239 * For any CR0/CR4 bit owned by the host (in the CR0/CR4 guest/host mask), if the
3240 * corresponding bits differ between the source operand and the read-shadow,
3241 * we must cause a VM-exit.
3242 *
3243 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3244 */
3245 if ((fReadShadow & fGstHostMask) != (*puNewCrX & fGstHostMask))
3246 {
3247 Log2(("mov_Cr_Rd: (CR%u) Guest intercept -> VM-exit\n", iCrReg));
3248
3249 VMXVEXITINFO ExitInfo;
3250 RT_ZERO(ExitInfo);
3251 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3252 ExitInfo.cbInstr = cbInstr;
3253
3254 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, iCrReg)
3255 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3256 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3257 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3258 }
3259
3260 /*
3261 * If the Mov-to-CR0/CR4 did not cause a VM-exit, any bits owned by the host
3262     * must not be modified by the instruction.
3263 *
3264 * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
3265 */
3266 *puNewCrX = (uGuestCrX & fGstHostMask) | (*puNewCrX & ~fGstHostMask);
3267
3268 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3269}
3270
3271
3272/**
3273 * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR3' (CR3 read).
3274 *
3275 * @returns VBox strict status code.
3276 * @param pVCpu The cross context virtual CPU structure.
3277 * @param iGReg The general register to which the CR3 value is being stored.
3278 * @param cbInstr The instruction length in bytes.
3279 */
3280IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3281{
3282 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3283 Assert(pVmcs);
3284 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
3285
3286 /*
3287 * If the CR3-store exiting control is set, we must cause a VM-exit.
3288 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3289 */
3290 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR3_STORE_EXIT)
3291 {
3292 Log2(("mov_Rd_Cr: (CR3) Guest intercept -> VM-exit\n"));
3293
3294 VMXVEXITINFO ExitInfo;
3295 RT_ZERO(ExitInfo);
3296 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3297 ExitInfo.cbInstr = cbInstr;
3298
3299 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
3300 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ)
3301 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3302 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3303 }
3304
3305 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3306}
3307
3308
3309/**
3310 * VMX VM-exit handler for VM-exits due to 'Mov CR3,GReg' (CR3 write).
3311 *
3312 * @returns VBox strict status code.
3313 * @param pVCpu The cross context virtual CPU structure.
3314 * @param uNewCr3 The new CR3 value.
3315 * @param iGReg The general register from which the CR3 value is being
3316 * loaded.
3317 * @param cbInstr The instruction length in bytes.
3318 */
3319IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr3(PVMCPU pVCpu, uint64_t uNewCr3, uint8_t iGReg, uint8_t cbInstr)
3320{
3321 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3322 Assert(pVmcs);
3323
3324 /*
3325 * If the CR3-load exiting control is set and the new CR3 value does not
3326 * match any of the CR3-target values in the VMCS, we must cause a VM-exit.
3327 *
3328 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3329 */
    if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR3_LOAD_EXIT)
    {
        uint32_t const uCr3TargetCount = pVmcs->u32Cr3TargetCount;
        Assert(uCr3TargetCount <= VMX_V_CR3_TARGET_COUNT);

        /* A VM-exit is caused only when the new CR3 matches none of the CR3-target values.
           When the CR3-target count is 0, every CR3 load is intercepted. */
        bool fIntercept = true;
        for (uint32_t idxCr3Target = 0; idxCr3Target < uCr3TargetCount; idxCr3Target++)
        {
            uint64_t const uCr3TargetValue = iemVmxVmcsGetCr3TargetValue(pVmcs, idxCr3Target);
            if (uNewCr3 == uCr3TargetValue)
            {
                fIntercept = false;
                break;
            }
        }

        if (fIntercept)
        {
            Log2(("mov_Cr_Rd: (CR3) Guest intercept -> VM-exit\n"));

            VMXVEXITINFO ExitInfo;
            RT_ZERO(ExitInfo);
            ExitInfo.uReason = VMX_EXIT_MOV_CRX;
            ExitInfo.cbInstr = cbInstr;

            ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 3) /* CR3 */
                             | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
                             | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
            return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
        }
    }
3354
3355 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3356}
3357
3358
3359/**
3360 * VMX VM-exit handler for VM-exits due to 'Mov GReg,CR8' (CR8 read).
3361 *
3362 * @returns VBox strict status code.
3363 * @param pVCpu The cross context virtual CPU structure.
3364 * @param iGReg The general register to which the CR8 value is being stored.
3365 * @param cbInstr The instruction length in bytes.
3366 */
3367IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovFromCr8(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3368{
3369 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3370 Assert(pVmcs);
3371
3372 /*
3373 * If the CR8-store exiting control is set, we must cause a VM-exit.
3374 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3375 */
3376 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR8_STORE_EXIT)
3377 {
3378 Log2(("mov_Rd_Cr: (CR8) Guest intercept -> VM-exit\n"));
3379
3380 VMXVEXITINFO ExitInfo;
3381 RT_ZERO(ExitInfo);
3382 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3383 ExitInfo.cbInstr = cbInstr;
3384
3385 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
3386 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_READ)
3387 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3388 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3389 }
3390
3391 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3392}
3393
3394
3395/**
3396 * VMX VM-exit handler for VM-exits due to 'Mov CR8,GReg' (CR8 write).
3397 *
3398 * @returns VBox strict status code.
3399 * @param pVCpu The cross context virtual CPU structure.
3400 * @param iGReg The general register from which the CR8 value is being
3401 * loaded.
3402 * @param cbInstr The instruction length in bytes.
3403 */
3404IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovToCr8(PVMCPU pVCpu, uint8_t iGReg, uint8_t cbInstr)
3405{
3406 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3407 Assert(pVmcs);
3408
3409 /*
3410 * If the CR8-load exiting control is set, we must cause a VM-exit.
3411 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3412 */
3413 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_CR8_LOAD_EXIT)
3414 {
3415 Log2(("mov_Cr_Rd: (CR8) Guest intercept -> VM-exit\n"));
3416
3417 VMXVEXITINFO ExitInfo;
3418 RT_ZERO(ExitInfo);
3419 ExitInfo.uReason = VMX_EXIT_MOV_CRX;
3420 ExitInfo.cbInstr = cbInstr;
3421
3422 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_REGISTER, 8) /* CR8 */
3423 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_ACCESS, VMX_EXIT_QUAL_CRX_ACCESS_WRITE)
3424 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_CRX_GENREG, iGReg);
3425 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3426 }
3427
3428 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3429}
3430
3431
3432/**
3433 * VMX VM-exit handler for VM-exits due to 'Mov DRx,GReg' (DRx write) and 'Mov
3434 * GReg,DRx' (DRx read).
3435 *
3436 * @returns VBox strict status code.
3437 * @param pVCpu The cross context virtual CPU structure.
3438 * @param   uInstrId        The instruction identity (VMXINSTRID_MOV_TO_DRX or
3439 * VMXINSTRID_MOV_FROM_DRX).
3440 * @param iDrReg The debug register being accessed.
3441 * @param iGReg The general register to/from which the DRx value is being
3442 *                          stored/loaded.
3443 * @param cbInstr The instruction length in bytes.
3444 */
3445IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMovDrX(PVMCPU pVCpu, VMXINSTRID uInstrId, uint8_t iDrReg, uint8_t iGReg,
3446 uint8_t cbInstr)
3447{
3448 Assert(iDrReg <= 7);
3449 Assert(uInstrId == VMXINSTRID_MOV_TO_DRX || uInstrId == VMXINSTRID_MOV_FROM_DRX);
3450
3451 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3452 Assert(pVmcs);
3453
3454 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_MOV_DR_EXIT)
3455 {
3456 uint32_t const uDirection = uInstrId == VMXINSTRID_MOV_TO_DRX ? VMX_EXIT_QUAL_DRX_DIRECTION_WRITE
3457 : VMX_EXIT_QUAL_DRX_DIRECTION_READ;
3458 VMXVEXITINFO ExitInfo;
3459 RT_ZERO(ExitInfo);
3460 ExitInfo.uReason = VMX_EXIT_MOV_DRX;
3461 ExitInfo.cbInstr = cbInstr;
3462 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_REGISTER, iDrReg)
3463 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_DIRECTION, uDirection)
3464 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_DRX_GENREG, iGReg);
3465 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3466 }
3467
3468 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3469}
3470
3471
3472/**
3473 * VMX VM-exit handler for VM-exits due to I/O instructions (IN and OUT).
3474 *
3475 * @returns VBox strict status code.
3476 * @param pVCpu The cross context virtual CPU structure.
3477 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_IO_IN or
3478 * VMXINSTRID_IO_OUT).
3479 * @param u16Port The I/O port being accessed.
3480 * @param fImm Whether the I/O port was encoded using an immediate operand
3481 * or the implicit DX register.
3482 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3483 * @param cbInstr The instruction length in bytes.
3484 */
3485IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrIo(PVMCPU pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, bool fImm, uint8_t cbAccess,
3486 uint8_t cbInstr)
3487{
3488 Assert(uInstrId == VMXINSTRID_IO_IN || uInstrId == VMXINSTRID_IO_OUT);
3489 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3490
3491 bool const fIntercept = iemVmxIsIoInterceptSet(pVCpu, u16Port, cbAccess);
3492 if (fIntercept)
3493 {
3494 uint32_t const uDirection = uInstrId == VMXINSTRID_IO_IN ? VMX_EXIT_QUAL_IO_DIRECTION_IN
3495 : VMX_EXIT_QUAL_IO_DIRECTION_OUT;
3496 VMXVEXITINFO ExitInfo;
3497 RT_ZERO(ExitInfo);
3498 ExitInfo.uReason = VMX_EXIT_IO_INSTR;
3499 ExitInfo.cbInstr = cbInstr;
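        /* The I/O width field encodes the access size minus one: 0 = 1 byte, 1 = 2 bytes,
           3 = 4 bytes. See Intel spec. 27.2.1 "Basic VM-Exit Information". */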
3500 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_WIDTH, cbAccess - 1)
3501 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_DIRECTION, uDirection)
3502 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_ENCODING, fImm)
3503 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_PORT, u16Port);
3504 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3505 }
3506
3507 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3508}
3509
3510
3511/**
3512 * VMX VM-exit handler for VM-exits due to string I/O instructions (INS and OUTS).
3513 *
3514 * @returns VBox strict status code.
3515 * @param pVCpu The cross context virtual CPU structure.
3516 * @param uInstrId The VM-exit instruction identity (VMXINSTRID_IO_INS or
3517 * VMXINSTRID_IO_OUTS).
3518 * @param u16Port The I/O port being accessed.
3519 * @param cbAccess The size of the I/O access in bytes (1, 2 or 4 bytes).
3520 * @param fRep Whether the instruction has a REP prefix or not.
3521 * @param ExitInstrInfo The VM-exit instruction info. field.
3522 * @param cbInstr The instruction length in bytes.
3523 */
3524IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrStrIo(PVMCPU pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, uint8_t cbAccess, bool fRep,
3525 VMXEXITINSTRINFO ExitInstrInfo, uint8_t cbInstr)
3526{
3527 Assert(uInstrId == VMXINSTRID_IO_INS || uInstrId == VMXINSTRID_IO_OUTS);
3528 Assert(cbAccess == 1 || cbAccess == 2 || cbAccess == 4);
3529 Assert(ExitInstrInfo.StrIo.iSegReg < X86_SREG_COUNT);
3530 Assert(ExitInstrInfo.StrIo.u3AddrSize == 0 || ExitInstrInfo.StrIo.u3AddrSize == 1 || ExitInstrInfo.StrIo.u3AddrSize == 2);
3531 Assert(uInstrId != VMXINSTRID_IO_INS || ExitInstrInfo.StrIo.iSegReg == X86_SREG_ES);
3532
3533 bool const fIntercept = iemVmxIsIoInterceptSet(pVCpu, u16Port, cbAccess);
3534 if (fIntercept)
3535 {
3536 /*
3537 * Figure out the guest-linear address and the direction bit (INS/OUTS).
3538 */
3539 /** @todo r=ramshankar: Is there something in IEM that already does this? */
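        /* Address-size 0 is 16-bit, 1 is 32-bit and 2 is 64-bit; the mask below is applied
           to RDI (for INS) or RSI (for OUTS) when forming the guest-linear address. */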
3540 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
3541 uint8_t const iSegReg = ExitInstrInfo.StrIo.iSegReg;
3542 uint8_t const uAddrSize = ExitInstrInfo.StrIo.u3AddrSize;
3543 uint64_t const uAddrSizeMask = s_auAddrSizeMasks[uAddrSize];
3544
3545 uint32_t uDirection;
3546 uint64_t uGuestLinearAddr;
3547 if (uInstrId == VMXINSTRID_IO_INS)
3548 {
3549 uDirection = VMX_EXIT_QUAL_IO_DIRECTION_IN;
3550 uGuestLinearAddr = pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base + (pVCpu->cpum.GstCtx.rdi & uAddrSizeMask);
3551 }
3552 else
3553 {
3554 uDirection = VMX_EXIT_QUAL_IO_DIRECTION_OUT;
3555 uGuestLinearAddr = pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base + (pVCpu->cpum.GstCtx.rsi & uAddrSizeMask);
3556 }
3557
3558 /*
3559         * If the segment is unusable, the guest-linear address is undefined.
3560 * We shall clear it for consistency.
3561 *
3562 * See Intel spec. 27.2.1 "Basic VM-Exit Information".
3563 */
3564 if (pVCpu->cpum.GstCtx.aSRegs[iSegReg].Attr.n.u1Unusable)
3565 uGuestLinearAddr = 0;
3566
3567 VMXVEXITINFO ExitInfo;
3568 RT_ZERO(ExitInfo);
3569 ExitInfo.uReason = VMX_EXIT_IO_INSTR;
3570 ExitInfo.cbInstr = cbInstr;
3571 ExitInfo.InstrInfo = ExitInstrInfo;
3572 ExitInfo.u64GuestLinearAddr = uGuestLinearAddr;
3573 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_WIDTH, cbAccess - 1)
3574 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_DIRECTION, uDirection)
3575 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_IS_STRING, 1)
3576 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_IS_REP, fRep)
3577 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_ENCODING, VMX_EXIT_QUAL_IO_ENCODING_DX)
3578 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_IO_PORT, u16Port);
3579 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3580 }
3581
3582 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3583}
3584
3585
3586/**
3587 * VMX VM-exit handler for VM-exits due to MWAIT.
3588 *
3589 * @returns VBox strict status code.
3590 * @param pVCpu The cross context virtual CPU structure.
3591 * @param fMonitorHwArmed Whether the address-range monitor hardware is armed.
3592 * @param cbInstr The instruction length in bytes.
3593 */
3594IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrMwait(PVMCPU pVCpu, bool fMonitorHwArmed, uint8_t cbInstr)
3595{
3596 VMXVEXITINFO ExitInfo;
3597 RT_ZERO(ExitInfo);
3598 ExitInfo.uReason = VMX_EXIT_MWAIT;
3599 ExitInfo.cbInstr = cbInstr;
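    /* Bit 0 of the exit qualification reports whether address-range monitoring hardware
       was armed at the time of the MWAIT. See Intel spec. 27.2.1 "Basic VM-Exit Information". */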
3600 ExitInfo.u64Qual = fMonitorHwArmed;
3601 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3602}
3603
3604
3605/**
3606 * VMX VM-exit handler for VM-exits due to PAUSE.
3607 *
3608 * @returns VBox strict status code.
3609 * @param pVCpu The cross context virtual CPU structure.
3610 * @param cbInstr The instruction length in bytes.
3611 */
3612IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrPause(PVMCPU pVCpu, uint8_t cbInstr)
3613{
3614 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3615 Assert(pVmcs);
3616
3617 /*
3618 * The PAUSE VM-exit is controlled by the "PAUSE exiting" control and the
3619 * "PAUSE-loop exiting" control.
3620 *
3621     * The PLE-Gap is the maximum number of TSC ticks between two successive executions of
3622     * PAUSE for them to be considered part of the same pause loop. The PLE-Window is the maximum amount
3623 * of TSC ticks the guest is allowed to execute in a pause loop before we must cause
3624 * a VM-exit.
3625 *
3626 * See Intel spec. 24.6.13 "Controls for PAUSE-Loop Exiting".
3627 * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
3628 */
3629 bool fIntercept = false;
3630 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_PAUSE_EXIT)
3631 fIntercept = true;
3632 else if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
3633 && pVCpu->iem.s.uCpl == 0)
3634 {
3635 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
3636
3637 /*
3638         * A previous-PAUSE-tick value of 0 is used to identify the first execution
3639         * of a PAUSE instruction after VM-entry at CPL 0. We must consider this to
3640         * be the first execution of PAUSE in a loop according to the Intel spec.
3641         *
3642         * Whenever we record the previous-PAUSE-tick below, we OR in 1 so that the
3643         * recorded value can never be zero, which rules out the TSC wrap-around
3644         * case at 0.
3645 */
3646 uint64_t *puFirstPauseLoopTick = &pVCpu->cpum.GstCtx.hwvirt.vmx.uFirstPauseLoopTick;
3647 uint64_t *puPrevPauseTick = &pVCpu->cpum.GstCtx.hwvirt.vmx.uPrevPauseTick;
3648 uint64_t const uTick = TMCpuTickGet(pVCpu);
3649 uint32_t const uPleGap = pVmcs->u32PleGap;
3650 uint32_t const uPleWindow = pVmcs->u32PleWindow;
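        /* If this PAUSE arrives more than PLE-Gap ticks after the previous one, start a new
           pause-loop window; otherwise, once the loop has lasted longer than PLE-Window ticks,
           intercept it. */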
3651 if ( *puPrevPauseTick == 0
3652 || uTick - *puPrevPauseTick > uPleGap)
3653 *puFirstPauseLoopTick = uTick;
3654 else if (uTick - *puFirstPauseLoopTick > uPleWindow)
3655 fIntercept = true;
3656
3657 *puPrevPauseTick = uTick | 1;
3658 }
3659
3660 if (fIntercept)
3661 {
3662 VMXVEXITINFO ExitInfo;
3663 RT_ZERO(ExitInfo);
3664 ExitInfo.uReason = VMX_EXIT_PAUSE;
3665 ExitInfo.cbInstr = cbInstr;
3666 return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
3667 }
3668
3669 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3670}
3671
3672
3673/**
3674 * VMX VM-exit handler for VM-exits due to task switches.
3675 *
3676 * @returns VBox strict status code.
3677 * @param pVCpu The cross context virtual CPU structure.
3678 * @param enmTaskSwitch The cause of the task switch.
3679 * @param SelNewTss The selector of the new TSS.
3680 * @param cbInstr The instruction length in bytes.
3681 */
3682IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr)
3683{
3684 /*
3685 * Task-switch VM-exits are unconditional and provide the VM-exit qualification.
3686 *
3687     * If the cause of the task switch is the execution of the CALL, IRET or JMP
3688     * instruction, or if delivery of an exception generated by one of these
3689     * instructions leads to a task switch through a task gate in the IDT, we need to
3690     * provide the VM-exit instruction length. Any other means of invoking a task
3691     * switch VM-exit leaves the VM-exit instruction length field undefined.
3692 *
3693 * See Intel spec. 25.2 "Other Causes Of VM Exits".
3694 * See Intel spec. 27.2.4 "Information for VM Exits Due to Instruction Execution".
3695 */
3696 Assert(cbInstr <= 15);
3697
3698 uint8_t uType;
3699 switch (enmTaskSwitch)
3700 {
3701 case IEMTASKSWITCH_CALL: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_CALL; break;
3702 case IEMTASKSWITCH_IRET: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IRET; break;
3703 case IEMTASKSWITCH_JUMP: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_JMP; break;
3704 case IEMTASKSWITCH_INT_XCPT: uType = VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT; break;
3705 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3706 }
3707
3708 uint64_t const uExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_NEW_TSS, SelNewTss)
3709 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_TASK_SWITCH_SOURCE, uType);
3710 iemVmxVmcsSetExitQual(pVCpu, uExitQual);
3711 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
3712 return iemVmxVmexit(pVCpu, VMX_EXIT_TASK_SWITCH);
3713}
3714
3715
3716/**
3717 * VMX VM-exit handler for VM-exits due to expiry of the preemption timer.
3718 *
3719 * @returns VBox strict status code.
3720 * @param pVCpu The cross context virtual CPU structure.
3721 */
3722IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPU pVCpu)
3723{
3724 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3725 Assert(pVmcs);
3726 Assert(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER);
3727 NOREF(pVmcs);
3728
3729 iemVmxVmcsSetExitQual(pVCpu, 0);
3730 return iemVmxVmexit(pVCpu, VMX_EXIT_PREEMPT_TIMER);
3731}
3732
3733
3734/**
3735 * VMX VM-exit handler for VM-exits due to external interrupts.
3736 *
3737 * @returns VBox strict status code.
3738 * @param pVCpu The cross context virtual CPU structure.
3739 * @param uVector The external interrupt vector.
3740 * @param fIntPending Whether the external interrupt is pending or
3741 *                          acknowledged in the interrupt controller.
3742 */
3743IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
3744{
3745 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3746 Assert(pVmcs);
3747
3748    /* The VM-exit is subject to the "External interrupt exiting" control being set. */
3749 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT)
3750 {
3751 if (fIntPending)
3752 {
3753 /*
3754 * If the interrupt is pending and we don't need to acknowledge the
3755 * interrupt on VM-exit, cause the VM-exit immediately.
3756 *
3757 * See Intel spec 25.2 "Other Causes Of VM Exits".
3758 */
3759 if (!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT))
3760 {
3761 iemVmxVmcsSetExitIntInfo(pVCpu, 0);
3762 iemVmxVmcsSetExitIntErrCode(pVCpu, 0);
3763 iemVmxVmcsSetExitQual(pVCpu, 0);
3764 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT);
3765 }
3766
3767 /*
3768 * If the interrupt is pending and we -do- need to acknowledge the interrupt
3769             * on VM-exit, postpone the VM-exit until after the interrupt has been
3770             * acknowledged with the interrupt controller and consumed.
3771 */
3772 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3773 }
3774
3775 /*
3776 * If the interrupt is no longer pending (i.e. it has been acknowledged) and the
3777 * "External interrupt exiting" and "Acknowledge interrupt on VM-exit" controls are
3778     * both set, we cause the VM-exit now. We need to record the external interrupt that
3779 * just occurred in the VM-exit interruption information field.
3780 *
3781 * See Intel spec. 27.2.2 "Information for VM Exits Due to Vectored Events".
3782 */
3783 if (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_ACK_EXT_INT)
3784 {
3785 uint8_t const fNmiUnblocking = 0; /** @todo NSTVMX: Implement NMI-unblocking due to IRET. */
3786 uint32_t const uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, uVector)
3787 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_EXT_INT)
3788 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, fNmiUnblocking)
3789 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
3790 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
3791 iemVmxVmcsSetExitIntErrCode(pVCpu, 0);
3792 iemVmxVmcsSetExitQual(pVCpu, 0);
3793 return iemVmxVmexit(pVCpu, VMX_EXIT_EXT_INT);
3794 }
3795 }
3796
3797 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3798}
3799
3800
3801/**
3802 * VMX VM-exit handler for VM-exits due to startup-IPIs (SIPI).
3803 *
3804 * @returns VBox strict status code.
3805 * @param pVCpu The cross context virtual CPU structure.
3806 * @param uVector The SIPI vector.
3807 */
3808IEM_STATIC VBOXSTRICTRC iemVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)
3809{
3810 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3811 Assert(pVmcs);
3812
3813 iemVmxVmcsSetExitQual(pVCpu, uVector);
3814 return iemVmxVmexit(pVCpu, VMX_EXIT_SIPI);
3815}
3816
3817
3818/**
3819 * VMX VM-exit handler for VM-exits due to init-IPIs (INIT).
3820 *
3821 * @returns VBox strict status code.
3822 * @param pVCpu The cross context virtual CPU structure.
3823 */
3824IEM_STATIC VBOXSTRICTRC iemVmxVmexitInitIpi(PVMCPU pVCpu)
3825{
3826 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3827 Assert(pVmcs);
3828
3829 iemVmxVmcsSetExitQual(pVCpu, 0);
3830 return iemVmxVmexit(pVCpu, VMX_EXIT_INIT_SIGNAL);
3831}
3832
3833
3834/**
3835 * VMX VM-exit handler for interrupt-window VM-exits.
3836 *
3837 * @returns VBox strict status code.
3838 * @param pVCpu The cross context virtual CPU structure.
3839 */
3840IEM_STATIC VBOXSTRICTRC iemVmxVmexitIntWindow(PVMCPU pVCpu)
3841{
3842 iemVmxVmcsSetExitQual(pVCpu, 0);
3843 return iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW);
3844}
3845
3846
3847/**
3848 * VMX VM-exit handler for VM-exits due to delivery of an event.
3849 *
3850 * @returns VBox strict status code.
3851 * @param pVCpu The cross context virtual CPU structure.
3852 * @param uVector The interrupt / exception vector.
3853 * @param fFlags The flags (see IEM_XCPT_FLAGS_XXX).
3854 * @param uErrCode The error code associated with the event.
3855 * @param uCr2 The CR2 value in case of a \#PF exception.
3856 * @param cbInstr The instruction length in bytes.
3857 */
3858IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2,
3859 uint8_t cbInstr)
3860{
3861 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3862 Assert(pVmcs);
3863
3864 /*
3865 * If the event is being injected as part of VM-entry, it isn't subject to event
3866 * intercepts in the nested-guest. However, secondary exceptions that occur during
3867 * injection of any event -are- subject to event interception.
3868 *
3869 * See Intel spec. 26.5.1.2 "VM Exits During Event Injection".
3870 */
3871 if (!pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents)
3872 {
3873 /* Update the IDT-vectoring event in the VMCS as the source of the upcoming event. */
3874 uint8_t const uIdtVectoringType = iemVmxGetEventType(uVector, fFlags);
3875 uint8_t const fErrCodeValid = (fFlags & IEM_XCPT_FLAGS_ERR);
3876 uint32_t const uIdtVectoringInfo = RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_VECTOR, uVector)
3877 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_TYPE, uIdtVectoringType)
3878 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_ERR_CODE_VALID, fErrCodeValid)
3879 | RT_BF_MAKE(VMX_BF_IDT_VECTORING_INFO_VALID, 1);
3880 iemVmxVmcsSetIdtVectoringInfo(pVCpu, uIdtVectoringInfo);
3881 iemVmxVmcsSetIdtVectoringErrCode(pVCpu, uErrCode);
3882
3883 pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents = true;
3884 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3885 }
3886
3887 /*
3888 * We are injecting an external interrupt, check if we need to cause a VM-exit now.
3889 * If not, the caller will continue delivery of the external interrupt as it would
3890 * normally.
3891 */
3892 if (fFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3893 {
3894 Assert(!VMX_IDT_VECTORING_INFO_IS_VALID(pVmcs->u32RoIdtVectoringInfo));
3895 return iemVmxVmexitExtInt(pVCpu, uVector, false /* fIntPending */);
3896 }
3897
3898 /*
3899 * Evaluate intercepts for hardware exceptions including #BP, #DB, #OF
3900 * generated by INT3, INT1 (ICEBP) and INTO respectively.
3901 */
3902 Assert(fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_SOFT_INT));
3903 bool fIntercept = false;
3904 bool fIsHwXcpt = false;
3905 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3906 || (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR)))
3907 {
3908 fIsHwXcpt = true;
3909 /* NMIs have a dedicated VM-execution control for causing VM-exits. */
3910 if (uVector == X86_XCPT_NMI)
3911 fIntercept = RT_BOOL(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT);
3912 else
3913 {
3914             /* Page-faults are subject to masking based on their error code. */
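            /* If the PF bit is set in the exception bitmap, a #PF causes a VM-exit only when
               (error-code & mask) == match; if the bit is clear, only when they differ.
               Flipping the PF bit below implements this. See Intel spec. 24.6.3 "Exception Bitmap". */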
3915 uint32_t fXcptBitmap = pVmcs->u32XcptBitmap;
3916 if (uVector == X86_XCPT_PF)
3917 {
3918 uint32_t const fXcptPFMask = pVmcs->u32XcptPFMask;
3919 uint32_t const fXcptPFMatch = pVmcs->u32XcptPFMatch;
3920 if ((uErrCode & fXcptPFMask) != fXcptPFMatch)
3921 fXcptBitmap ^= RT_BIT(X86_XCPT_PF);
3922 }
3923
3924 /* Consult the exception bitmap for all hardware exceptions (except NMI). */
3925 if (fXcptBitmap & RT_BIT(uVector))
3926 fIntercept = true;
3927 }
3928 }
3929 /* else: Software interrupts cannot be intercepted and therefore do not cause a VM-exit. */
3930
3931 /*
3932 * Now that we've determined whether the software interrupt or hardware exception
3933 * causes a VM-exit, we need to construct the relevant VM-exit information and
3934 * cause the VM-exit.
3935 */
3936 if (fIntercept)
3937 {
3938 Assert(!(fFlags & IEM_XCPT_FLAGS_T_EXT_INT));
3939
3940 /* Construct the rest of the event related information fields and cause the VM-exit. */
3941 uint64_t uExitQual = 0;
3942 if (fIsHwXcpt)
3943 {
3944 if (uVector == X86_XCPT_PF)
3945 uExitQual = uCr2;
3946 else if (uVector == X86_XCPT_DB)
3947 {
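                /* For debug exceptions the exit qualification holds the pending debug status
                   in DR6 format. See Intel spec. 27.2.1 "Basic VM-Exit Information". */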
3948 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR6);
3949 uExitQual = pVCpu->cpum.GstCtx.dr[6] & VMX_VMCS_EXIT_QUAL_VALID_MASK;
3950 }
3951 }
3952
3953 uint8_t const fNmiUnblocking = 0; /** @todo NSTVMX: Implement NMI-unblocking due to IRET. */
3954 uint8_t const fErrCodeValid = (fFlags & IEM_XCPT_FLAGS_ERR);
3955 uint8_t const uIntInfoType = iemVmxGetEventType(uVector, fFlags);
3956 uint32_t const uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, uVector)
3957 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, uIntInfoType)
3958 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_ERR_CODE_VALID, fErrCodeValid)
3959 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_NMI_UNBLOCK_IRET, fNmiUnblocking)
3960 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1);
3961 iemVmxVmcsSetExitIntInfo(pVCpu, uExitIntInfo);
3962 iemVmxVmcsSetExitIntErrCode(pVCpu, uErrCode);
3963 iemVmxVmcsSetExitQual(pVCpu, uExitQual);
3964
3965 /*
3966 * For VM exits due to software exceptions (those generated by INT3 or INTO) or privileged
3967 * software exceptions (those generated by INT1/ICEBP) we need to supply the VM-exit instruction
3968 * length.
3969 */
3970 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
3971 && (fFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_OF_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR)))
3972 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
3973 else
3974 iemVmxVmcsSetExitInstrLen(pVCpu, 0);
3975
3976 return iemVmxVmexit(pVCpu, VMX_EXIT_XCPT_OR_NMI);
3977 }
3978
3979 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
3980}
3981
3982
3983/**
3984 * VMX VM-exit handler for VM-exits due to a triple fault.
3985 *
3986 * @returns VBox strict status code.
3987 * @param pVCpu The cross context virtual CPU structure.
3988 */
3989IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu)
3990{
3991 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
3992 Assert(pVmcs);
3993 iemVmxVmcsSetExitQual(pVCpu, 0);
3994 return iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT);
3995}
3996
3997
3998/**
3999 * Reads a 32-bit register from the virtual-APIC page at the given offset.
4000 *
4001 * @returns The register from the virtual-APIC page.
4002 * @param pVCpu The cross context virtual CPU structure.
4003 * @param offReg The offset of the register being read.
4004 */
4005DECLINLINE(uint32_t) iemVmxVirtApicReadRaw32(PVMCPU pVCpu, uint16_t offReg)
4006{
4007 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
4008 uint8_t const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4009 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4010 uint32_t const uReg = *(const uint32_t *)(pbVirtApic + offReg);
4011 return uReg;
4012}
4013
4014
4015/**
4016 * Reads a 64-bit register from the virtual-APIC page at the given offset.
4017 *
4018 * @returns The register from the virtual-APIC page.
4019 * @param pVCpu The cross context virtual CPU structure.
4020 * @param offReg The offset of the register being read.
4021 */
4022DECLINLINE(uint64_t) iemVmxVirtApicReadRaw64(PVMCPU pVCpu, uint16_t offReg)
4023{
4024    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
4025 uint8_t const *pbVirtApic = (const uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4026 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4027 uint64_t const uReg = *(const uint64_t *)(pbVirtApic + offReg);
4028 return uReg;
4029}
4030
4031
4032/**
4033 * Writes a 32-bit register to the virtual-APIC page at the given offset.
4034 *
4035 * @param pVCpu The cross context virtual CPU structure.
4036 * @param offReg The offset of the register being written.
4037 * @param uReg The register value to write.
4038 */
4039DECLINLINE(void) iemVmxVirtApicWriteRaw32(PVMCPU pVCpu, uint16_t offReg, uint32_t uReg)
4040{
4041 Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint32_t));
4042 uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4043 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4044 *(uint32_t *)(pbVirtApic + offReg) = uReg;
4045}
4046
4047
4048/**
4049 * Writes a 64-bit register to the virtual-APIC page at the given offset.
4050 *
4051 * @param pVCpu The cross context virtual CPU structure.
4052 * @param offReg The offset of the register being written.
4053 * @param uReg The register value to write.
4054 */
4055DECLINLINE(void) iemVmxVirtApicWriteRaw64(PVMCPU pVCpu, uint16_t offReg, uint64_t uReg)
4056{
4057    Assert(offReg <= VMX_V_VIRT_APIC_SIZE - sizeof(uint64_t));
4058 uint8_t *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
4059 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
4060 *(uint64_t *)(pbVirtApic + offReg) = uReg;
4061}
4062
4063
4064/**
4065 * Checks if an access of the APIC page must cause an APIC-access VM-exit.
4066 *
4067 * @param pVCpu The cross context virtual CPU structure.
4068 * @param offAccess The offset of the register being accessed.
4069 * @param cbAccess The size of the access in bytes.
4070 * @param fAccess The type of access (must be IEM_ACCESS_TYPE_READ or
4071 * IEM_ACCESS_TYPE_WRITE).
4072 */
4073IEM_STATIC bool iemVmxVirtApicIsAccessIntercepted(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, uint32_t fAccess)
4074{
4075 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4076 Assert(pVmcs);
4077 Assert(fAccess == IEM_ACCESS_TYPE_READ || fAccess == IEM_ACCESS_TYPE_WRITE);
4078
4079 /*
4080     * We must cause a VM-exit if any of the following are true:
4081     * - TPR shadowing isn't active.
4082     * - The access size exceeds 32 bits.
4083     * - The access is not contained within the low 4 bytes of a 16-byte aligned offset.
     * - The access offset lies beyond the last virtualized APIC register (XAPIC_OFF_END + 4).
4084 *
4085 * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
4086 * See Intel spec. 29.4.3.1 "Determining Whether a Write Access is Virtualized".
4087 */
4088 if ( !(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
4089 || cbAccess > sizeof(uint32_t)
4090 || ((offAccess + cbAccess - 1) & 0xc)
4091 || offAccess >= XAPIC_OFF_END + 4)
4092 return true;
4093
4094 /*
4095 * If the access is part of an operation where we have already
4096     * virtualized a write to the virtual-APIC page, we must cause a VM-exit.
4097 */
4098 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_UPDATE_VAPIC))
4099 return true;
4100
4101 /*
4102 * Check write accesses to the APIC-access page that cause VM-exits.
4103 */
4104 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4105 {
4106 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4107 {
4108 /*
4109 * With APIC-register virtualization, a write access to any of the
4110             * following registers is virtualized. Accessing any other register
4111 * causes a VM-exit.
4112 */
4113 uint16_t const offAlignedAccess = offAccess & 0xfffc;
4114 switch (offAlignedAccess)
4115 {
4116 case XAPIC_OFF_ID:
4117 case XAPIC_OFF_TPR:
4118 case XAPIC_OFF_EOI:
4119 case XAPIC_OFF_LDR:
4120 case XAPIC_OFF_DFR:
4121 case XAPIC_OFF_SVR:
4122 case XAPIC_OFF_ESR:
4123 case XAPIC_OFF_ICR_LO:
4124 case XAPIC_OFF_ICR_HI:
4125 case XAPIC_OFF_LVT_TIMER:
4126 case XAPIC_OFF_LVT_THERMAL:
4127 case XAPIC_OFF_LVT_PERF:
4128 case XAPIC_OFF_LVT_LINT0:
4129 case XAPIC_OFF_LVT_LINT1:
4130 case XAPIC_OFF_LVT_ERROR:
4131 case XAPIC_OFF_TIMER_ICR:
4132 case XAPIC_OFF_TIMER_DCR:
4133 break;
4134 default:
4135 return true;
4136 }
4137 }
4138 else if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4139 {
4140 /*
4141 * With virtual-interrupt delivery, a write access to any of the
4142             * following registers is virtualized. Accessing any other register
4143 * causes a VM-exit.
4144 *
4145 * Note! The specification does not allow writing to offsets in-between
4146 * these registers (e.g. TPR + 1 byte) unlike read accesses.
4147 */
4148 switch (offAccess)
4149 {
4150 case XAPIC_OFF_TPR:
4151 case XAPIC_OFF_EOI:
4152 case XAPIC_OFF_ICR_LO:
4153 break;
4154 default:
4155 return true;
4156 }
4157 }
4158 else
4159 {
4160 /*
4161 * Without APIC-register virtualization or virtual-interrupt delivery,
4162 * only TPR accesses are virtualized.
4163 */
4164 if (offAccess == XAPIC_OFF_TPR)
4165 { /* likely */ }
4166 else
4167 return true;
4168 }
4169 }
4170 else
4171 {
4172 /*
4173 * Check read accesses to the APIC-access page that cause VM-exits.
4174 */
4175 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4176 {
4177 /*
4178 * With APIC-register virtualization, a read access to any of the
4179             * following registers is virtualized. Accessing any other register
4180 * causes a VM-exit.
4181 */
4182 uint16_t const offAlignedAccess = offAccess & 0xfffc;
4183 switch (offAlignedAccess)
4184 {
4185 /** @todo r=ramshankar: What about XAPIC_OFF_LVT_CMCI? */
4186 case XAPIC_OFF_ID:
4187 case XAPIC_OFF_VERSION:
4188 case XAPIC_OFF_TPR:
4189 case XAPIC_OFF_EOI:
4190 case XAPIC_OFF_LDR:
4191 case XAPIC_OFF_DFR:
4192 case XAPIC_OFF_SVR:
4193 case XAPIC_OFF_ISR0: case XAPIC_OFF_ISR1: case XAPIC_OFF_ISR2: case XAPIC_OFF_ISR3:
4194 case XAPIC_OFF_ISR4: case XAPIC_OFF_ISR5: case XAPIC_OFF_ISR6: case XAPIC_OFF_ISR7:
4195 case XAPIC_OFF_TMR0: case XAPIC_OFF_TMR1: case XAPIC_OFF_TMR2: case XAPIC_OFF_TMR3:
4196 case XAPIC_OFF_TMR4: case XAPIC_OFF_TMR5: case XAPIC_OFF_TMR6: case XAPIC_OFF_TMR7:
4197 case XAPIC_OFF_IRR0: case XAPIC_OFF_IRR1: case XAPIC_OFF_IRR2: case XAPIC_OFF_IRR3:
4198 case XAPIC_OFF_IRR4: case XAPIC_OFF_IRR5: case XAPIC_OFF_IRR6: case XAPIC_OFF_IRR7:
4199 case XAPIC_OFF_ESR:
4200 case XAPIC_OFF_ICR_LO:
4201 case XAPIC_OFF_ICR_HI:
4202 case XAPIC_OFF_LVT_TIMER:
4203 case XAPIC_OFF_LVT_THERMAL:
4204 case XAPIC_OFF_LVT_PERF:
4205 case XAPIC_OFF_LVT_LINT0:
4206 case XAPIC_OFF_LVT_LINT1:
4207 case XAPIC_OFF_LVT_ERROR:
4208 case XAPIC_OFF_TIMER_ICR:
4209 case XAPIC_OFF_TIMER_DCR:
4210 break;
4211 default:
4212 return true;
4213 }
4214 }
4215 else
4216 {
4217 /* Without APIC-register virtualization, only TPR accesses are virtualized. */
4218 if (offAccess == XAPIC_OFF_TPR)
4219 { /* likely */ }
4220 else
4221 return true;
4222 }
4223 }
4224
4225 /* The APIC-access is virtualized, does not cause a VM-exit. */
4226 return false;
4227}
4228
4229
4230/**
4231 * VMX VM-exit handler for APIC-write VM-exits.
4232 *
4233 * @param pVCpu The cross context virtual CPU structure.
4234 */
4235IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicWrite(PVMCPU pVCpu)
4236{
4237 iemVmxVmcsSetExitQual(pVCpu, pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite);
4238 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_WRITE);
4239}
4240
4241
4242/**
4243 * VMX VM-exit handler for APIC-accesses.
4244 *
4245 * @param pVCpu The cross context virtual CPU structure.
4246 * @param offAccess The offset of the register being accessed.
4247 * @param fAccess The type of access (must contain IEM_ACCESS_TYPE_READ or
4248 * IEM_ACCESS_TYPE_WRITE or IEM_ACCESS_INSTRUCTION).
4249 */
4250IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess)
4251{
4252 Assert((fAccess & IEM_ACCESS_TYPE_READ) || (fAccess & IEM_ACCESS_TYPE_WRITE) || (fAccess & IEM_ACCESS_INSTRUCTION));
4253
4254 VMXAPICACCESS enmAccess;
4255 bool const fInEventDelivery = IEMGetCurrentXcpt(pVCpu, NULL, NULL, NULL, NULL);
4256 if (fInEventDelivery)
4257 enmAccess = VMXAPICACCESS_LINEAR_EVENT_DELIVERY;
4258 else if (fAccess & IEM_ACCESS_INSTRUCTION)
4259 enmAccess = VMXAPICACCESS_LINEAR_INSTR_FETCH;
4260 else if (fAccess & IEM_ACCESS_TYPE_WRITE)
4261 enmAccess = VMXAPICACCESS_LINEAR_WRITE;
4262 else
4263        enmAccess = VMXAPICACCESS_LINEAR_READ;
4264
4265 uint64_t const uExitQual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_OFFSET, offAccess)
4266 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_TYPE, enmAccess);
4267 iemVmxVmcsSetExitQual(pVCpu, uExitQual);
4268 return iemVmxVmexit(pVCpu, VMX_EXIT_APIC_ACCESS);
4269}
4270
4271
4272/**
4273 * Virtualizes a memory-based APIC-access where the address is not used to access
4274 * memory.
4275 *
4276 * This is for instructions like MONITOR, CLFLUSH, CLFLUSHOPT, ENTER which may cause
4277 * page-faults but do not use the address to access memory.
4278 *
4279 * @param pVCpu The cross context virtual CPU structure.
4280 * @param pGCPhysAccess Pointer to the guest-physical address used.
4281 */
4282IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPU pVCpu, PRTGCPHYS pGCPhysAccess)
4283{
4284 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4285 Assert(pVmcs);
4286 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
4287 Assert(pGCPhysAccess);
4288
4289 RTGCPHYS const GCPhysAccess = *pGCPhysAccess & ~(RTGCPHYS)PAGE_OFFSET_MASK;
4290 RTGCPHYS const GCPhysApic = pVmcs->u64AddrApicAccess.u;
4291 Assert(!(GCPhysApic & PAGE_OFFSET_MASK));
4292
4293 if (GCPhysAccess == GCPhysApic)
4294 {
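        /* The access doesn't actually read or write memory, so only the page offset matters;
           check the intercept as if it were a 1-byte read of that offset. */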
4295 uint16_t const offAccess = *pGCPhysAccess & PAGE_OFFSET_MASK;
4296 uint32_t const fAccess = IEM_ACCESS_TYPE_READ;
4297 uint16_t const cbAccess = 1;
4298 bool const fIntercept = iemVmxVirtApicIsAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess);
4299 if (fIntercept)
4300 return iemVmxVmexitApicAccess(pVCpu, offAccess, fAccess);
4301
4302 *pGCPhysAccess = GCPhysApic | offAccess;
4303 return VINF_VMX_MODIFIES_BEHAVIOR;
4304 }
4305
4306 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4307}
4308
4309
4310/**
4311 * Virtualizes a memory-based APIC-access.
4312 *
4313 * @returns VBox strict status code.
4314 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the access was virtualized.
4315 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
4316 *
4317 * @param pVCpu The cross context virtual CPU structure.
4318 * @param offAccess The offset of the register being accessed (within the
4319 * APIC-access page).
4320 * @param cbAccess The size of the access in bytes.
4321 * @param pvData Pointer to the data being written or where to store the data
4322 * being read.
4323 * @param fAccess The type of access (must contain IEM_ACCESS_TYPE_READ or
4324 * IEM_ACCESS_TYPE_WRITE or IEM_ACCESS_INSTRUCTION).
4325 */
4326IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData,
4327 uint32_t fAccess)
4328{
4329 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4330 Assert(pVmcs);
4331 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS);
4332 Assert(pvData);
4333 Assert( (fAccess & IEM_ACCESS_TYPE_READ)
4334 || (fAccess & IEM_ACCESS_TYPE_WRITE)
4335 || (fAccess & IEM_ACCESS_INSTRUCTION));
4336
4337 bool const fIntercept = iemVmxVirtApicIsAccessIntercepted(pVCpu, offAccess, cbAccess, fAccess);
4338 if (fIntercept)
4339 return iemVmxVmexitApicAccess(pVCpu, offAccess, fAccess);
4340
4341 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4342 {
4343 /*
4344 * A write access to the APIC-access page that is virtualized (rather than
4345 * causing a VM-exit) writes data to the virtual-APIC page.
4346 */
4347 uint32_t const u32Data = *(uint32_t *)pvData;
4348 iemVmxVirtApicWriteRaw32(pVCpu, offAccess, u32Data);
4349
4350 /*
4351 * Record the currently updated APIC offset, as we need this later for figuring
4352         * out whether to perform TPR, EOI or self-IPI virtualization, as well
4353 * as for supplying the exit qualification when causing an APIC-write VM-exit.
4354 *
4355 * After completion of the current operation, we need to perform TPR virtualization,
4356 * EOI virtualization or APIC-write VM-exit depending on which register was written.
4357 *
4358 * The current operation may be a REP-prefixed string instruction, execution of any
4359 * other instruction, or delivery of an event through the IDT.
4360 *
4361 * Thus things like clearing bytes 3:1 of the VTPR, clearing VEOI are not to be
4362 * performed now but later after completion of the current operation.
4363 *
4364 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
4365 */
4366 iemVmxVirtApicSignalAction(pVCpu, offAccess);
4367 }
4368 else
4369 {
4370 /*
4371 * A read access from the APIC-access page that is virtualized (rather than
4372 * causing a VM-exit) returns data from the virtual-APIC page.
4373 *
4374 * See Intel spec. 29.4.2 "Virtualizing Reads from the APIC-Access Page".
4375 */
4376 Assert(cbAccess <= 4);
4377 Assert(offAccess < XAPIC_OFF_END + 4);
4378 static uint32_t const s_auAccessSizeMasks[] = { 0, 0xff, 0xffff, 0xffffff, 0xffffffff };
4379
4380 uint32_t u32Data = iemVmxVirtApicReadRaw32(pVCpu, offAccess);
4381 u32Data &= s_auAccessSizeMasks[cbAccess];
4382 *(uint32_t *)pvData = u32Data;
4383 }
4384
4385 return VINF_VMX_MODIFIES_BEHAVIOR;
4386}
4387
4388
4389/**
4390 * Virtualizes an MSR-based APIC read access.
4391 *
4392 * @returns VBox strict status code.
4393 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR read was virtualized.
4394 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR read access must be
4395 * handled by the x2APIC device.
4396 * @retval  VERR_OUT_OF_RANGE if the MSR read was supposed to be virtualized but was
4397 * not within the range of valid MSRs, caller must raise \#GP(0).
4398 * @param pVCpu The cross context virtual CPU structure.
4399 * @param idMsr The x2APIC MSR being read.
4400 * @param pu64Value Where to store the read x2APIC MSR value (only valid when
4401 * VINF_VMX_MODIFIES_BEHAVIOR is returned).
4402 */
4403IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value)
4404{
4405 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4406 Assert(pVmcs);
4407    Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE);
4408 Assert(pu64Value);
4409
4410 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
4411 {
4412 /*
4413 * Intel has different ideas in the x2APIC spec. vs the VT-x spec. as to
4414 * what the end of the valid x2APIC MSR range is. Hence the use of different
4415 * macros here.
4416 *
4417 * See Intel spec. 10.12.1.2 "x2APIC Register Address Space".
4418 * See Intel spec. 29.5 "Virtualizing MSR-based APIC Accesses".
4419 */
4420 if ( idMsr >= VMX_V_VIRT_APIC_MSR_START
4421 && idMsr <= VMX_V_VIRT_APIC_MSR_END)
4422 {
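            /* The x2APIC MSR at 0x800 + N maps to xAPIC register offset N * 16 on the
               virtual-APIC page, e.g. the TPR MSR 0x808 maps to offset 0x80.
               See Intel spec. 10.12.1.2 "x2APIC Register Address Space". */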
4423 uint16_t const offReg = (idMsr & 0xff) << 4;
4424 uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
4425 *pu64Value = u64Value;
4426 return VINF_VMX_MODIFIES_BEHAVIOR;
4427 }
4428 return VERR_OUT_OF_RANGE;
4429 }
4430
4431 if (idMsr == MSR_IA32_X2APIC_TPR)
4432 {
4433 uint16_t const offReg = (idMsr & 0xff) << 4;
4434 uint64_t const u64Value = iemVmxVirtApicReadRaw64(pVCpu, offReg);
4435 *pu64Value = u64Value;
4436 return VINF_VMX_MODIFIES_BEHAVIOR;
4437 }
4438
4439 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4440}
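/*
 * Note on the (idMsr & 0xff) << 4 mapping used above: x2APIC MSRs live at
 * 0x800 + (xAPIC MMIO offset >> 4), so shifting the low byte of the MSR index
 * left by 4 recovers the offset into the virtual-APIC page. Illustrative values:
 *
 *     MSR_IA32_X2APIC_TPR      = 0x808  ->  (0x08 << 4) = 0x080 (TPR)
 *     MSR_IA32_X2APIC_EOI      = 0x80b  ->  (0x0b << 4) = 0x0b0 (EOI)
 *     MSR_IA32_X2APIC_SELF_IPI = 0x83f  ->  (0x3f << 4) = 0x3f0 (SELF IPI)
 */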
4441
4442
4443/**
4444 * Virtualizes an MSR-based APIC write access.
4445 *
4446 * @returns VBox strict status code.
4447 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR write was virtualized.
4448 * @retval VERR_OUT_OF_RANGE if the MSR write was supposed to be virtualized but the
4449 * value being written is invalid (reserved bits set), caller must raise \#GP(0).
4450 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR must be written normally.
4451 *
4452 * @param pVCpu The cross context virtual CPU structure.
4453 * @param idMsr The x2APIC MSR being written.
4454 * @param u64Value The value of the x2APIC MSR being written.
4455 */
4456IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value)
4457{
4458 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4459 Assert(pVmcs);
4460
4461 /*
4462 * Check if the access is to be virtualized.
4463 * See Intel spec. 29.5 "Virtualizing MSR-based APIC Accesses".
4464 */
4465 if ( idMsr == MSR_IA32_X2APIC_TPR
4466 || ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
4467 && ( idMsr == MSR_IA32_X2APIC_EOI
4468 || idMsr == MSR_IA32_X2APIC_SELF_IPI)))
4469 {
4470 /* Validate the MSR write depending on the register. */
4471 switch (idMsr)
4472 {
4473 case MSR_IA32_X2APIC_TPR:
4474 case MSR_IA32_X2APIC_SELF_IPI:
4475 {
4476 if (u64Value & UINT64_C(0xffffffffffffff00))
4477 return VERR_OUT_OF_RANGE;
4478 break;
4479 }
4480 case MSR_IA32_X2APIC_EOI:
4481 {
4482 if (u64Value != 0)
4483 return VERR_OUT_OF_RANGE;
4484 break;
4485 }
4486 }
4487
4488 /* Write the MSR to the virtual-APIC page. */
4489 uint16_t const offReg = (idMsr & 0xff) << 4;
4490 iemVmxVirtApicWriteRaw64(pVCpu, offReg, u64Value);
4491
4492 /*
4493 * Record the currently updated APIC offset, as we need this later for figuring
4494 * out whether to perform TPR, EOI or self-IPI virtualization as well
4495 * as for supplying the exit qualification when causing an APIC-write VM-exit.
4496 */
4497 iemVmxVirtApicSignalAction(pVCpu, offReg);
4498
4499 return VINF_VMX_MODIFIES_BEHAVIOR;
4500 }
4501
4502 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4503}
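/*
 * Example of the validation above (illustrative values): writing 0x1ff to the
 * TPR or Self-IPI MSR fails because bits 63:8 must be zero, and any non-zero
 * write to the EOI MSR fails; both cases return VERR_OUT_OF_RANGE so the caller
 * raises \#GP(0). A write of 0x20 to MSR_IA32_X2APIC_TPR passes the checks and
 * lands at offset 0x80 of the virtual-APIC page.
 */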
4504
4505
4506/**
4507 * Performs PPR virtualization, recomputing the VPPR in the virtual-APIC page
4508 * from the current VTPR and SVI.
4509 *
4510 * @param pVCpu The cross context virtual CPU structure.
4511 */
4512IEM_STATIC void iemVmxVmexitPprVirtualization(PVMCPU pVCpu)
4513{
4514 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4515 Assert(pVmcs);
4516
4517 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4518 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
4519
4520 uint32_t const uVTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4521 uint32_t const uSvi = pVmcs->u16GuestIntStatus >> 8;
4522
4523 uint32_t uVPpr;
4524 if (((uVTpr >> 4) & 0xf) >= ((uSvi >> 4) & 0xf))
4525 uVPpr = uVTpr & 0xff;
4526 else
4527 uVPpr = uSvi & 0xf0;
4528
4529 iemVmxVirtApicWriteRaw32(pVCpu, XAPIC_OFF_PPR, uVPpr);
4530 Log2(("ppr_virt: uVTpr=%u uSvi=%u -> VM-exit\n", uVTpr, uSvi));
4531}
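/*
 * Worked example of the VPPR computation above (illustrative values): with
 * VTPR = 0x40 and SVI = 0x61 (so VTPR[7:4] = 4 < SVI[7:4] = 6) the else branch
 * is taken and VPPR = 0x61 & 0xf0 = 0x60. With VTPR = 0x70 and the same SVI,
 * VTPR[7:4] = 7 >= 6, so VPPR = VTPR & 0xff = 0x70. This mirrors the PPR formula
 * in Intel spec. 29.1.3 "PPR Virtualization".
 */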
4532
4533
4534/**
4535 * VMX VM-exit handler for TPR virtualization.
4536 *
4537 * @returns VBox strict status code.
4538 * @param pVCpu The cross context virtual CPU structure.
4539 * @param cbInstr The instruction length in bytes.
4540 */
4541IEM_STATIC VBOXSTRICTRC iemVmxVmexitTprVirtualization(PVMCPU pVCpu, uint8_t cbInstr)
4542{
4543 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4544 Assert(pVmcs);
4545
4546 Assert(pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW);
4547 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)); /* We don't support virtual-interrupt delivery yet. */
4548 /** @todo NSTVMX: When virtual-interrupt delivery is present, call PPR virt. and
4549 * evaluate pending virtual interrupts. */
4550
4551 uint32_t const uTprThreshold = pVmcs->u32TprThreshold;
4552 uint32_t const uVTpr = iemVmxVirtApicReadRaw32(pVCpu, XAPIC_OFF_TPR);
4553
4554 /*
4555 * If the VTPR falls below the TPR threshold, we must cause a VM-exit.
4556 * See Intel spec. 29.1.2 "TPR Virtualization".
4557 */
4558 if (((uVTpr >> 4) & 0xf) < uTprThreshold)
4559 {
4560 Log2(("tpr_virt: uVTpr=%u uTprThreshold=%u -> VM-exit\n", uVTpr, uTprThreshold));
4561
4562 /*
4563 * This is a trap-like VM-exit. We pass the instruction length along in the VM-exit
4564 * instruction length field and let the VM-exit handler update the RIP when appropriate.
4565 * It will then clear the VM-exit instruction length field before completing the VM-exit.
4566 *
4567 * The VM-exit qualification must be cleared.
4568 */
4569 iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
4570 iemVmxVmcsSetExitQual(pVCpu, 0);
4571 return iemVmxVmexit(pVCpu, VMX_EXIT_TPR_BELOW_THRESHOLD);
4572 }
4573
4574 return VINF_VMX_INTERCEPT_NOT_ACTIVE;
4575}
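/*
 * Worked example for the threshold comparison above (illustrative values): with
 * u32TprThreshold = 5 and VTPR = 0x30, bits 7:4 of the VTPR give 3, 3 < 5 and a
 * VMX_EXIT_TPR_BELOW_THRESHOLD VM-exit is signalled; with VTPR = 0x50 the value
 * 5 is not below the threshold and execution continues without a VM-exit.
 */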
4576
4577
4578/**
4579 * Checks guest control registers, debug registers and MSRs as part of VM-entry.
4580 *
4581 * @param pVCpu The cross context virtual CPU structure.
4582 * @param pszInstr The VMX instruction name (for logging purposes).
4583 */
4584IEM_STATIC int iemVmxVmentryCheckGuestControlRegsMsrs(PVMCPU pVCpu, const char *pszInstr)
4585{
4586 /*
4587 * Guest Control Registers, Debug Registers, and MSRs.
4588 * See Intel spec. 26.3.1.1 "Checks on Guest Control Registers, Debug Registers, and MSRs".
4589 */
4590 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4591 const char *const pszFailure = "VM-exit";
4592 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
4593
4594 /* CR0 reserved bits. */
4595 {
4596 /* CR0 MB1 bits. */
4597 uint64_t u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
4598 Assert(!(u64Cr0Fixed0 & (X86_CR0_NW | X86_CR0_CD)));
4599 if (fUnrestrictedGuest)
4600 u64Cr0Fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
4601 if ((pVmcs->u64GuestCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
4602 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed0);
4603
4604 /* CR0 MBZ bits. */
4605 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
4606 if (pVmcs->u64GuestCr0.u & ~u64Cr0Fixed1)
4607 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0Fixed1);
4608
4609 /* Without unrestricted-guest support, VT-x does not support unpaged protected mode. */
4610 if ( !fUnrestrictedGuest
4611 && (pVmcs->u64GuestCr0.u & X86_CR0_PG)
4612 && !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
4613 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr0PgPe);
4614 }
4615
4616 /* CR4 reserved bits. */
4617 {
4618 /* CR4 MB1 bits. */
4619 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
4620 if ((pVmcs->u64GuestCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
4621 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed0);
4622
4623 /* CR4 MBZ bits. */
4624 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
4625 if (pVmcs->u64GuestCr4.u & ~u64Cr4Fixed1)
4626 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr4Fixed1);
4627 }
4628
4629 /* DEBUGCTL MSR. */
4630 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4631 && (pVmcs->u64GuestDebugCtlMsr.u & ~MSR_IA32_DEBUGCTL_VALID_MASK_INTEL))
4632 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDebugCtl);
4633
4634 /* 64-bit CPU checks. */
4635 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4636 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4637 {
4638 if (fGstInLongMode)
4639 {
4640 /* PAE must be set. */
4641 if ( (pVmcs->u64GuestCr0.u & X86_CR0_PG)
4642 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE))
4643 { /* likely */ }
4644 else
4645 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPae);
4646 }
4647 else
4648 {
4649 /* PCIDE should not be set. */
4650 if (!(pVmcs->u64GuestCr4.u & X86_CR4_PCIDE))
4651 { /* likely */ }
4652 else
4653 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPcide);
4654 }
4655
4656 /* CR3. */
4657 if (!(pVmcs->u64GuestCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
4658 { /* likely */ }
4659 else
4660 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestCr3);
4661
4662 /* DR7. */
4663 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
4664 && (pVmcs->u64GuestDr7.u & X86_DR7_MBZ_MASK))
4665 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestDr7);
4666
4667 /* SYSENTER ESP and SYSENTER EIP. */
4668 if ( X86_IS_CANONICAL(pVmcs->u64GuestSysenterEsp.u)
4669 && X86_IS_CANONICAL(pVmcs->u64GuestSysenterEip.u))
4670 { /* likely */ }
4671 else
4672 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSysenterEspEip);
4673 }
4674
4675 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
4676 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
4677
4678 /* PAT MSR. */
4679 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
4680 && !CPUMIsPatMsrValid(pVmcs->u64GuestPatMsr.u))
4681 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPatMsr);
4682
4683 /* EFER MSR. */
4684 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
4685 if ( (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
4686 && (pVmcs->u64GuestEferMsr.u & ~uValidEferMask))
4687 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsrRsvd);
4688
4689 bool const fGstLma = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LMA);
4690 bool const fGstLme = RT_BOOL(pVmcs->u64GuestEferMsr.u & MSR_K6_EFER_BIT_LME);
4691 if ( fGstInLongMode == fGstLma
4692 && ( !(pVmcs->u64GuestCr0.u & X86_CR0_PG)
4693 || fGstLma == fGstLme))
4694 { /* likely */ }
4695 else
4696 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestEferMsr);
4697
4698 /* We don't support IA32_BNDCFGS MSR yet. */
4699 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
4700
4701 NOREF(pszInstr);
4702 NOREF(pszFailure);
4703 return VINF_SUCCESS;
4704}
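/*
 * Sketch of the fixed-bit checks performed above (hypothetical mask values for
 * illustration): a control-register value passes iff every FIXED0 bit is set and
 * no bit outside FIXED1 is set, i.e.
 *
 *     (uCr & uFixed0) == uFixed0   &&   (uCr & ~uFixed1) == 0
 *
 * e.g. with uFixed0 = 0x20 and uFixed1 = 0xe7, uCr = 0x21 passes while uCr = 0x31
 * fails (bit 4 may not be 1). For CR0, PE and PG are dropped from the must-be-one
 * set when unrestricted guest is enabled.
 */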
4705
4706
4707/**
4708 * Checks guest segment registers, LDTR and TR as part of VM-entry.
4709 *
4710 * @param pVCpu The cross context virtual CPU structure.
4711 * @param pszInstr The VMX instruction name (for logging purposes).
4712 */
4713IEM_STATIC int iemVmxVmentryCheckGuestSegRegs(PVMCPU pVCpu, const char *pszInstr)
4714{
4715 /*
4716 * Segment registers.
4717 * See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
4718 */
4719 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
4720 const char *const pszFailure = "VM-exit";
4721 bool const fGstInV86Mode = RT_BOOL(pVmcs->u64GuestRFlags.u & X86_EFL_VM);
4722 bool const fUnrestrictedGuest = RT_BOOL(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST);
4723 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
4724
4725 /* Selectors. */
4726 if ( !fGstInV86Mode
4727 && !fUnrestrictedGuest
4728 && (pVmcs->GuestSs & X86_SEL_RPL) != (pVmcs->GuestCs & X86_SEL_RPL))
4729 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelCsSsRpl);
4730
4731 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
4732 {
4733 CPUMSELREG SelReg;
4734 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &SelReg);
4735 if (RT_LIKELY(rc == VINF_SUCCESS))
4736 { /* likely */ }
4737 else
4738 return rc;
4739
4740 /*
4741 * Virtual-8086 mode checks.
4742 */
4743 if (fGstInV86Mode)
4744 {
4745 /* Base address. */
4746 if (SelReg.u64Base == (uint64_t)SelReg.Sel << 4)
4747 { /* likely */ }
4748 else
4749 {
4750 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBaseV86(iSegReg);
4751 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4752 }
4753
4754 /* Limit. */
4755 if (SelReg.u32Limit == 0xffff)
4756 { /* likely */ }
4757 else
4758 {
4759 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegLimitV86(iSegReg);
4760 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4761 }
4762
4763 /* Attribute. */
4764 if (SelReg.Attr.u == 0xf3)
4765 { /* likely */ }
4766 else
4767 {
4768 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrV86(iSegReg);
4769 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4770 }
4771
4772 /* We're done; move to checking the next segment. */
4773 continue;
4774 }
4775
4776 /* Checks done by 64-bit CPUs. */
4777 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4778 {
4779 /* Base address. */
4780 if ( iSegReg == X86_SREG_FS
4781 || iSegReg == X86_SREG_GS)
4782 {
4783 if (X86_IS_CANONICAL(SelReg.u64Base))
4784 { /* likely */ }
4785 else
4786 {
4787 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
4788 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4789 }
4790 }
4791 else if (iSegReg == X86_SREG_CS)
4792 {
4793 if (!RT_HI_U32(SelReg.u64Base))
4794 { /* likely */ }
4795 else
4796 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseCs);
4797 }
4798 else
4799 {
4800 if ( SelReg.Attr.n.u1Unusable
4801 || !RT_HI_U32(SelReg.u64Base))
4802 { /* likely */ }
4803 else
4804 {
4805 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegBase(iSegReg);
4806 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4807 }
4808 }
4809 }
4810
4811 /*
4812 * Checks outside Virtual-8086 mode.
4813 */
4814 uint8_t const uSegType = SelReg.Attr.n.u4Type;
4815 uint8_t const fCodeDataSeg = SelReg.Attr.n.u1DescType;
4816 uint8_t const fUsable = !SelReg.Attr.n.u1Unusable;
4817 uint8_t const uDpl = SelReg.Attr.n.u2Dpl;
4818 uint8_t const fPresent = SelReg.Attr.n.u1Present;
4819 uint8_t const uGranularity = SelReg.Attr.n.u1Granularity;
4820 uint8_t const uDefBig = SelReg.Attr.n.u1DefBig;
4821 uint8_t const fSegLong = SelReg.Attr.n.u1Long;
4822
4823 /* Code or usable segment. */
4824 if ( iSegReg == X86_SREG_CS
4825 || fUsable)
4826 {
4827 /* Reserved bits (bits 31:17 and bits 11:8). */
4828 if (!(SelReg.Attr.u & 0xfffe0f00))
4829 { /* likely */ }
4830 else
4831 {
4832 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrRsvd(iSegReg);
4833 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4834 }
4835
4836 /* Descriptor type. */
4837 if (fCodeDataSeg)
4838 { /* likely */ }
4839 else
4840 {
4841 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDescType(iSegReg);
4842 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4843 }
4844
4845 /* Present. */
4846 if (fPresent)
4847 { /* likely */ }
4848 else
4849 {
4850 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrPresent(iSegReg);
4851 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4852 }
4853
4854 /* Granularity. */
4855 if ( ((SelReg.u32Limit & 0x00000fff) == 0x00000fff || !uGranularity)
4856 && ((SelReg.u32Limit & 0xfff00000) == 0x00000000 || uGranularity))
4857 { /* likely */ }
4858 else
4859 {
4860 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrGran(iSegReg);
4861 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4862 }
4863 }
4864
4865 if (iSegReg == X86_SREG_CS)
4866 {
4867 /* Segment Type and DPL. */
4868 if ( uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
4869 && fUnrestrictedGuest)
4870 {
4871 if (uDpl == 0)
4872 { /* likely */ }
4873 else
4874 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplZero);
4875 }
4876 else if ( uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_ACCESSED)
4877 || uSegType == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
4878 {
4879 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
4880 if (uDpl == AttrSs.n.u2Dpl)
4881 { /* likely */ }
4882 else
4883 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplEqSs);
4884 }
4885 else if ((uSegType & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
4886 == (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF | X86_SEL_TYPE_ACCESSED))
4887 {
4888 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
4889 if (uDpl <= AttrSs.n.u2Dpl)
4890 { /* likely */ }
4891 else
4892 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDplLtSs);
4893 }
4894 else
4895 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsType);
4896
4897 /* Def/Big. */
4898 if ( fGstInLongMode
4899 && fSegLong)
4900 {
4901 if (uDefBig == 0)
4902 { /* likely */ }
4903 else
4904 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsDefBig);
4905 }
4906 }
4907 else if (iSegReg == X86_SREG_SS)
4908 {
4909 /* Segment Type. */
4910 if ( !fUsable
4911 || uSegType == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
4912 || uSegType == (X86_SEL_TYPE_DOWN | X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED))
4913 { /* likely */ }
4914 else
4915 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsType);
4916
4917 /* DPL. */
4918 if (!fUnrestrictedGuest)
4919 {
4920 if (uDpl == (SelReg.Sel & X86_SEL_RPL))
4921 { /* likely */ }
4922 else
4923 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplEqRpl);
4924 }
4925 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
4926 if ( AttrCs.n.u4Type == (X86_SEL_TYPE_RW | X86_SEL_TYPE_ACCESSED)
4927 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
4928 {
4929 if (uDpl == 0)
4930 { /* likely */ }
4931 else
4932 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrSsDplZero);
4933 }
4934 }
4935 else
4936 {
4937 /* DS, ES, FS, GS. */
4938 if (fUsable)
4939 {
4940 /* Segment type. */
4941 if (uSegType & X86_SEL_TYPE_ACCESSED)
4942 { /* likely */ }
4943 else
4944 {
4945 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrTypeAcc(iSegReg);
4946 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4947 }
4948
4949 if ( !(uSegType & X86_SEL_TYPE_CODE)
4950 || (uSegType & X86_SEL_TYPE_READ))
4951 { /* likely */ }
4952 else
4953 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrCsTypeRead);
4954
4955 /* DPL. */
4956 if ( !fUnrestrictedGuest
4957 && uSegType <= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_ACCESSED))
4958 {
4959 if (uDpl >= (SelReg.Sel & X86_SEL_RPL))
4960 { /* likely */ }
4961 else
4962 {
4963 VMXVDIAG const enmDiag = iemVmxGetDiagVmentrySegAttrDplRpl(iSegReg);
4964 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
4965 }
4966 }
4967 }
4968 }
4969 }
4970
4971 /*
4972 * LDTR.
4973 */
4974 {
4975 CPUMSELREG Ldtr;
4976 Ldtr.Sel = pVmcs->GuestLdtr;
4977 Ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
4978 Ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
4979 Ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
4980
4981 if (!Ldtr.Attr.n.u1Unusable)
4982 {
4983 /* Selector. */
4984 if (!(Ldtr.Sel & X86_SEL_LDT))
4985 { /* likely */ }
4986 else
4987 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelLdtr);
4988
4989 /* Base. */
4990 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
4991 {
4992 if (X86_IS_CANONICAL(Ldtr.u64Base))
4993 { /* likely */ }
4994 else
4995 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseLdtr);
4996 }
4997
4998 /* Attributes. */
4999 /* Reserved bits (bits 31:17 and bits 11:8). */
5000 if (!(Ldtr.Attr.u & 0xfffe0f00))
5001 { /* likely */ }
5002 else
5003 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrRsvd);
5004
5005 if (Ldtr.Attr.n.u4Type == X86_SEL_TYPE_SYS_LDT)
5006 { /* likely */ }
5007 else
5008 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrType);
5009
5010 if (!Ldtr.Attr.n.u1DescType)
5011 { /* likely */ }
5012 else
5013 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrDescType);
5014
5015 if (Ldtr.Attr.n.u1Present)
5016 { /* likely */ }
5017 else
5018 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrPresent);
5019
5020 if ( ((Ldtr.u32Limit & 0x00000fff) == 0x00000fff || !Ldtr.Attr.n.u1Granularity)
5021 && ((Ldtr.u32Limit & 0xfff00000) == 0x00000000 || Ldtr.Attr.n.u1Granularity))
5022 { /* likely */ }
5023 else
5024 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrLdtrGran);
5025 }
5026 }
5027
5028 /*
5029 * TR.
5030 */
5031 {
5032 CPUMSELREG Tr;
5033 Tr.Sel = pVmcs->GuestTr;
5034 Tr.u32Limit = pVmcs->u32GuestTrLimit;
5035 Tr.u64Base = pVmcs->u64GuestTrBase.u;
5036 Tr.Attr.u = pVmcs->u32GuestTrAttr;
5037
5038 /* Selector. */
5039 if (!(Tr.Sel & X86_SEL_LDT))
5040 { /* likely */ }
5041 else
5042 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegSelTr);
5043
5044 /* Base. */
5045 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5046 {
5047 if (X86_IS_CANONICAL(Tr.u64Base))
5048 { /* likely */ }
5049 else
5050 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegBaseTr);
5051 }
5052
5053 /* Attributes. */
5054 /* Reserved bits (bits 31:17 and bits 11:8). */
5055 if (!(Tr.Attr.u & 0xfffe0f00))
5056 { /* likely */ }
5057 else
5058 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrRsvd);
5059
5060 if (!Tr.Attr.n.u1Unusable)
5061 { /* likely */ }
5062 else
5063 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrUnusable);
5064
5065 if ( Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_386_TSS_BUSY
5066 || ( !fGstInLongMode
5067 && Tr.Attr.n.u4Type == X86_SEL_TYPE_SYS_286_TSS_BUSY))
5068 { /* likely */ }
5069 else
5070 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrType);
5071
5072 if (!Tr.Attr.n.u1DescType)
5073 { /* likely */ }
5074 else
5075 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrDescType);
5076
5077 if (Tr.Attr.n.u1Present)
5078 { /* likely */ }
5079 else
5080 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrPresent);
5081
5082 if ( ((Tr.u32Limit & 0x00000fff) == 0x00000fff || !Tr.Attr.n.u1Granularity)
5083 && ((Tr.u32Limit & 0xfff00000) == 0x00000000 || Tr.Attr.n.u1Granularity))
5084 { /* likely */ }
5085 else
5086 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestSegAttrTrGran);
5087 }
5088
5089 NOREF(pszInstr);
5090 NOREF(pszFailure);
5091 return VINF_SUCCESS;
5092}
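/*
 * Note on the granularity checks above: if the low 12 bits of the limit are not
 * all ones the G bit must be 0, and if bits 31:20 are not all zero the G bit must
 * be 1. Illustrative values: a flat 0xffffffff limit is only consistent with
 * G = 1, a limit of 0x0000ffff is consistent with either setting, and a limit
 * such as 0x00100000 can never be valid because the two clauses demand
 * contradictory G values.
 */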
5093
5094
5095/**
5096 * Checks guest GDTR and IDTR as part of VM-entry.
5097 *
5098 * @param pVCpu The cross context virtual CPU structure.
5099 * @param pszInstr The VMX instruction name (for logging purposes).
5100 */
5101IEM_STATIC int iemVmxVmentryCheckGuestGdtrIdtr(PVMCPU pVCpu, const char *pszInstr)
5102{
5103 /*
5104 * GDTR and IDTR.
5105 * See Intel spec. 26.3.1.3 "Checks on Guest Descriptor-Table Registers".
5106 */
5107 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5108 const char *const pszFailure = "VM-exit";
5109
5110 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5111 {
5112 /* Base. */
5113 if (X86_IS_CANONICAL(pVmcs->u64GuestGdtrBase.u))
5114 { /* likely */ }
5115 else
5116 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrBase);
5117
5118 if (X86_IS_CANONICAL(pVmcs->u64GuestIdtrBase.u))
5119 { /* likely */ }
5120 else
5121 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrBase);
5122 }
5123
5124 /* Limit. */
5125 if (!RT_HI_U16(pVmcs->u32GuestGdtrLimit))
5126 { /* likely */ }
5127 else
5128 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestGdtrLimit);
5129
5130 if (!RT_HI_U16(pVmcs->u32GuestIdtrLimit))
5131 { /* likely */ }
5132 else
5133 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIdtrLimit);
5134
5135 NOREF(pszInstr);
5136 NOREF(pszFailure);
5137 return VINF_SUCCESS;
5138}
5139
5140
5141/**
5142 * Checks guest RIP and RFLAGS as part of VM-entry.
5143 *
5144 * @param pVCpu The cross context virtual CPU structure.
5145 * @param pszInstr The VMX instruction name (for logging purposes).
5146 */
5147IEM_STATIC int iemVmxVmentryCheckGuestRipRFlags(PVMCPU pVCpu, const char *pszInstr)
5148{
5149 /*
5150 * RIP and RFLAGS.
5151 * See Intel spec. 26.3.1.4 "Checks on Guest RIP and RFLAGS".
5152 */
5153 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5154 const char *const pszFailure = "VM-exit";
5155 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5156
5157 /* RIP. */
5158 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5159 {
5160 X86DESCATTR AttrCs; AttrCs.u = pVmcs->u32GuestCsAttr;
5161 if ( !fGstInLongMode
5162 || !AttrCs.n.u1Long)
5163 {
5164 if (!RT_HI_U32(pVmcs->u64GuestRip.u))
5165 { /* likely */ }
5166 else
5167 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRipRsvd);
5168 }
5169
5170 if ( fGstInLongMode
5171 && AttrCs.n.u1Long)
5172 {
5173 Assert(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth == 48); /* Canonical. */
5174 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxLinearAddrWidth < 64
5175 && X86_IS_CANONICAL(pVmcs->u64GuestRip.u))
5176 { /* likely */ }
5177 else
5178 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRip);
5179 }
5180 }
5181
5182 /* RFLAGS (bits 63:22 (or 31:22), bits 15, 5, 3 are reserved, bit 1 MB1). */
5183 uint64_t const uGuestRFlags = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode ? pVmcs->u64GuestRFlags.u
5184 : pVmcs->u64GuestRFlags.s.Lo;
5185 if ( !(uGuestRFlags & ~(X86_EFL_LIVE_MASK | X86_EFL_RA1_MASK))
5186 && (uGuestRFlags & X86_EFL_RA1_MASK) == X86_EFL_RA1_MASK)
5187 { /* likely */ }
5188 else
5189 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsRsvd);
5190
5191 if ( fGstInLongMode
5192 || !(pVmcs->u64GuestCr0.u & X86_CR0_PE))
5193 {
5194 if (!(uGuestRFlags & X86_EFL_VM))
5195 { /* likely */ }
5196 else
5197 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsVm);
5198 }
5199
5200 if ( VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo)
5201 && VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo) == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5202 {
5203 if (uGuestRFlags & X86_EFL_IF)
5204 { /* likely */ }
5205 else
5206 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestRFlagsIf);
5207 }
5208
5209 NOREF(pszInstr);
5210 NOREF(pszFailure);
5211 return VINF_SUCCESS;
5212}
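/*
 * Reminder on the canonical checks used above and below: on a CPU with 48-bit
 * linear addresses a value is canonical when bits 63:48 are copies of bit 47
 * (sign-extension). Illustrative values: 0x00007fffffffffff and
 * 0xffff800000000000 are canonical, 0x0000800000000000 is not and would fail
 * e.g. the RIP or SYSENTER checks.
 */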
5213
5214
5215/**
5216 * Checks guest non-register state as part of VM-entry.
5217 *
5218 * @param pVCpu The cross context virtual CPU structure.
5219 * @param pszInstr The VMX instruction name (for logging purposes).
5220 */
5221IEM_STATIC int iemVmxVmentryCheckGuestNonRegState(PVMCPU pVCpu, const char *pszInstr)
5222{
5223 /*
5224 * Guest non-register state.
5225 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
5226 */
5227 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5228 const char *const pszFailure = "VM-exit";
5229
5230 /*
5231 * Activity state.
5232 */
5233 uint64_t const u64GuestVmxMiscMsr = CPUMGetGuestIa32VmxMisc(pVCpu);
5234 uint32_t const fActivityStateMask = RT_BF_GET(u64GuestVmxMiscMsr, VMX_BF_MISC_ACTIVITY_STATES);
5235 if (!(pVmcs->u32GuestActivityState & fActivityStateMask))
5236 { /* likely */ }
5237 else
5238 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateRsvd);
5239
5240 X86DESCATTR AttrSs; AttrSs.u = pVmcs->u32GuestSsAttr;
5241 if ( !AttrSs.n.u2Dpl
5242 || pVmcs->u32GuestActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT)
5243 { /* likely */ }
5244 else
5245 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateSsDpl);
5246
5247 if ( pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_STI
5248 || pVmcs->u32GuestIntrState == VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)
5249 {
5250 if (pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE)
5251 { /* likely */ }
5252 else
5253 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateStiMovSs);
5254 }
5255
5256 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
5257 {
5258 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
5259 uint8_t const uVector = VMX_ENTRY_INT_INFO_VECTOR(pVmcs->u32EntryIntInfo);
5260 AssertCompile(VMX_V_GUEST_ACTIVITY_STATE_MASK == (VMX_VMCS_GUEST_ACTIVITY_HLT | VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN));
5261 switch (pVmcs->u32GuestActivityState)
5262 {
5263 case VMX_VMCS_GUEST_ACTIVITY_HLT:
5264 {
5265 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT
5266 || uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
5267 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
5268 && ( uVector == X86_XCPT_DB
5269 || uVector == X86_XCPT_MC))
5270 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT
5271 && uVector == VMX_ENTRY_INT_INFO_VECTOR_MTF))
5272 { /* likely */ }
5273 else
5274 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateHlt);
5275 break;
5276 }
5277
5278 case VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN:
5279 {
5280 if ( uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI
5281 || ( uIntType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
5282 && uVector == X86_XCPT_MC))
5283 { /* likely */ }
5284 else
5285 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestActStateShutdown);
5286 break;
5287 }
5288
5289 case VMX_VMCS_GUEST_ACTIVITY_ACTIVE:
5290 default:
5291 break;
5292 }
5293 }
5294
5295 /*
5296 * Interruptibility state.
5297 */
5298 if (!(pVmcs->u32GuestIntrState & ~VMX_VMCS_GUEST_INT_STATE_MASK))
5299 { /* likely */ }
5300 else
5301 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRsvd);
5302
5303 if ((pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5304 != (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5305 { /* likely */ }
5306 else
5307 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateStiMovSs);
5308
5309 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_IF)
5310 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5311 { /* likely */ }
5312 else
5313 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateRFlagsSti);
5314
5315 if (VMX_ENTRY_INT_INFO_IS_VALID(pVmcs->u32EntryIntInfo))
5316 {
5317 uint8_t const uIntType = VMX_ENTRY_INT_INFO_TYPE(pVmcs->u32EntryIntInfo);
5318 if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
5319 {
5320 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
5321 { /* likely */ }
5322 else
5323 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateExtInt);
5324 }
5325 else if (uIntType == VMX_ENTRY_INT_INFO_TYPE_NMI)
5326 {
5327 if (!(pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)))
5328 { /* likely */ }
5329 else
5330 {
5331 /*
5332 * We don't support injecting NMIs when blocking-by-STI would be in effect.
5333 * We update the VM-exit qualification only when blocking-by-STI is set
5334 * without blocking-by-MovSS being set. Although in practice it does not
5335 * make much difference since the order of checks is implementation defined.
5336 */
5337 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
5338 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_NMI_INJECT);
5339 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateNmi);
5340 }
5341
5342 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
5343 || !(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI))
5344 { /* likely */ }
5345 else
5346 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateVirtNmi);
5347 }
5348 }
5349
5350 /* We don't support SMM yet. So blocking-by-SMIs must not be set. */
5351 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI))
5352 { /* likely */ }
5353 else
5354 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateSmi);
5355
5356 /* We don't support SGX yet. So enclave-interruption must not be set. */
5357 if (!(pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_ENCLAVE))
5358 { /* likely */ }
5359 else
5360 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestIntStateEnclave);
5361
5362 /*
5363 * Pending debug exceptions.
5364 */
5365 uint64_t const uPendingDbgXcpt = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode
5366 ? pVmcs->u64GuestPendingDbgXcpt.u
5367 : pVmcs->u64GuestPendingDbgXcpt.s.Lo;
5368 if (!(uPendingDbgXcpt & ~VMX_VMCS_GUEST_PENDING_DEBUG_VALID_MASK))
5369 { /* likely */ }
5370 else
5371 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRsvd);
5372
5373 if ( (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
5374 || pVmcs->u32GuestActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
5375 {
5376 if ( (pVmcs->u64GuestRFlags.u & X86_EFL_TF)
5377 && !(pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF)
5378 && !(uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
5379 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsTf);
5380
5381 if ( ( !(pVmcs->u64GuestRFlags.u & X86_EFL_TF)
5382 || (pVmcs->u64GuestDebugCtlMsr.u & MSR_IA32_DEBUGCTL_BTF))
5383 && (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS))
5384 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptBsNoTf);
5385 }
5386
5387 /* We don't support RTM (Real-time Transactional Memory) yet. */
5388 if (uPendingDbgXcpt & VMX_VMCS_GUEST_PENDING_DEBUG_RTM)
5389 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPndDbgXcptRtm);
5390
5391 /*
5392 * VMCS link pointer.
5393 */
5394 if (pVmcs->u64VmcsLinkPtr.u != UINT64_C(0xffffffffffffffff))
5395 {
5396 RTGCPHYS const GCPhysShadowVmcs = pVmcs->u64VmcsLinkPtr.u;
5397 /* We don't support SMM yet (so VMCS link pointer cannot be the current VMCS). */
5398 if (GCPhysShadowVmcs != IEM_VMX_GET_CURRENT_VMCS(pVCpu))
5399 { /* likely */ }
5400 else
5401 {
5402 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5403 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrCurVmcs);
5404 }
5405
5406 /* Validate the address. */
5407 if ( (GCPhysShadowVmcs & X86_PAGE_4K_OFFSET_MASK)
5408 || (GCPhysShadowVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5409 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysShadowVmcs))
5410 {
5411 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5412 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmcsLinkPtr);
5413 }
5414
5415 /* Read the VMCS-link pointer from guest memory. */
5416 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs));
5417 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs),
5418 GCPhysShadowVmcs, VMX_V_VMCS_SIZE);
5419 if (RT_FAILURE(rc))
5420 {
5421 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5422 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys);
5423 }
5424
5425 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
5426 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.u31RevisionId == VMX_V_VMCS_REVISION_ID)
5427 { /* likely */ }
5428 else
5429 {
5430 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5431 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrRevId);
5432 }
5433
5434 /* Verify the shadow bit is set if VMCS shadowing is enabled. */
5435 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
5436 || pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs)->u32VmcsRevId.n.fIsShadowVmcs)
5437 { /* likely */ }
5438 else
5439 {
5440 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_VMCS_LINK_PTR);
5441 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmcsLinkPtrShadow);
5442 }
5443
5444 /* Finally update our cache of the guest physical address of the shadow VMCS. */
5445 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs = GCPhysShadowVmcs;
5446 }
5447
5448 NOREF(pszInstr);
5449 NOREF(pszFailure);
5450 return VINF_SUCCESS;
5451}
5452
5453
5454/**
5455 * Checks if the PDPTEs referenced by the nested-guest CR3 are valid as part of
5456 * VM-entry.
5457 *
5458 * @returns VBox status code.
5459 * @param pVCpu The cross context virtual CPU structure.
5460 * @param pszInstr The VMX instruction name (for logging purposes).
5461 * @param pVmcs Pointer to the virtual VMCS.
5462 */
5463IEM_STATIC int iemVmxVmentryCheckGuestPdptesForCr3(PVMCPU pVCpu, const char *pszInstr, PVMXVVMCS pVmcs)
5464{
5465 /*
5466 * Check PDPTEs.
5467 * See Intel spec. 4.4.1 "PDPTE Registers".
5468 */
5469 uint64_t const uGuestCr3 = pVmcs->u64GuestCr3.u & X86_CR3_PAE_PAGE_MASK;
5470 const char *const pszFailure = "VM-exit";
5471
5472 X86PDPE aPdptes[X86_PG_PAE_PDPE_ENTRIES];
5473 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)&aPdptes[0], uGuestCr3, sizeof(aPdptes));
5474 if (RT_SUCCESS(rc))
5475 {
5476 for (unsigned iPdpte = 0; iPdpte < RT_ELEMENTS(aPdptes); iPdpte++)
5477 {
5478 if ( !(aPdptes[iPdpte].u & X86_PDPE_P)
5479 || !(aPdptes[iPdpte].u & X86_PDPE_PAE_MBZ_MASK))
5480 { /* likely */ }
5481 else
5482 {
5483 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
5484 VMXVDIAG const enmDiag = iemVmxGetDiagVmentryPdpteRsvd(iPdpte);
5485 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
5486 }
5487 }
5488 }
5489 else
5490 {
5491 iemVmxVmcsSetExitQual(pVCpu, VMX_ENTRY_FAIL_QUAL_PDPTE);
5492 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys);
5493 }
5494
5495 NOREF(pszFailure);
5496 return rc;
5497}
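/*
 * Background for the check above: with PAE paging, CR3 points at a 32-byte
 * aligned table of four 64-bit PDPTEs, which is why X86_CR3_PAE_PAGE_MASK is
 * applied to the guest CR3 and exactly sizeof(aPdptes) = 32 bytes are read.
 * A PDPTE only fails the check when its present bit is set and one of the bits
 * in X86_PDPE_PAE_MBZ_MASK is also set; not-present entries are ignored.
 */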
5498
5499
5500/**
5501 * Checks guest PDPTEs as part of VM-entry.
5502 *
5503 * @param pVCpu The cross context virtual CPU structure.
5504 * @param pszInstr The VMX instruction name (for logging purposes).
5505 */
5506IEM_STATIC int iemVmxVmentryCheckGuestPdptes(PVMCPU pVCpu, const char *pszInstr)
5507{
5508 /*
5509 * Guest PDPTEs.
5510 * See Intel spec. 26.3.1.5 "Checks on Guest Page-Directory-Pointer-Table Entries".
5511 */
5512 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5513 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5514
5515 /* Check PDPTEs if the VM-entry is to a guest using PAE paging. */
5516 int rc;
5517 if ( !fGstInLongMode
5518 && (pVmcs->u64GuestCr4.u & X86_CR4_PAE)
5519 && (pVmcs->u64GuestCr0.u & X86_CR0_PG))
5520 {
5521 /*
5522 * We don't support nested-paging for nested-guests yet.
5523 *
5524 * Without nested-paging for nested-guests, PDPTEs in the VMCS are not used;
5525 * instead we need to check the PDPTEs referenced by the guest CR3.
5526 */
5527 rc = iemVmxVmentryCheckGuestPdptesForCr3(pVCpu, pszInstr, pVmcs);
5528 }
5529 else
5530 rc = VINF_SUCCESS;
5531 return rc;
5532}
5533
5534
5535/**
5536 * Checks guest-state as part of VM-entry.
5537 *
5538 * @returns VBox status code.
5539 * @param pVCpu The cross context virtual CPU structure.
5540 * @param pszInstr The VMX instruction name (for logging purposes).
5541 */
5542IEM_STATIC int iemVmxVmentryCheckGuestState(PVMCPU pVCpu, const char *pszInstr)
5543{
5544 int rc = iemVmxVmentryCheckGuestControlRegsMsrs(pVCpu, pszInstr);
5545 if (RT_SUCCESS(rc))
5546 {
5547 rc = iemVmxVmentryCheckGuestSegRegs(pVCpu, pszInstr);
5548 if (RT_SUCCESS(rc))
5549 {
5550 rc = iemVmxVmentryCheckGuestGdtrIdtr(pVCpu, pszInstr);
5551 if (RT_SUCCESS(rc))
5552 {
5553 rc = iemVmxVmentryCheckGuestRipRFlags(pVCpu, pszInstr);
5554 if (RT_SUCCESS(rc))
5555 {
5556 rc = iemVmxVmentryCheckGuestNonRegState(pVCpu, pszInstr);
5557 if (RT_SUCCESS(rc))
5558 return iemVmxVmentryCheckGuestPdptes(pVCpu, pszInstr);
5559 }
5560 }
5561 }
5562 }
5563 return rc;
5564}
5565
5566
5567/**
5568 * Checks host-state as part of VM-entry.
5569 *
5570 * @returns VBox status code.
5571 * @param pVCpu The cross context virtual CPU structure.
5572 * @param pszInstr The VMX instruction name (for logging purposes).
5573 */
5574IEM_STATIC int iemVmxVmentryCheckHostState(PVMCPU pVCpu, const char *pszInstr)
5575{
5576 /*
5577 * Host Control Registers and MSRs.
5578 * See Intel spec. 26.2.2 "Checks on Host Control Registers and MSRs".
5579 */
5580 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5581 const char * const pszFailure = "VMFail";
5582
5583 /* CR0 reserved bits. */
5584 {
5585 /* CR0 MB1 bits. */
5586 uint64_t const u64Cr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
5587 if ((pVmcs->u64HostCr0.u & u64Cr0Fixed0) != u64Cr0Fixed0)
5588 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed0);
5589
5590 /* CR0 MBZ bits. */
5591 uint64_t const u64Cr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
5592 if (pVmcs->u64HostCr0.u & ~u64Cr0Fixed1)
5593 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr0Fixed1);
5594 }
5595
5596 /* CR4 reserved bits. */
5597 {
5598 /* CR4 MB1 bits. */
5599 uint64_t const u64Cr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
5600 if ((pVmcs->u64HostCr4.u & u64Cr4Fixed0) != u64Cr4Fixed0)
5601 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed0);
5602
5603 /* CR4 MBZ bits. */
5604 uint64_t const u64Cr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
5605 if (pVmcs->u64HostCr4.u & ~u64Cr4Fixed1)
5606 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Fixed1);
5607 }
5608
5609 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5610 {
5611 /* CR3 reserved bits. */
5612 if (!(pVmcs->u64HostCr3.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth))
5613 { /* likely */ }
5614 else
5615 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr3);
5616
5617 /* SYSENTER ESP and SYSENTER EIP. */
5618 if ( X86_IS_CANONICAL(pVmcs->u64HostSysenterEsp.u)
5619 && X86_IS_CANONICAL(pVmcs->u64HostSysenterEip.u))
5620 { /* likely */ }
5621 else
5622 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSysenterEspEip);
5623 }
5624
5625 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
5626 Assert(!(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PERF_MSR));
5627
5628 /* PAT MSR. */
5629 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_PAT_MSR)
5630 || CPUMIsPatMsrValid(pVmcs->u64HostPatMsr.u))
5631 { /* likely */ }
5632 else
5633 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostPatMsr);
5634
5635 /* EFER MSR. */
5636 uint64_t const uValidEferMask = CPUMGetGuestEferMsrValidMask(pVCpu->CTX_SUFF(pVM));
5637 if ( !(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_LOAD_EFER_MSR)
5638 || !(pVmcs->u64HostEferMsr.u & ~uValidEferMask))
5639 { /* likely */ }
5640 else
5641 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsrRsvd);
5642
5643 bool const fHostInLongMode = RT_BOOL(pVmcs->u32ExitCtls & VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE);
5644 bool const fHostLma = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LMA);
5645 bool const fHostLme = RT_BOOL(pVmcs->u64HostEferMsr.u & MSR_K6_EFER_BIT_LME);
5646 if ( fHostInLongMode == fHostLma
5647 && fHostInLongMode == fHostLme)
5648 { /* likely */ }
5649 else
5650 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostEferMsr);
5651
5652 /*
5653 * Host Segment and Descriptor-Table Registers.
5654 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
5655 */
5656 /* Selector RPL and TI. */
5657 if ( !(pVmcs->HostCs & (X86_SEL_RPL | X86_SEL_LDT))
5658 && !(pVmcs->HostSs & (X86_SEL_RPL | X86_SEL_LDT))
5659 && !(pVmcs->HostDs & (X86_SEL_RPL | X86_SEL_LDT))
5660 && !(pVmcs->HostEs & (X86_SEL_RPL | X86_SEL_LDT))
5661 && !(pVmcs->HostFs & (X86_SEL_RPL | X86_SEL_LDT))
5662 && !(pVmcs->HostGs & (X86_SEL_RPL | X86_SEL_LDT))
5663 && !(pVmcs->HostTr & (X86_SEL_RPL | X86_SEL_LDT)))
5664 { /* likely */ }
5665 else
5666 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSel);
5667
5668 /* CS and TR selectors cannot be 0. */
5669 if ( pVmcs->HostCs
5670 && pVmcs->HostTr)
5671 { /* likely */ }
5672 else
5673 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCsTr);
5674
5675 /* SS cannot be 0 if 32-bit host. */
5676 if ( fHostInLongMode
5677 || pVmcs->HostSs)
5678 { /* likely */ }
5679 else
5680 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSs);
5681
5682 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5683 {
5684 /* FS, GS, GDTR, IDTR, TR base address. */
5685 if ( X86_IS_CANONICAL(pVmcs->u64HostFsBase.u)
5686 && X86_IS_CANONICAL(pVmcs->u64HostGsBase.u)
5687 && X86_IS_CANONICAL(pVmcs->u64HostGdtrBase.u)
5688 && X86_IS_CANONICAL(pVmcs->u64HostIdtrBase.u)
5689 && X86_IS_CANONICAL(pVmcs->u64HostTrBase.u))
5690 { /* likely */ }
5691 else
5692 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostSegBase);
5693 }
5694
5695 /*
5696 * Host address-space size for 64-bit CPUs.
5697 * See Intel spec. 26.2.4 "Checks Related to Address-Space Size".
5698 */
5699 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
5700 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
5701 {
5702 bool const fCpuInLongMode = CPUMIsGuestInLongMode(pVCpu);
5703
5704 /* Logical processor in IA-32e mode. */
5705 if (fCpuInLongMode)
5706 {
5707 if (fHostInLongMode)
5708 {
5709 /* PAE must be set. */
5710 if (pVmcs->u64HostCr4.u & X86_CR4_PAE)
5711 { /* likely */ }
5712 else
5713 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pae);
5714
5715 /* RIP must be canonical. */
5716 if (X86_IS_CANONICAL(pVmcs->u64HostRip.u))
5717 { /* likely */ }
5718 else
5719 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRip);
5720 }
5721 else
5722 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostLongMode);
5723 }
5724 else
5725 {
5726 /* Logical processor is outside IA-32e mode. */
5727 if ( !fGstInLongMode
5728 && !fHostInLongMode)
5729 {
5730 /* PCIDE should not be set. */
5731 if (!(pVmcs->u64HostCr4.u & X86_CR4_PCIDE))
5732 { /* likely */ }
5733 else
5734 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostCr4Pcide);
5735
5736 /* The high 32-bits of RIP MBZ. */
5737 if (!pVmcs->u64HostRip.s.Hi)
5738 { /* likely */ }
5739 else
5740 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostRipRsvd);
5741 }
5742 else
5743 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongMode);
5744 }
5745 }
5746 else
5747 {
5748 /* Host address-space size for 32-bit CPUs. */
5749 if ( !fGstInLongMode
5750 && !fHostInLongMode)
5751 { /* likely */ }
5752 else
5753 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_HostGuestLongModeNoCpu);
5754 }
5755
5756 NOREF(pszInstr);
5757 NOREF(pszFailure);
5758 return VINF_SUCCESS;
5759}
5760
5761
5762/**
5763 * Checks VM-entry controls fields as part of VM-entry.
5764 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
5765 *
5766 * @returns VBox status code.
5767 * @param pVCpu The cross context virtual CPU structure.
5768 * @param pszInstr The VMX instruction name (for logging purposes).
5769 */
5770IEM_STATIC int iemVmxVmentryCheckEntryCtls(PVMCPU pVCpu, const char *pszInstr)
5771{
5772 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5773 const char * const pszFailure = "VMFail";
5774
5775 /* VM-entry controls. */
5776 VMXCTLSMSR EntryCtls;
5777 EntryCtls.u = CPUMGetGuestIa32VmxEntryCtls(pVCpu);
5778 if (~pVmcs->u32EntryCtls & EntryCtls.n.disallowed0)
5779 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsDisallowed0);
5780
5781 if (pVmcs->u32EntryCtls & ~EntryCtls.n.allowed1)
5782 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryCtlsAllowed1);
5783
5784 /* Event injection. */
5785 uint32_t const uIntInfo = pVmcs->u32EntryIntInfo;
5786 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VALID))
5787 {
5788 /* Type and vector. */
5789 uint8_t const uType = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_TYPE);
5790 uint8_t const uVector = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_VECTOR);
5791 uint8_t const uRsvd = RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_RSVD_12_30);
5792 if ( !uRsvd
5793 && HMVmxIsEntryIntInfoTypeValid(IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxMonitorTrapFlag, uType)
5794 && HMVmxIsEntryIntInfoVectorValid(uVector, uType))
5795 { /* likely */ }
5796 else
5797 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoTypeVecRsvd);
5798
5799 /* Exception error code. */
5800 if (RT_BF_GET(uIntInfo, VMX_BF_ENTRY_INT_INFO_ERR_CODE_VALID))
5801 {
5802 /* Delivery possible only in Unrestricted-guest mode when CR0.PE is set. */
5803 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
5804 || (pVmcs->u64GuestCr0.s.Lo & X86_CR0_PE))
5805 { /* likely */ }
5806 else
5807 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodePe);
5808
5809 /* Exceptions that provide an error code. */
5810 if ( uType == VMX_ENTRY_INT_INFO_TYPE_HW_XCPT
5811 && ( uVector == X86_XCPT_DF
5812 || uVector == X86_XCPT_TS
5813 || uVector == X86_XCPT_NP
5814 || uVector == X86_XCPT_SS
5815 || uVector == X86_XCPT_GP
5816 || uVector == X86_XCPT_PF
5817 || uVector == X86_XCPT_AC))
5818 { /* likely */ }
5819 else
5820 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryIntInfoErrCodeVec);
5821
5822 /* Exception error-code reserved bits. */
5823 if (!(pVmcs->u32EntryXcptErrCode & ~VMX_ENTRY_INT_XCPT_ERR_CODE_VALID_MASK))
5824 { /* likely */ }
5825 else
5826 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryXcptErrCodeRsvd);
5827
5828 /* Injecting a software interrupt, software exception or privileged software exception. */
5829 if ( uType == VMX_ENTRY_INT_INFO_TYPE_SW_INT
5830 || uType == VMX_ENTRY_INT_INFO_TYPE_SW_XCPT
5831 || uType == VMX_ENTRY_INT_INFO_TYPE_PRIV_SW_XCPT)
5832 {
5833 /* Instruction length must be in the range 0-15. */
5834 if (pVmcs->u32EntryInstrLen <= VMX_ENTRY_INSTR_LEN_MAX)
5835 { /* likely */ }
5836 else
5837 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLen);
5838
5839 /* Instruction length of 0 is allowed only when its CPU feature is present. */
5840 if ( pVmcs->u32EntryInstrLen == 0
5841 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxEntryInjectSoftInt)
5842 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_EntryInstrLenZero);
5843 }
5844 }
5845 }
5846
5847 /* VM-entry MSR-load count and VM-entry MSR-load area address. */
5848 if (pVmcs->u32EntryMsrLoadCount)
5849 {
5850 if ( (pVmcs->u64AddrEntryMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
5851 || (pVmcs->u64AddrEntryMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5852 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrEntryMsrLoad.u))
5853 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrEntryMsrLoad);
5854 }
5855
5856 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_ENTRY_TO_SMM)); /* We don't support SMM yet. */
5857 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MON)); /* We don't support dual-monitor treatment yet. */
5858
5859 NOREF(pszInstr);
5860 NOREF(pszFailure);
5861 return VINF_SUCCESS;
5862}
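/*
 * The disallowed0/allowed1 pattern used above (and for the VM-exit and
 * VM-execution controls below) works as follows, with hypothetical mask values
 * for illustration: disallowed0 holds the bits that must be 1 and allowed1 the
 * bits that may be 1, so
 *
 *     (~uCtls & disallowed0) != 0  ->  a mandatory bit is clear  ->  VMFail
 *     ( uCtls & ~allowed1)   != 0  ->  a reserved bit is set     ->  VMFail
 *
 * e.g. disallowed0 = 0x16, allowed1 = 0xffff and uCtls = 0x12 fails the first
 * test because bit 2 (0x04) must be set but is not.
 */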
5863
5864
5865/**
5866 * Checks VM-exit controls fields as part of VM-entry.
5867 * See Intel spec. 26.2.1.2 "VM-Exit Control Fields".
5868 *
5869 * @returns VBox status code.
5870 * @param pVCpu The cross context virtual CPU structure.
5871 * @param pszInstr The VMX instruction name (for logging purposes).
5872 */
5873IEM_STATIC int iemVmxVmentryCheckExitCtls(PVMCPU pVCpu, const char *pszInstr)
5874{
5875 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5876 const char * const pszFailure = "VMFail";
5877
5878 /* VM-exit controls. */
5879 VMXCTLSMSR ExitCtls;
5880 ExitCtls.u = CPUMGetGuestIa32VmxExitCtls(pVCpu);
5881 if (~pVmcs->u32ExitCtls & ExitCtls.n.disallowed0)
5882 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsDisallowed0);
5883
5884 if (pVmcs->u32ExitCtls & ~ExitCtls.n.allowed1)
5885 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ExitCtlsAllowed1);
5886
5887 /* Save preemption timer without activating it. */
5888 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
5889 && (pVmcs->u32ExitCtls & VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER))
5890 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_SavePreemptTimer);
5891
5892 /* VM-exit MSR-store count and VM-exit MSR-store area address. */
5893 if (pVmcs->u32ExitMsrStoreCount)
5894 {
5895 if ( (pVmcs->u64AddrExitMsrStore.u & VMX_AUTOMSR_OFFSET_MASK)
5896 || (pVmcs->u64AddrExitMsrStore.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5897 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrStore.u))
5898 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrStore);
5899 }
5900
5901 /* VM-exit MSR-load count and VM-exit MSR-load area address. */
5902 if (pVmcs->u32ExitMsrLoadCount)
5903 {
5904 if ( (pVmcs->u64AddrExitMsrLoad.u & VMX_AUTOMSR_OFFSET_MASK)
5905 || (pVmcs->u64AddrExitMsrLoad.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5906 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrExitMsrLoad.u))
5907 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrExitMsrLoad);
5908 }
5909
5910 NOREF(pszInstr);
5911 NOREF(pszFailure);
5912 return VINF_SUCCESS;
5913}
5914
5915
5916/**
5917 * Checks VM-execution controls fields as part of VM-entry.
5918 * See Intel spec. 26.2.1.1 "VM-Execution Control Fields".
5919 *
5920 * @returns VBox status code.
5921 * @param pVCpu The cross context virtual CPU structure.
5922 * @param pszInstr The VMX instruction name (for logging purposes).
5923 *
5924 * @remarks This may update secondary processor-based VM-execution control fields
5925 * in the current VMCS if necessary.
5926 */
5927IEM_STATIC int iemVmxVmentryCheckExecCtls(PVMCPU pVCpu, const char *pszInstr)
5928{
5929 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
5930 const char * const pszFailure = "VMFail";
5931
5932 /* Pin-based VM-execution controls. */
5933 {
5934 VMXCTLSMSR PinCtls;
5935 PinCtls.u = CPUMGetGuestIa32VmxPinbasedCtls(pVCpu);
5936 if (~pVmcs->u32PinCtls & PinCtls.n.disallowed0)
5937 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsDisallowed0);
5938
5939 if (pVmcs->u32PinCtls & ~PinCtls.n.allowed1)
5940 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_PinCtlsAllowed1);
5941 }
5942
5943 /* Processor-based VM-execution controls. */
5944 {
5945 VMXCTLSMSR ProcCtls;
5946 ProcCtls.u = CPUMGetGuestIa32VmxProcbasedCtls(pVCpu);
5947 if (~pVmcs->u32ProcCtls & ProcCtls.n.disallowed0)
5948 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsDisallowed0);
5949
5950 if (pVmcs->u32ProcCtls & ~ProcCtls.n.allowed1)
5951 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtlsAllowed1);
5952 }
5953
5954 /* Secondary processor-based VM-execution controls. */
5955 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
5956 {
5957 VMXCTLSMSR ProcCtls2;
5958 ProcCtls2.u = CPUMGetGuestIa32VmxProcbasedCtls2(pVCpu);
5959 if (~pVmcs->u32ProcCtls2 & ProcCtls2.n.disallowed0)
5960 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Disallowed0);
5961
5962 if (pVmcs->u32ProcCtls2 & ~ProcCtls2.n.allowed1)
5963 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ProcCtls2Allowed1);
5964 }
5965 else
5966 Assert(!pVmcs->u32ProcCtls2);
5967
5968 /* CR3-target count. */
5969 if (pVmcs->u32Cr3TargetCount <= VMX_V_CR3_TARGET_COUNT)
5970 { /* likely */ }
5971 else
5972 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Cr3TargetCount);
5973
5974 /* I/O bitmaps physical addresses. */
5975 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_IO_BITMAPS)
5976 {
5977 if ( (pVmcs->u64AddrIoBitmapA.u & X86_PAGE_4K_OFFSET_MASK)
5978 || (pVmcs->u64AddrIoBitmapA.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5979 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapA.u))
5980 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapA);
5981
5982 if ( (pVmcs->u64AddrIoBitmapB.u & X86_PAGE_4K_OFFSET_MASK)
5983 || (pVmcs->u64AddrIoBitmapB.u >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5984 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), pVmcs->u64AddrIoBitmapB.u))
5985 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrIoBitmapB);
5986 }
5987
5988 /* MSR bitmap physical address. */
5989 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
5990 {
5991 RTGCPHYS const GCPhysMsrBitmap = pVmcs->u64AddrMsrBitmap.u;
5992 if ( (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)
5993 || (GCPhysMsrBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
5994 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysMsrBitmap))
5995 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrMsrBitmap);
5996
5997 /* Read the MSR bitmap. */
5998 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
5999 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap),
6000 GCPhysMsrBitmap, VMX_V_MSR_BITMAP_SIZE);
6001 if (RT_FAILURE(rc))
6002 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrBitmapPtrReadPhys);
6003 }
6004
6005 /* TPR shadow related controls. */
6006 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
6007 {
6008 /* Virtual-APIC page physical address. */
6009 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
6010 if ( (GCPhysVirtApic & X86_PAGE_4K_OFFSET_MASK)
6011 || (GCPhysVirtApic >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6012 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVirtApic))
6013 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVirtApicPage);
6014
6015 /* Read the Virtual-APIC page. */
6016 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage));
6017 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage),
6018 GCPhysVirtApic, VMX_V_VIRT_APIC_PAGES);
6019 if (RT_FAILURE(rc))
6020 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys);
6021
6022 /* TPR threshold without virtual-interrupt delivery. */
6023 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
6024 && (pVmcs->u32TprThreshold & ~VMX_TPR_THRESHOLD_MASK))
6025 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdRsvd);
6026
6027 /* TPR threshold and VTPR. */
6028 uint8_t const *pbVirtApic = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVirtApicPage);
6029 uint8_t const u8VTpr = *(pbVirtApic + XAPIC_OFF_TPR);
6030 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
6031 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
6032            && RT_BF_GET(pVmcs->u32TprThreshold, VMX_BF_TPR_THRESHOLD_TPR) > ((u8VTpr >> 4) & UINT32_C(0xf)) /* Bits 7:4 */)
6033 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_TprThresholdVTpr);
6034 }
6035 else
6036 {
6037 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6038 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
6039 && !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY))
6040 { /* likely */ }
6041 else
6042 {
6043 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6044 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicTprShadow);
6045 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_APIC_REG_VIRT)
6046 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_ApicRegVirt);
6047 Assert(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY);
6048 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtIntDelivery);
6049 }
6050 }
6051
6052 /* NMI exiting and virtual-NMIs. */
6053 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_NMI_EXIT)
6054 && (pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI))
6055 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtNmi);
6056
6057 /* Virtual-NMIs and NMI-window exiting. */
6058 if ( !(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
6059 && (pVmcs->u32ProcCtls & VMX_PROC_CTLS_NMI_WINDOW_EXIT))
6060 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_NmiWindowExit);
6061
6062 /* Virtualize APIC accesses. */
6063 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
6064 {
6065 /* APIC-access physical address. */
6066 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
6067 if ( (GCPhysApicAccess & X86_PAGE_4K_OFFSET_MASK)
6068 || (GCPhysApicAccess >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6069 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
6070 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccess);
6071
6072 /*
6073 * Disallow APIC-access page and virtual-APIC page from being the same address.
6074 * Note! This is not an Intel requirement, but one imposed by our implementation.
6075 */
6076 /** @todo r=ramshankar: This is done primarily to simplify recursion scenarios while
6077 * redirecting accesses between the APIC-access page and the virtual-APIC
6078 * page. If any nested hypervisor requires this, we can implement it later. */
6079 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)
6080 {
6081 RTGCPHYS const GCPhysVirtApic = pVmcs->u64AddrVirtApic.u;
6082 if (GCPhysVirtApic == GCPhysApicAccess)
6083 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessEqVirtApic);
6084 }
6085
6086 /* Register the handler for the APIC-access page. */
6087 int rc = CPUMVmxApicAccessPageRegister(pVCpu, GCPhysApicAccess);
6088 if (RT_FAILURE(rc))
6089 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrApicAccessHandlerReg);
6090 }
6091
6092 /* Virtualize-x2APIC mode is mutually exclusive with virtualize-APIC accesses. */
6093 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_X2APIC_MODE)
6094 && (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
6095 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
6096
6097 /* Virtual-interrupt delivery requires external interrupt exiting. */
6098 if ( (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VIRT_INT_DELIVERY)
6099 && !(pVmcs->u32PinCtls & VMX_PIN_CTLS_EXT_INT_EXIT))
6100 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VirtX2ApicVirtApic);
6101
6102 /* VPID. */
6103 if ( !(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VPID)
6104 || pVmcs->u16Vpid != 0)
6105 { /* likely */ }
6106 else
6107 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_Vpid);
6108
6109 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_POSTED_INT)); /* We don't support posted interrupts yet. */
6110 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT)); /* We don't support EPT yet. */
6111 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PML)); /* We don't support PML yet. */
6112 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)); /* We don't support Unrestricted-guests yet. */
6113 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMFUNC)); /* We don't support VM functions yet. */
6114 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT_VE)); /* We don't support EPT-violation #VE yet. */
6115 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)); /* We don't support Pause-loop exiting yet. */
6116
6117 /* VMCS shadowing. */
6118 if (pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_VMCS_SHADOWING)
6119 {
6120 /* VMREAD-bitmap physical address. */
6121 RTGCPHYS const GCPhysVmreadBitmap = pVmcs->u64AddrVmreadBitmap.u;
6122 if ( ( GCPhysVmreadBitmap & X86_PAGE_4K_OFFSET_MASK)
6123 || ( GCPhysVmreadBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6124 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmreadBitmap))
6125 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmreadBitmap);
6126
6127 /* VMWRITE-bitmap physical address. */
6128        RTGCPHYS const GCPhysVmwriteBitmap = pVmcs->u64AddrVmwriteBitmap.u;
6129 if ( ( GCPhysVmwriteBitmap & X86_PAGE_4K_OFFSET_MASK)
6130 || ( GCPhysVmwriteBitmap >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
6131 || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmwriteBitmap))
6132 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_AddrVmwriteBitmap);
6133
6134 /* Read the VMREAD-bitmap. */
6135 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
6136 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap),
6137 GCPhysVmreadBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
6138 if (RT_FAILURE(rc))
6139 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys);
6140
6141 /* Read the VMWRITE-bitmap. */
6142 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap));
6143 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap),
6144 GCPhysVmwriteBitmap, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
6145 if (RT_FAILURE(rc))
6146 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys);
6147 }
6148
6149 NOREF(pszInstr);
6150 NOREF(pszFailure);
6151 return VINF_SUCCESS;
6152}
6153
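/*
 * Illustrative sketch (not part of the emulation): how a 32-bit VMX control field is
 * validated against its capability MSR, as done above for the pin-based, processor-based
 * and secondary processor-based controls.  The low half of the capability MSR gives the
 * bits that must be 1 and the high half the bits that may be 1.  exampleCheckCtls is a
 * made-up name.
 */
#if 0 /* example only */
static bool exampleCheckCtls(uint32_t uCtls, uint64_t uCapMsr)
{
    uint32_t const fDisallowed0 = (uint32_t)uCapMsr;            /* Bits that must be set. */
    uint32_t const fAllowed1    = (uint32_t)(uCapMsr >> 32);    /* Bits that may be set. */
    if (~uCtls & fDisallowed0)          /* A must-be-one bit is zero -> fail. */
        return false;
    if (uCtls & ~fAllowed1)             /* A must-be-zero bit is one -> fail. */
        return false;
    return true;
}
#endif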
6154
6155/**
6156 * Loads the guest control registers, debug registers and some MSRs as part of
6157 * VM-entry.
6158 *
6159 * @param pVCpu The cross context virtual CPU structure.
6160 */
6161IEM_STATIC void iemVmxVmentryLoadGuestControlRegsMsrs(PVMCPU pVCpu)
6162{
6163 /*
6164 * Load guest control registers, debug registers and MSRs.
6165 * See Intel spec. 26.3.2.1 "Loading Guest Control Registers, Debug Registers and MSRs".
6166 */
6167 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6168 uint64_t const uGstCr0 = (pVmcs->u64GuestCr0.u & ~VMX_ENTRY_CR0_IGNORE_MASK)
6169 | (pVCpu->cpum.GstCtx.cr0 & VMX_ENTRY_CR0_IGNORE_MASK);
6170 CPUMSetGuestCR0(pVCpu, uGstCr0);
6171 CPUMSetGuestCR4(pVCpu, pVmcs->u64GuestCr4.u);
6172 pVCpu->cpum.GstCtx.cr3 = pVmcs->u64GuestCr3.u;
6173
6174 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
6175 pVCpu->cpum.GstCtx.dr[7] = (pVmcs->u64GuestDr7.u & ~VMX_ENTRY_DR7_MBZ_MASK) | VMX_ENTRY_DR7_MB1_MASK;
6176
6177 pVCpu->cpum.GstCtx.SysEnter.eip = pVmcs->u64GuestSysenterEip.s.Lo;
6178 pVCpu->cpum.GstCtx.SysEnter.esp = pVmcs->u64GuestSysenterEsp.s.Lo;
6179 pVCpu->cpum.GstCtx.SysEnter.cs = pVmcs->u32GuestSysenterCS;
6180
6181 if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fLongMode)
6182 {
6183 /* FS base and GS base are loaded while loading the rest of the guest segment registers. */
6184
6185 /* EFER MSR. */
6186 if (!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR))
6187 {
6188 bool const fGstInLongMode = RT_BOOL(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_IA32E_MODE_GUEST);
6189 bool const fGstPaging = RT_BOOL(uGstCr0 & X86_CR0_PG);
6190 uint64_t const uHostEfer = pVCpu->cpum.GstCtx.msrEFER;
6191 if (fGstInLongMode)
6192 {
6193 /* If the nested-guest is in long mode, LMA and LME are both set. */
6194 Assert(fGstPaging);
6195 pVCpu->cpum.GstCtx.msrEFER = uHostEfer | (MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
6196 }
6197 else
6198 {
6199 /*
6200 * If the nested-guest is outside long mode:
6201 * - With paging: LMA is cleared, LME is cleared.
6202 * - Without paging: LMA is cleared, LME is left unmodified.
6203 */
6204 uint64_t const fLmaLmeMask = MSR_K6_EFER_LMA | (fGstPaging ? MSR_K6_EFER_LME : 0);
6205 pVCpu->cpum.GstCtx.msrEFER = uHostEfer & ~fLmaLmeMask;
6206 }
6207 }
6208 /* else: see below. */
6209 }
6210
6211 /* PAT MSR. */
6212 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PAT_MSR)
6213 pVCpu->cpum.GstCtx.msrPAT = pVmcs->u64GuestPatMsr.u;
6214
6215 /* EFER MSR. */
6216 if (pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
6217 pVCpu->cpum.GstCtx.msrEFER = pVmcs->u64GuestEferMsr.u;
6218
6219 /* We don't support IA32_PERF_GLOBAL_CTRL MSR yet. */
6220 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_PERF_MSR));
6221
6222 /* We don't support IA32_BNDCFGS MSR yet. */
6223 Assert(!(pVmcs->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_BNDCFGS_MSR));
6224
6225 /* Nothing to do for SMBASE register - We don't support SMM yet. */
6226}
6227
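/*
 * Illustrative sketch (not part of the emulation): the EFER.LMA/LME adjustment applied
 * above when the "load IA32_EFER" VM-entry control is clear.  exampleAdjustEfer is a
 * made-up name; the bit positions (LME = bit 8, LMA = bit 10) follow the architectural
 * definition of IA32_EFER.
 */
#if 0 /* example only */
static uint64_t exampleAdjustEfer(uint64_t uEfer, bool fIa32eModeGuest, bool fGuestPaging)
{
    uint64_t const fLme = UINT64_C(1) << 8;     /* EFER.LME */
    uint64_t const fLma = UINT64_C(1) << 10;    /* EFER.LMA */
    if (fIa32eModeGuest)
        return uEfer | fLma | fLme;             /* Long-mode guest: both set. */
    if (fGuestPaging)
        return uEfer & ~(fLma | fLme);          /* Paging outside long mode: both cleared. */
    return uEfer & ~fLma;                       /* No paging: LMA cleared, LME untouched. */
}
#endif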
6228
6229/**
6230 * Loads the guest segment registers, GDTR, IDTR, LDTR and TR as part of VM-entry.
6231 *
6232 * @param pVCpu The cross context virtual CPU structure.
6233 */
6234IEM_STATIC void iemVmxVmentryLoadGuestSegRegs(PVMCPU pVCpu)
6235{
6236 /*
6237 * Load guest segment registers, GDTR, IDTR, LDTR and TR.
6238 * See Intel spec. 26.3.2.2 "Loading Guest Segment Registers and Descriptor-Table Registers".
6239 */
6240 /* CS, SS, ES, DS, FS, GS. */
6241 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6242 for (unsigned iSegReg = 0; iSegReg < X86_SREG_COUNT; iSegReg++)
6243 {
6244 PCPUMSELREG pGstSelReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6245 CPUMSELREG VmcsSelReg;
6246 int rc = iemVmxVmcsGetGuestSegReg(pVmcs, iSegReg, &VmcsSelReg);
6247 AssertRC(rc); NOREF(rc);
6248 if (!(VmcsSelReg.Attr.u & X86DESCATTR_UNUSABLE))
6249 {
6250 pGstSelReg->Sel = VmcsSelReg.Sel;
6251 pGstSelReg->ValidSel = VmcsSelReg.Sel;
6252 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6253 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6254 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
6255 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
6256 }
6257 else
6258 {
6259 pGstSelReg->Sel = VmcsSelReg.Sel;
6260 pGstSelReg->ValidSel = VmcsSelReg.Sel;
6261 pGstSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
6262 switch (iSegReg)
6263 {
6264 case X86_SREG_CS:
6265 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6266 pGstSelReg->u32Limit = VmcsSelReg.u32Limit;
6267 pGstSelReg->Attr.u = VmcsSelReg.Attr.u;
6268 break;
6269
6270 case X86_SREG_SS:
6271 pGstSelReg->u64Base = VmcsSelReg.u64Base & UINT32_C(0xfffffff0);
6272 pGstSelReg->u32Limit = 0;
6273 pGstSelReg->Attr.u = (VmcsSelReg.Attr.u & X86DESCATTR_DPL) | X86DESCATTR_D | X86DESCATTR_UNUSABLE;
6274 break;
6275
6276 case X86_SREG_ES:
6277 case X86_SREG_DS:
6278 pGstSelReg->u64Base = 0;
6279 pGstSelReg->u32Limit = 0;
6280 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
6281 break;
6282
6283 case X86_SREG_FS:
6284 case X86_SREG_GS:
6285 pGstSelReg->u64Base = VmcsSelReg.u64Base;
6286 pGstSelReg->u32Limit = 0;
6287 pGstSelReg->Attr.u = X86DESCATTR_UNUSABLE;
6288 break;
6289 }
6290 Assert(pGstSelReg->Attr.n.u1Unusable);
6291 }
6292 }
6293
6294 /* LDTR. */
6295 pVCpu->cpum.GstCtx.ldtr.Sel = pVmcs->GuestLdtr;
6296 pVCpu->cpum.GstCtx.ldtr.ValidSel = pVmcs->GuestLdtr;
6297 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
6298 if (!(pVmcs->u32GuestLdtrAttr & X86DESCATTR_UNUSABLE))
6299 {
6300 pVCpu->cpum.GstCtx.ldtr.u64Base = pVmcs->u64GuestLdtrBase.u;
6301 pVCpu->cpum.GstCtx.ldtr.u32Limit = pVmcs->u32GuestLdtrLimit;
6302 pVCpu->cpum.GstCtx.ldtr.Attr.u = pVmcs->u32GuestLdtrAttr;
6303 }
6304 else
6305 {
6306 pVCpu->cpum.GstCtx.ldtr.u64Base = 0;
6307 pVCpu->cpum.GstCtx.ldtr.u32Limit = 0;
6308 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESCATTR_UNUSABLE;
6309 }
6310
6311 /* TR. */
6312 Assert(!(pVmcs->u32GuestTrAttr & X86DESCATTR_UNUSABLE));
6313 pVCpu->cpum.GstCtx.tr.Sel = pVmcs->GuestTr;
6314 pVCpu->cpum.GstCtx.tr.ValidSel = pVmcs->GuestTr;
6315 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
6316 pVCpu->cpum.GstCtx.tr.u64Base = pVmcs->u64GuestTrBase.u;
6317 pVCpu->cpum.GstCtx.tr.u32Limit = pVmcs->u32GuestTrLimit;
6318 pVCpu->cpum.GstCtx.tr.Attr.u = pVmcs->u32GuestTrAttr;
6319
6320 /* GDTR. */
6321 pVCpu->cpum.GstCtx.gdtr.cbGdt = pVmcs->u32GuestGdtrLimit;
6322 pVCpu->cpum.GstCtx.gdtr.pGdt = pVmcs->u64GuestGdtrBase.u;
6323
6324 /* IDTR. */
6325 pVCpu->cpum.GstCtx.idtr.cbIdt = pVmcs->u32GuestIdtrLimit;
6326 pVCpu->cpum.GstCtx.idtr.pIdt = pVmcs->u64GuestIdtrBase.u;
6327}
6328
6329
6330/**
6331 * Loads the guest MSRs from the VM-entry auto-load MSRs as part of VM-entry.
6332 *
6333 * @returns VBox status code.
6334 * @param pVCpu The cross context virtual CPU structure.
6335 * @param pszInstr The VMX instruction name (for logging purposes).
6336 */
6337IEM_STATIC int iemVmxVmentryLoadGuestAutoMsrs(PVMCPU pVCpu, const char *pszInstr)
6338{
6339 /*
6340 * Load guest MSRs.
6341 * See Intel spec. 26.4 "Loading MSRs".
6342 */
6343 PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6344 const char *const pszFailure = "VM-exit";
6345
6346 /*
6347 * The VM-entry MSR-load area address need not be a valid guest-physical address if the
6348 * VM-entry MSR load count is 0. If this is the case, bail early without reading it.
6349 * See Intel spec. 24.8.2 "VM-Entry Controls for MSRs".
6350 */
6351 uint32_t const cMsrs = pVmcs->u32EntryMsrLoadCount;
6352 if (!cMsrs)
6353 return VINF_SUCCESS;
6354
6355 /*
6356     * Verify the MSR auto-load count. Physical CPUs can behave unpredictably if the count is
6357     * exceeded, possibly even raising #MC exceptions during the VMX transition. Our
6358     * implementation shall fail VM-entry with a VMX_EXIT_ERR_MSR_LOAD VM-exit.
6359 */
6360 bool const fIsMsrCountValid = iemVmxIsAutoMsrCountValid(pVCpu, cMsrs);
6361 if (fIsMsrCountValid)
6362 { /* likely */ }
6363 else
6364 {
6365 iemVmxVmcsSetExitQual(pVCpu, VMX_V_AUTOMSR_AREA_SIZE / sizeof(VMXAUTOMSR));
6366 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadCount);
6367 }
6368
6369 RTGCPHYS const GCPhysAutoMsrArea = pVmcs->u64AddrEntryMsrLoad.u;
6370    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), (void *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea),
6371 GCPhysAutoMsrArea, VMX_V_AUTOMSR_AREA_SIZE);
6372 if (RT_SUCCESS(rc))
6373 {
6374 PVMXAUTOMSR pMsr = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pAutoMsrArea);
6375 Assert(pMsr);
6376 for (uint32_t idxMsr = 0; idxMsr < cMsrs; idxMsr++, pMsr++)
6377 {
6378 if ( !pMsr->u32Reserved
6379 && pMsr->u32Msr != MSR_K8_FS_BASE
6380 && pMsr->u32Msr != MSR_K8_GS_BASE
6381 && pMsr->u32Msr != MSR_K6_EFER
6382 && pMsr->u32Msr != MSR_IA32_SMM_MONITOR_CTL
6383 && pMsr->u32Msr >> 8 != MSR_IA32_X2APIC_START >> 8)
6384 {
6385 VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pMsr->u32Msr, pMsr->u64Value);
6386 if (rcStrict == VINF_SUCCESS)
6387 continue;
6388
6389 /*
6390             * If we're in ring-0, we cannot handle returns to ring-3 at this point and still continue VM-entry.
6391             * If any guest hypervisor loads MSRs that require ring-3 handling, we cause a VM-entry failure,
6392             * recording the MSR index in the VM-exit qualification (as per the Intel spec.) and indicating it
6393             * further with our own, specific diagnostic code. Later, we can try to implement handling of the
6394             * MSR in ring-0 if possible, or come up with a better, generic solution.
6395 */
6396 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
6397 VMXVDIAG const enmDiag = rcStrict == VINF_CPUM_R3_MSR_WRITE
6398 ? kVmxVDiag_Vmentry_MsrLoadRing3
6399 : kVmxVDiag_Vmentry_MsrLoad;
6400 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, enmDiag);
6401 }
6402 else
6403 {
6404 iemVmxVmcsSetExitQual(pVCpu, idxMsr);
6405 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadRsvd);
6406 }
6407 }
6408 }
6409 else
6410 {
6411 AssertMsgFailed(("%s: Failed to read MSR auto-load area at %#RGp, rc=%Rrc\n", pszInstr, GCPhysAutoMsrArea, rc));
6412 IEM_VMX_VMENTRY_FAILED_RET(pVCpu, pszInstr, pszFailure, kVmxVDiag_Vmentry_MsrLoadPtrReadPhys);
6413 }
6414
6415 NOREF(pszInstr);
6416 NOREF(pszFailure);
6417 return VINF_SUCCESS;
6418}
6419
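/*
 * Illustrative sketch (not part of the emulation): the 16-byte entry layout of the
 * VM-entry/VM-exit MSR areas walked above (MSR index, reserved dword, 64-bit value, per
 * the Intel SDM) and a typical iteration over such an area.  ExampleAutoMsr,
 * exampleLoadMsrs and the callback are made-up names.
 */
#if 0 /* example only */
typedef struct ExampleAutoMsr
{
    uint32_t u32Msr;        /* The MSR index to load. */
    uint32_t u32Reserved;   /* Must be zero. */
    uint64_t u64Value;      /* The value to load into the MSR. */
} ExampleAutoMsr;

/* Returns the index of the first rejected entry, or cMsrs if all entries were accepted. */
static uint32_t exampleLoadMsrs(ExampleAutoMsr const *paMsrs, uint32_t cMsrs,
                                bool (*pfnSetMsr)(uint32_t idMsr, uint64_t uValue))
{
    for (uint32_t i = 0; i < cMsrs; i++)
    {
        if (paMsrs[i].u32Reserved)                              /* Reserved field must be zero. */
            return i;
        if (!pfnSetMsr(paMsrs[i].u32Msr, paMsrs[i].u64Value))   /* Loading the MSR failed. */
            return i;
    }
    return cMsrs;
}
#endif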
6420
6421/**
6422 * Loads the guest-state non-register state as part of VM-entry.
6423 *
6425 * @param pVCpu The cross context virtual CPU structure.
6426 *
6427 * @remarks This must be called only after loading the nested-guest register state
6428 * (especially nested-guest RIP).
6429 */
6430IEM_STATIC void iemVmxVmentryLoadGuestNonRegState(PVMCPU pVCpu)
6431{
6432 /*
6433 * Load guest non-register state.
6434 * See Intel spec. 26.6 "Special Features of VM Entry"
6435 */
6436 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6437 uint32_t const uEntryIntInfo = pVmcs->u32EntryIntInfo;
6438 if (VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
6439 {
6440 /** @todo NSTVMX: Pending debug exceptions. */
6441 Assert(!(pVmcs->u64GuestPendingDbgXcpt.u));
6442
6443 if (pVmcs->u32GuestIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
6444 {
6445            /** @todo NSTVMX: Virtual-NMIs don't affect NMI blocking in the normal sense.
6446 * We probably need a different force flag for virtual-NMI
6447 * pending/blocking. */
6448 Assert(!(pVmcs->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI));
6449 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6450 }
6451 else
6452 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS));
6453
6454 if (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
6455 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
6456 else
6457 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
6458
6459 /* SMI blocking is irrelevant. We don't support SMIs yet. */
6460 }
6461
6462 /* Loading PDPTEs will be taken care when we switch modes. We don't support EPT yet. */
6463 Assert(!(pVmcs->u32ProcCtls2 & VMX_PROC_CTLS2_EPT));
6464
6465 /* VPID is irrelevant. We don't support VPID yet. */
6466
6467 /* Clear address-range monitoring. */
6468 EMMonitorWaitClear(pVCpu);
6469}
6470
6471
6472/**
6473 * Loads the guest-state as part of VM-entry.
6474 *
6475 * @returns VBox status code.
6476 * @param pVCpu The cross context virtual CPU structure.
6477 * @param pszInstr The VMX instruction name (for logging purposes).
6478 *
6479 * @remarks This must be done after all the necessary steps prior to loading of
6480 * guest-state (e.g. checking various VMCS state).
6481 */
6482IEM_STATIC int iemVmxVmentryLoadGuestState(PVMCPU pVCpu, const char *pszInstr)
6483{
6484 iemVmxVmentryLoadGuestControlRegsMsrs(pVCpu);
6485 iemVmxVmentryLoadGuestSegRegs(pVCpu);
6486
6487 /*
6488 * Load guest RIP, RSP and RFLAGS.
6489 * See Intel spec. 26.3.2.3 "Loading Guest RIP, RSP and RFLAGS".
6490 */
6491 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6492 pVCpu->cpum.GstCtx.rsp = pVmcs->u64GuestRsp.u;
6493 pVCpu->cpum.GstCtx.rip = pVmcs->u64GuestRip.u;
6494 pVCpu->cpum.GstCtx.rflags.u = pVmcs->u64GuestRFlags.u;
6495
6496 /* Initialize the PAUSE-loop controls as part of VM-entry. */
6497 pVCpu->cpum.GstCtx.hwvirt.vmx.uFirstPauseLoopTick = 0;
6498 pVCpu->cpum.GstCtx.hwvirt.vmx.uPrevPauseTick = 0;
6499
6500 iemVmxVmentryLoadGuestNonRegState(pVCpu);
6501
6502 NOREF(pszInstr);
6503 return VINF_SUCCESS;
6504}
6505
6506
6507/**
6508 * Set up the VMX-preemption timer.
6509 *
6510 * @param pVCpu The cross context virtual CPU structure.
6511 * @param pszInstr The VMX instruction name (for logging purposes).
6512 */
6513IEM_STATIC void iemVmxVmentrySetupPreemptTimer(PVMCPU pVCpu, const char *pszInstr)
6514{
6515 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6516 Assert(pVmcs);
6517 if (pVmcs->u32PinCtls & VMX_PIN_CTLS_PREEMPT_TIMER)
6518 {
6519 uint64_t const uVmentryTick = TMCpuTickGetNoCheck(pVCpu);
6520 pVCpu->cpum.GstCtx.hwvirt.vmx.uVmentryTick = uVmentryTick;
6521 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER);
6522
6523 Log(("%s: VM-entry set up VMX-preemption timer at %#RX64\n", pszInstr, uVmentryTick));
6524 }
6525 else
6526 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
6527
6528 NOREF(pszInstr);
6529}
6530
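/*
 * Illustrative sketch (not part of the emulation): one way the VM-entry TSC recorded above
 * could later be turned into the current VMX-preemption timer value.  Per the Intel SDM the
 * timer counts down by 1 each time bit X of the TSC changes, where X is bits 4:0 of
 * IA32_VMX_MISC; the names below are made up for illustration.
 */
#if 0 /* example only */
static uint32_t examplePreemptTimerValue(uint32_t uTimerInitial, uint64_t uTscAtVmentry,
                                         uint64_t uTscNow, uint8_t cShiftVmxMisc)
{
    uint64_t const cTicksElapsed = (uTscNow - uTscAtVmentry) >> cShiftVmxMisc;
    if (cTicksElapsed >= uTimerInitial)
        return 0;                                   /* Expired: a preemption-timer VM-exit is due. */
    return uTimerInitial - (uint32_t)cTicksElapsed; /* Remaining timer value. */
}
#endif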
6531
6532/**
6533 * Performs event injection (if any) as part of VM-entry.
6534 *
 * @returns VBox status code.
6535 * @param pVCpu The cross context virtual CPU structure.
6536 * @param pszInstr The VMX instruction name (for logging purposes).
6537 */
6538IEM_STATIC int iemVmxVmentryInjectEvent(PVMCPU pVCpu, const char *pszInstr)
6539{
6540 /*
6541 * Inject events.
6542 * See Intel spec. 26.5 "Event Injection".
6543 */
6544 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6545 uint32_t const uEntryIntInfo = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->u32EntryIntInfo;
6546 if (VMX_ENTRY_INT_INFO_IS_VALID(uEntryIntInfo))
6547 {
6548 /*
6549         * The event that is going to be made pending for injection is not subject to VMX intercepts,
6550         * thus we flag ignoring of intercepts. However, recursive exceptions (if any) during delivery
6551         * of the current event -are- subject to intercepts, hence this flag will be flipped during
6552         * the actual delivery of this event.
6553 */
6554 pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents = false;
6555
6556 uint8_t const uType = VMX_ENTRY_INT_INFO_TYPE(uEntryIntInfo);
6557 if (uType == VMX_ENTRY_INT_INFO_TYPE_OTHER_EVENT)
6558 {
6559 Assert(VMX_ENTRY_INT_INFO_VECTOR(uEntryIntInfo) == VMX_ENTRY_INT_INFO_VECTOR_MTF);
6560 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_MTF);
6561 return VINF_SUCCESS;
6562 }
6563
6564 int rc = HMVmxEntryIntInfoInjectTrpmEvent(pVCpu, uEntryIntInfo, pVmcs->u32EntryXcptErrCode, pVmcs->u32EntryInstrLen,
6565 pVCpu->cpum.GstCtx.cr2);
6566 AssertRCReturn(rc, rc);
6567 }
6568
6569 NOREF(pszInstr);
6570 return VINF_SUCCESS;
6571}
6572
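/*
 * Illustrative sketch (not part of the emulation): decoding the VM-entry
 * interruption-information field consumed above.  Bit layout per the Intel SDM:
 * bits 7:0 = vector, bits 10:8 = type, bit 11 = deliver-error-code, bit 31 = valid.
 * ExampleEntryIntInfo and exampleDecodeEntryIntInfo are made-up names.
 */
#if 0 /* example only */
typedef struct ExampleEntryIntInfo
{
    uint8_t uVector;            /* Interrupt/exception vector. */
    uint8_t uType;              /* 0=ext int, 2=NMI, 3=HW xcpt, 4=SW int, 5=priv SW xcpt, 6=SW xcpt, 7=other. */
    bool    fErrCodeValid;      /* Whether an error code accompanies the event. */
    bool    fValid;             /* Whether event injection is requested at all. */
} ExampleEntryIntInfo;

static ExampleEntryIntInfo exampleDecodeEntryIntInfo(uint32_t uEntryIntInfo)
{
    ExampleEntryIntInfo Info;
    Info.uVector       = (uint8_t)(uEntryIntInfo & 0xff);
    Info.uType         = (uint8_t)((uEntryIntInfo >> 8) & 0x7);
    Info.fErrCodeValid = RT_BOOL((uEntryIntInfo >> 11) & 1);
    Info.fValid        = RT_BOOL((uEntryIntInfo >> 31) & 1);
    return Info;
}
#endif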
6573
6574/**
6575 * VMLAUNCH/VMRESUME instruction execution worker.
6576 *
6577 * @returns Strict VBox status code.
6578 * @param pVCpu The cross context virtual CPU structure.
6579 * @param cbInstr The instruction length in bytes.
6580 * @param uInstrId The instruction identity (VMXINSTRID_VMLAUNCH or
6581 * VMXINSTRID_VMRESUME).
6582 * @param pExitInfo Pointer to the VM-exit instruction information struct.
6583 * Optional, can be NULL.
6584 *
6585 * @remarks Common VMX instruction checks are already expected to be done by the caller,
6586 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
6587 */
6588IEM_STATIC VBOXSTRICTRC iemVmxVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId, PCVMXVEXITINFO pExitInfo)
6589{
6590 Assert( uInstrId == VMXINSTRID_VMLAUNCH
6591 || uInstrId == VMXINSTRID_VMRESUME);
6592 const char *pszInstr = uInstrId == VMXINSTRID_VMRESUME ? "vmresume" : "vmlaunch";
6593
6594 /* Nested-guest intercept. */
6595 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6596 {
6597 if (pExitInfo)
6598 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
6599 uint32_t const uExitReason = uInstrId == VMXINSTRID_VMRESUME ? VMX_EXIT_VMRESUME : VMX_EXIT_VMLAUNCH;
6600 return iemVmxVmexitInstrNeedsInfo(pVCpu, uExitReason, uInstrId, cbInstr);
6601 }
6602
6603 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
6604
6605 /* CPL. */
6606 if (pVCpu->iem.s.uCpl > 0)
6607 {
6608 Log(("%s: CPL %u -> #GP(0)\n", pszInstr, pVCpu->iem.s.uCpl));
6609 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_Cpl;
6610 return iemRaiseGeneralProtectionFault0(pVCpu);
6611 }
6612
6613 /* Current VMCS valid. */
6614 if (!IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
6615 {
6616 Log(("%s: VMCS pointer %#RGp invalid -> VMFailInvalid\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
6617 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrInvalid;
6618 iemVmxVmFailInvalid(pVCpu);
6619 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6620 return VINF_SUCCESS;
6621 }
6622
6623 /** @todo Distinguish block-by-MOV-SS from block-by-STI. Currently we
6624 * use block-by-STI here which is not quite correct. */
6625 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
6626 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
6627 {
6628 Log(("%s: VM entry with events blocked by MOV SS -> VMFail\n", pszInstr));
6629 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_BlocKMovSS;
6630 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_BLOCK_MOVSS);
6631 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6632 return VINF_SUCCESS;
6633 }
6634
6635 if (uInstrId == VMXINSTRID_VMLAUNCH)
6636 {
6637 /* VMLAUNCH with non-clear VMCS. */
6638 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_STATE_CLEAR)
6639 { /* likely */ }
6640 else
6641 {
6642 Log(("vmlaunch: VMLAUNCH with non-clear VMCS -> VMFail\n"));
6643 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsClear;
6644 iemVmxVmFail(pVCpu, VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS);
6645 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6646 return VINF_SUCCESS;
6647 }
6648 }
6649 else
6650 {
6651 /* VMRESUME with non-launched VMCS. */
6652 if (pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState == VMX_V_VMCS_STATE_LAUNCHED)
6653 { /* likely */ }
6654 else
6655 {
6656 Log(("vmresume: VMRESUME with non-launched VMCS -> VMFail\n"));
6657 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_VmcsLaunch;
6658 iemVmxVmFail(pVCpu, VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS);
6659 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6660 return VINF_SUCCESS;
6661 }
6662 }
6663
6664 /*
6665 * Load the current VMCS.
6666 */
6667 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
6668 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs),
6669 IEM_VMX_GET_CURRENT_VMCS(pVCpu), VMX_V_VMCS_SIZE);
6670 if (RT_FAILURE(rc))
6671 {
6672 Log(("%s: Failed to read VMCS at %#RGp, rc=%Rrc\n", pszInstr, IEM_VMX_GET_CURRENT_VMCS(pVCpu), rc));
6673 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmentry_PtrReadPhys;
6674 return rc;
6675 }
6676
6677 /*
6678 * We are allowed to cache VMCS related data structures (such as I/O bitmaps, MSR bitmaps)
6679 * while entering VMX non-root mode. We do some of this while checking VM-execution
6680 * controls. The guest hypervisor should not make assumptions and cannot expect
6681 * predictable behavior if changes to these structures are made in guest memory while
6682 * executing in VMX non-root mode. As far as VirtualBox is concerned, the guest cannot
6683     * modify them anyway as we cache them in host memory. We trade memory for speed here.
6684 *
6685 * See Intel spec. 24.11.4 "Software Access to Related Structures".
6686 */
6687 rc = iemVmxVmentryCheckExecCtls(pVCpu, pszInstr);
6688 if (RT_SUCCESS(rc))
6689 {
6690 rc = iemVmxVmentryCheckExitCtls(pVCpu, pszInstr);
6691 if (RT_SUCCESS(rc))
6692 {
6693 rc = iemVmxVmentryCheckEntryCtls(pVCpu, pszInstr);
6694 if (RT_SUCCESS(rc))
6695 {
6696 rc = iemVmxVmentryCheckHostState(pVCpu, pszInstr);
6697 if (RT_SUCCESS(rc))
6698 {
6699 /* Save the guest force-flags as VM-exits can occur from this point on. */
6700 iemVmxVmentrySaveForceFlags(pVCpu);
6701
6702 /* Initialize the VM-exit qualification field as it MBZ for VM-exits where it isn't specified. */
6703 iemVmxVmcsSetExitQual(pVCpu, 0);
6704
6705 rc = iemVmxVmentryCheckGuestState(pVCpu, pszInstr);
6706 if (RT_SUCCESS(rc))
6707 {
6708 rc = iemVmxVmentryLoadGuestState(pVCpu, pszInstr);
6709 if (RT_SUCCESS(rc))
6710 {
6711 rc = iemVmxVmentryLoadGuestAutoMsrs(pVCpu, pszInstr);
6712 if (RT_SUCCESS(rc))
6713 {
6714 Assert(rc != VINF_CPUM_R3_MSR_WRITE);
6715
6716 /* VMLAUNCH instruction must update the VMCS launch state. */
6717 if (uInstrId == VMXINSTRID_VMLAUNCH)
6718 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = VMX_V_VMCS_STATE_LAUNCHED;
6719
6720 /* Perform the VMX transition (PGM updates). */
6721 VBOXSTRICTRC rcStrict = iemVmxWorldSwitch(pVCpu);
6722 if (rcStrict == VINF_SUCCESS)
6723 { /* likely */ }
6724 else if (RT_SUCCESS(rcStrict))
6725 {
6726 Log3(("%s: iemVmxWorldSwitch returns %Rrc -> Setting passup status\n", pszInstr,
6727 VBOXSTRICTRC_VAL(rcStrict)));
6728 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
6729 }
6730 else
6731 {
6732 Log3(("%s: iemVmxWorldSwitch failed! rc=%Rrc\n", pszInstr, VBOXSTRICTRC_VAL(rcStrict)));
6733 return rcStrict;
6734 }
6735
6736 /* We've now entered nested-guest execution. */
6737 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode = true;
6738
6739 /*
6740 * The priority of potential VM-exits during VM-entry is important.
6741 * The priorities of VM-exits and events are listed from highest
6742 * to lowest as follows:
6743 *
6744 * 1. Event injection.
6745 * 2. TPR below threshold / APIC-write.
6746 * 3. SMI.
6747 * 4. INIT.
6748 * 5. MTF exit.
6749 * 6. Pending debug exceptions.
6750 * 7. Debug-trap exceptions.
6751 * 8. VMX-preemption timer.
6752 * 9. NMI-window exit.
6753 * 10. NMI injection.
6754 * 11. Interrupt-window exit.
6755 * 12. Interrupt injection.
6756 */
6757
6758 /* Setup the VMX-preemption timer. */
6759 iemVmxVmentrySetupPreemptTimer(pVCpu, pszInstr);
6760
6761 /* Now that we've switched page tables, we can inject events if any. */
6762 iemVmxVmentryInjectEvent(pVCpu, pszInstr);
6763
6764 return VINF_SUCCESS;
6765 }
6766 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_MSR_LOAD | VMX_EXIT_REASON_ENTRY_FAILED);
6767 }
6768 }
6769 return iemVmxVmexit(pVCpu, VMX_EXIT_ERR_INVALID_GUEST_STATE | VMX_EXIT_REASON_ENTRY_FAILED);
6770 }
6771
6772 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_HOST_STATE);
6773 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6774 return VINF_SUCCESS;
6775 }
6776 }
6777 }
6778
6779 iemVmxVmFail(pVCpu, VMXINSTRERR_VMENTRY_INVALID_CTLS);
6780 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6781 return VINF_SUCCESS;
6782}
6783
6784
6785/**
6786 * Checks whether an RDMSR or WRMSR instruction for the given MSR is intercepted
6787 * (causes a VM-exit) or not.
6788 *
6789 * @returns @c true if the instruction is intercepted, @c false otherwise.
6790 * @param pVCpu The cross context virtual CPU structure.
6791 * @param uExitReason The VM-exit exit reason (VMX_EXIT_RDMSR or
6792 * VMX_EXIT_WRMSR).
6793 * @param idMsr The MSR.
6794 */
6795IEM_STATIC bool iemVmxIsRdmsrWrmsrInterceptSet(PVMCPU pVCpu, uint32_t uExitReason, uint32_t idMsr)
6796{
6797 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
6798 Assert( uExitReason == VMX_EXIT_RDMSR
6799 || uExitReason == VMX_EXIT_WRMSR);
6800
6801 /* Consult the MSR bitmap if the feature is supported. */
6802 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6803 Assert(pVmcs);
6804 if (pVmcs->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
6805 {
6806 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap));
6807 if (uExitReason == VMX_EXIT_RDMSR)
6808 {
6809 VMXMSREXITREAD enmRead;
6810 int rc = CPUMVmxGetMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, &enmRead,
6811 NULL /* penmWrite */);
6812 AssertRC(rc);
6813 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
6814 return true;
6815 }
6816 else
6817 {
6818 VMXMSREXITWRITE enmWrite;
6819 int rc = CPUMVmxGetMsrPermission(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvMsrBitmap), idMsr, NULL /* penmRead */,
6820 &enmWrite);
6821 AssertRC(rc);
6822 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
6823 return true;
6824 }
6825 return false;
6826 }
6827
6828 /* Without MSR bitmaps, all MSR accesses are intercepted. */
6829 return true;
6830}
6831
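/*
 * Illustrative sketch (not part of the emulation): the 4 KB MSR-bitmap lookup that the
 * CPUMVmxGetMsrPermission calls above abstract.  Per the Intel SDM the bitmap consists of
 * four 1 KB regions -- read-low, read-high, write-low, write-high -- with one bit per MSR;
 * MSRs outside the low (0..1FFFh) and high (C0000000h..C0001FFFh) ranges always cause an
 * exit.  exampleIsMsrIntercepted is a made-up name.
 */
#if 0 /* example only */
static bool exampleIsMsrIntercepted(uint8_t const *pbMsrBitmap /* 4 KB */, uint32_t idMsr, bool fWrite)
{
    uint32_t offBitmap;
    if (idMsr <= UINT32_C(0x00001fff))
        offBitmap = 0;                                      /* Low MSR range: read bitmap at 0x000. */
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
    {
        offBitmap = 0x400;                                  /* High MSR range: read bitmap at 0x400. */
        idMsr    -= UINT32_C(0xc0000000);
    }
    else
        return true;                                        /* Out-of-range MSRs are always intercepted. */
    if (fWrite)
        offBitmap += 0x800;                                 /* The write bitmaps follow the read bitmaps. */
    return RT_BOOL((pbMsrBitmap[offBitmap + (idMsr >> 3)] >> (idMsr & 7)) & 1);
}
#endif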
6832
6833/**
6834 * Checks whether a VMREAD or VMWRITE instruction for the given VMCS field is
6835 * intercepted (causes a VM-exit) or not.
6836 *
6837 * @returns @c true if the instruction is intercepted, @c false otherwise.
6838 * @param pVCpu The cross context virtual CPU structure.
6839 * @param u64FieldEnc The VMCS field encoding.
6840 * @param uExitReason The VM-exit exit reason (VMX_EXIT_VMREAD or
6841 *                          VMX_EXIT_VMWRITE).
6842 */
6843IEM_STATIC bool iemVmxIsVmreadVmwriteInterceptSet(PVMCPU pVCpu, uint32_t uExitReason, uint64_t u64FieldEnc)
6844{
6845 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
6846 Assert( uExitReason == VMX_EXIT_VMREAD
6847 || uExitReason == VMX_EXIT_VMWRITE);
6848
6849 /* Without VMCS shadowing, all VMREAD and VMWRITE instructions are intercepted. */
6850 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing)
6851 return true;
6852
6853 /*
6854 * If any reserved bit in the 64-bit VMCS field encoding is set, the VMREAD/VMWRITE is intercepted.
6855 * This excludes any reserved bits in the valid parts of the field encoding (i.e. bit 12).
6856 */
6857 if (u64FieldEnc & VMX_VMCS_ENC_RSVD_MASK)
6858 return true;
6859
6860 /* Finally, consult the VMREAD/VMWRITE bitmap whether to intercept the instruction or not. */
6861 uint32_t u32FieldEnc = RT_LO_U32(u64FieldEnc);
6862 Assert(u32FieldEnc >> 3 < VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
6863 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap));
6864 uint8_t const *pbBitmap = uExitReason == VMX_EXIT_VMREAD
6865 ? (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmreadBitmap)
6866 : (uint8_t const *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap);
6867 pbBitmap += (u32FieldEnc >> 3);
6868 if (*pbBitmap & RT_BIT(u32FieldEnc & 7))
6869 return true;
6870
6871 return false;
6872}
6873
6874
6875/**
6876 * VMREAD common (memory/register) instruction execution worker.
6877 *
6878 * @returns Strict VBox status code.
6879 * @param pVCpu The cross context virtual CPU structure.
6880 * @param cbInstr The instruction length in bytes.
6881 * @param pu64Dst Where to write the VMCS value (only updated when
6882 * VINF_SUCCESS is returned).
6883 * @param u64FieldEnc The VMCS field encoding.
6884 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
6885 * be NULL.
6886 */
6887IEM_STATIC VBOXSTRICTRC iemVmxVmreadCommon(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
6888 PCVMXVEXITINFO pExitInfo)
6889{
6890 /* Nested-guest intercept. */
6891 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6892 && iemVmxIsVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMREAD, u64FieldEnc))
6893 {
6894 if (pExitInfo)
6895 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
6896 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMREAD, VMXINSTRID_VMREAD, cbInstr);
6897 }
6898
6899 /* CPL. */
6900 if (pVCpu->iem.s.uCpl > 0)
6901 {
6902 Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
6903 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_Cpl;
6904 return iemRaiseGeneralProtectionFault0(pVCpu);
6905 }
6906
6907 /* VMCS pointer in root mode. */
6908 if ( IEM_VMX_IS_ROOT_MODE(pVCpu)
6909 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
6910 {
6911 Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
6912 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrInvalid;
6913 iemVmxVmFailInvalid(pVCpu);
6914 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6915 return VINF_SUCCESS;
6916 }
6917
6918 /* VMCS-link pointer in non-root mode. */
6919 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
6920 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
6921 {
6922 Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
6923 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_LinkPtrInvalid;
6924 iemVmxVmFailInvalid(pVCpu);
6925 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6926 return VINF_SUCCESS;
6927 }
6928
6929 /* Supported VMCS field. */
6930    if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
6931 {
6932 Log(("vmread: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
6933 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_FieldInvalid;
6934 iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
6935 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
6936 return VINF_SUCCESS;
6937 }
6938
6939 /*
6940 * Setup reading from the current or shadow VMCS.
6941 */
6942 uint8_t *pbVmcs;
6943 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6944 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
6945 else
6946 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
6947 Assert(pbVmcs);
6948
6949 VMXVMCSFIELDENC FieldEnc;
6950 FieldEnc.u = RT_LO_U32(u64FieldEnc);
6951 uint8_t const uWidth = FieldEnc.n.u2Width;
6952 uint8_t const uType = FieldEnc.n.u2Type;
6953 uint8_t const uWidthType = (uWidth << 2) | uType;
6954 uint8_t const uIndex = FieldEnc.n.u8Index;
6955 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
6956 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
6957
6958 /*
6959 * Read the VMCS component based on the field's effective width.
6960 *
6961     * The effective width is the field width, with 64-bit fields adjusted to 32 bits
6962     * when the access type indicates the high part (little endian).
6963     *
6964     * Note! The caller is responsible for trimming the result and updating registers
6965     * or memory locations as required. Here we just zero-extend to the largest
6966     * type (i.e. 64 bits).
6967 */
6968 uint8_t *pbField = pbVmcs + offField;
6969 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
6970 switch (uEffWidth)
6971 {
6972 case VMX_VMCS_ENC_WIDTH_64BIT:
6973 case VMX_VMCS_ENC_WIDTH_NATURAL: *pu64Dst = *(uint64_t *)pbField; break;
6974 case VMX_VMCS_ENC_WIDTH_32BIT: *pu64Dst = *(uint32_t *)pbField; break;
6975 case VMX_VMCS_ENC_WIDTH_16BIT: *pu64Dst = *(uint16_t *)pbField; break;
6976 }
6977 return VINF_SUCCESS;
6978}
6979
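/*
 * Illustrative sketch (not part of the emulation): pulling apart a VMCS field encoding the
 * way iemVmxVmreadCommon above and iemVmxVmwrite below do before indexing g_aoffVmcsMap.
 * Per the Intel SDM: bit 0 = access type (full/high), bits 9:1 = index, bits 11:10 = type,
 * bit 12 = reserved, bits 14:13 = width.  The example names are made up.
 */
#if 0 /* example only */
static void exampleDecodeVmcsFieldEnc(uint32_t uFieldEnc, uint8_t *puWidth, uint8_t *puType,
                                      uint16_t *puIndex, bool *pfHighAccess)
{
    *pfHighAccess = RT_BOOL(uFieldEnc & 1);                 /* 1 = high 32 bits of a 64-bit field. */
    *puIndex      = (uint16_t)((uFieldEnc >>  1) & 0x1ff);
    *puType       = (uint8_t)((uFieldEnc  >> 10) & 0x3);
    *puWidth      = (uint8_t)((uFieldEnc  >> 13) & 0x3);
    /* The offset map above is then indexed by ((width << 2) | type) and by index. */
}
#endif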
6980
6981/**
6982 * VMREAD (64-bit register) instruction execution worker.
6983 *
6984 * @returns Strict VBox status code.
6985 * @param pVCpu The cross context virtual CPU structure.
6986 * @param cbInstr The instruction length in bytes.
6987 * @param pu64Dst Where to store the VMCS field's value.
6988 * @param u64FieldEnc The VMCS field encoding.
6989 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
6990 * be NULL.
6991 */
6992IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg64(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
6993 PCVMXVEXITINFO pExitInfo)
6994{
6995 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
6996 if (rcStrict == VINF_SUCCESS)
6997 {
6998 iemVmxVmreadSuccess(pVCpu, cbInstr);
6999 return VINF_SUCCESS;
7000 }
7001
7002 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7003 return rcStrict;
7004}
7005
7006
7007/**
7008 * VMREAD (32-bit register) instruction execution worker.
7009 *
7010 * @returns Strict VBox status code.
7011 * @param pVCpu The cross context virtual CPU structure.
7012 * @param cbInstr The instruction length in bytes.
7013 * @param pu32Dst Where to store the VMCS field's value.
7014 * @param u32FieldEnc The VMCS field encoding.
7015 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7016 * be NULL.
7017 */
7018IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg32(PVMCPU pVCpu, uint8_t cbInstr, uint32_t *pu32Dst, uint64_t u32FieldEnc,
7019 PCVMXVEXITINFO pExitInfo)
7020{
7021 uint64_t u64Dst;
7022 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u32FieldEnc, pExitInfo);
7023 if (rcStrict == VINF_SUCCESS)
7024 {
7025 *pu32Dst = u64Dst;
7026 iemVmxVmreadSuccess(pVCpu, cbInstr);
7027 return VINF_SUCCESS;
7028 }
7029
7030 Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7031 return rcStrict;
7032}
7033
7034
7035/**
7036 * VMREAD (memory) instruction execution worker.
7037 *
7038 * @returns Strict VBox status code.
7039 * @param pVCpu The cross context virtual CPU structure.
7040 * @param cbInstr The instruction length in bytes.
7041 * @param   iEffSeg         The effective segment register to use with @a GCPtrDst.
7043 * @param enmEffAddrMode The effective addressing mode (only used with memory
7044 * operand).
7045 * @param GCPtrDst The guest linear address to store the VMCS field's
7046 * value.
7047 * @param u64FieldEnc The VMCS field encoding.
7048 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7049 * be NULL.
7050 */
7051IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode,
7052 RTGCPTR GCPtrDst, uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
7053{
7054 uint64_t u64Dst;
7055 VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u64FieldEnc, pExitInfo);
7056 if (rcStrict == VINF_SUCCESS)
7057 {
7058 /*
7059 * Write the VMCS field's value to the location specified in guest-memory.
7060 *
7061 * The pointer size depends on the address size (address-size prefix allowed).
7062 * The operand size depends on IA-32e mode (operand-size prefix not allowed).
7063 */
7064 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
7065 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
7066 GCPtrDst &= s_auAddrSizeMasks[enmEffAddrMode];
7067
7068 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7069 rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
7070 else
7071 rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
7072 if (rcStrict == VINF_SUCCESS)
7073 {
7074 iemVmxVmreadSuccess(pVCpu, cbInstr);
7075 return VINF_SUCCESS;
7076 }
7077
7078 Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
7079 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrMap;
7080 return rcStrict;
7081 }
7082
7083 Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7084 return rcStrict;
7085}
7086
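/*
 * Illustrative sketch (not part of the emulation): the effective-address truncation applied
 * to the memory operand above and in iemVmxVmwrite below, where the instruction's
 * address-size attribute selects a 16-, 32- or 64-bit mask.  exampleTruncateEffAddr is a
 * made-up name.
 */
#if 0 /* example only */
static uint64_t exampleTruncateEffAddr(uint64_t uAddr, unsigned iAddrMode /* 0=16-bit, 1=32-bit, 2=64-bit */)
{
    static uint64_t const s_auMasks[] =
    {
        UINT64_C(0x000000000000ffff),
        UINT64_C(0x00000000ffffffff),
        UINT64_C(0xffffffffffffffff)
    };
    return uAddr & s_auMasks[iAddrMode];
}
#endif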
7087
7088/**
7089 * VMWRITE instruction execution worker.
7090 *
7091 * @returns Strict VBox status code.
7092 * @param pVCpu The cross context virtual CPU structure.
7093 * @param cbInstr The instruction length in bytes.
7094 * @param iEffSeg The effective segment register to use with @a u64Val.
7095 * Pass UINT8_MAX if it is a register access.
7096 * @param enmEffAddrMode The effective addressing mode (only used with memory
7097 * operand).
7098 * @param u64Val The value to write (or guest linear address to the
7099 * value), @a iEffSeg will indicate if it's a memory
7100 * operand.
7101 * @param u64FieldEnc The VMCS field encoding.
7102 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7103 * be NULL.
7104 */
7105IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode, uint64_t u64Val,
7106 uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
7107{
7108 /* Nested-guest intercept. */
7109 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7110 && iemVmxIsVmreadVmwriteInterceptSet(pVCpu, VMX_EXIT_VMWRITE, u64FieldEnc))
7111 {
7112 if (pExitInfo)
7113 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7114 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMWRITE, VMXINSTRID_VMWRITE, cbInstr);
7115 }
7116
7117 /* CPL. */
7118 if (pVCpu->iem.s.uCpl > 0)
7119 {
7120 Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7121 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_Cpl;
7122 return iemRaiseGeneralProtectionFault0(pVCpu);
7123 }
7124
7125 /* VMCS pointer in root mode. */
7126 if ( IEM_VMX_IS_ROOT_MODE(pVCpu)
7127 && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
7128 {
7129 Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
7130 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrInvalid;
7131 iemVmxVmFailInvalid(pVCpu);
7132 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7133 return VINF_SUCCESS;
7134 }
7135
7136 /* VMCS-link pointer in non-root mode. */
7137 if ( IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
7138 && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
7139 {
7140 Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
7141 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_LinkPtrInvalid;
7142 iemVmxVmFailInvalid(pVCpu);
7143 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7144 return VINF_SUCCESS;
7145 }
7146
7147 /* If the VMWRITE instruction references memory, access the specified memory operand. */
7148 bool const fIsRegOperand = iEffSeg == UINT8_MAX;
7149 if (!fIsRegOperand)
7150 {
7151 static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
7152 Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
7153 RTGCPTR const GCPtrVal = u64Val & s_auAddrSizeMasks[enmEffAddrMode];
7154
7155 /* Read the value from the specified guest memory location. */
7156 VBOXSTRICTRC rcStrict;
7157 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7158 rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
7159 else
7160 {
7161 uint32_t u32Val;
7162 rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iEffSeg, GCPtrVal);
7163 u64Val = u32Val;
7164 }
7165 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7166 {
7167 Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
7168 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrMap;
7169 return rcStrict;
7170 }
7171 }
7172 else
7173 Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
7174
7175 /* Supported VMCS field. */
7176 if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
7177 {
7178 Log(("vmwrite: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
7179 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldInvalid;
7180 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
7181 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7182 return VINF_SUCCESS;
7183 }
7184
7185 /* Read-only VMCS field. */
7186 bool const fIsFieldReadOnly = HMVmxIsVmcsFieldReadOnly(u64FieldEnc);
7187 if ( fIsFieldReadOnly
7188 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
7189 {
7190 Log(("vmwrite: Write to read-only VMCS component %#RX64 -> VMFail\n", u64FieldEnc));
7191 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldRo;
7192 iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
7193 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7194 return VINF_SUCCESS;
7195 }
7196
7197 /*
7198 * Setup writing to the current or shadow VMCS.
7199 */
7200 uint8_t *pbVmcs;
7201 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7202 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
7203 else
7204 pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
7205 Assert(pbVmcs);
7206
7207 VMXVMCSFIELDENC FieldEnc;
7208 FieldEnc.u = RT_LO_U32(u64FieldEnc);
7209 uint8_t const uWidth = FieldEnc.n.u2Width;
7210 uint8_t const uType = FieldEnc.n.u2Type;
7211 uint8_t const uWidthType = (uWidth << 2) | uType;
7212 uint8_t const uIndex = FieldEnc.n.u8Index;
7213 AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
7214 uint16_t const offField = g_aoffVmcsMap[uWidthType][uIndex];
7215
7216 /*
7217 * Write the VMCS component based on the field's effective width.
7218 *
7219     * The effective width is the field width, with 64-bit fields adjusted to 32 bits
7220     * when the access type indicates the high part (little endian).
7221 */
7222 uint8_t *pbField = pbVmcs + offField;
7223 uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
7224 switch (uEffWidth)
7225 {
7226 case VMX_VMCS_ENC_WIDTH_64BIT:
7227 case VMX_VMCS_ENC_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
7228 case VMX_VMCS_ENC_WIDTH_32BIT: *(uint32_t *)pbField = u64Val; break;
7229 case VMX_VMCS_ENC_WIDTH_16BIT: *(uint16_t *)pbField = u64Val; break;
7230 }
7231
7232 iemVmxVmSucceed(pVCpu);
7233 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7234 return VINF_SUCCESS;
7235}
7236
7237
7238/**
7239 * VMCLEAR instruction execution worker.
7240 *
7241 * @returns Strict VBox status code.
7242 * @param pVCpu The cross context virtual CPU structure.
7243 * @param cbInstr The instruction length in bytes.
7244 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
7245 * @param GCPtrVmcs The linear address of the VMCS pointer.
7246 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7247 * be NULL.
7248 *
7249 * @remarks Common VMX instruction checks are already expected to be done by the caller,
7250 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7251 */
7252IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
7253 PCVMXVEXITINFO pExitInfo)
7254{
7255 /* Nested-guest intercept. */
7256 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7257 {
7258 if (pExitInfo)
7259 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7260 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMCLEAR, VMXINSTRID_NONE, cbInstr);
7261 }
7262
7263 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7264
7265 /* CPL. */
7266 if (pVCpu->iem.s.uCpl > 0)
7267 {
7268 Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7269 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_Cpl;
7270 return iemRaiseGeneralProtectionFault0(pVCpu);
7271 }
7272
7273 /* Get the VMCS pointer from the location specified by the source memory operand. */
7274 RTGCPHYS GCPhysVmcs;
7275 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
7276 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7277 {
7278 Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
7279 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrMap;
7280 return rcStrict;
7281 }
7282
7283 /* VMCS pointer alignment. */
7284 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
7285 {
7286 Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
7287 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAlign;
7288 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
7289 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7290 return VINF_SUCCESS;
7291 }
7292
7293 /* VMCS physical-address width limits. */
7294 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
7295 {
7296 Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
7297 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrWidth;
7298 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
7299 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7300 return VINF_SUCCESS;
7301 }
7302
7303 /* VMCS is not the VMXON region. */
7304 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
7305 {
7306 Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
7307 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrVmxon;
7308 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
7309 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7310 return VINF_SUCCESS;
7311 }
7312
7313 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
7314 restriction imposed by our implementation. */
7315 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
7316 {
7317 Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
7318 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAbnormal;
7319 iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
7320 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7321 return VINF_SUCCESS;
7322 }
7323
7324 /*
7325 * VMCLEAR allows committing and clearing any valid VMCS pointer.
7326 *
7327 * If the current VMCS is the one being cleared, set its state to 'clear' and commit
7328 * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
7329 * to 'clear'.
7330 */
7331 uint8_t const fVmcsStateClear = VMX_V_VMCS_STATE_CLEAR;
7332 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
7333 {
7334 Assert(GCPhysVmcs != NIL_RTGCPHYS); /* Paranoia. */
7335 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
7336 pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsStateClear;
7337 iemVmxCommitCurrentVmcsToMemory(pVCpu);
7338 Assert(!IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
7339 }
7340 else
7341 {
7342        rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_UOFFSETOF(VMXVVMCS, fVmcsState),
7343 (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
7344 }
7345
7346 iemVmxVmSucceed(pVCpu);
7347 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7348 return rcStrict;
7349}
7350
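/*
 * Illustrative sketch (not part of the emulation): the common VMCS-pointer sanity checks
 * performed by VMCLEAR above and by VMPTRLD below -- page alignment, physical-address width
 * and not being the VMXON region.  exampleVmcsPtrOk is a made-up name; the real code
 * additionally rejects MMIO/ROM backed addresses.
 */
#if 0 /* example only */
static bool exampleVmcsPtrOk(uint64_t GCPhysVmcs, uint64_t GCPhysVmxon, uint8_t cMaxPhysAddrWidth)
{
    if (GCPhysVmcs & 0xfff)                     /* Must be 4 KB aligned. */
        return false;
    if (GCPhysVmcs >> cMaxPhysAddrWidth)        /* Must not exceed the physical-address width. */
        return false;
    if (GCPhysVmcs == GCPhysVmxon)              /* Must not be the VMXON region. */
        return false;
    return true;
}
#endif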
7351
7352/**
7353 * VMPTRST instruction execution worker.
7354 *
7355 * @returns Strict VBox status code.
7356 * @param pVCpu The cross context virtual CPU structure.
7357 * @param cbInstr The instruction length in bytes.
7358 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
7359 * @param GCPtrVmcs The linear address of where to store the current VMCS
7360 * pointer.
7361 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7362 * be NULL.
7363 *
7364 * @remarks Common VMX instruction checks are already expected to be done by the caller,
7365 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7366 */
7367IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
7368 PCVMXVEXITINFO pExitInfo)
7369{
7370 /* Nested-guest intercept. */
7371 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7372 {
7373 if (pExitInfo)
7374 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7375 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRST, VMXINSTRID_NONE, cbInstr);
7376 }
7377
7378 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7379
7380 /* CPL. */
7381 if (pVCpu->iem.s.uCpl > 0)
7382 {
7383 Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7384 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_Cpl;
7385 return iemRaiseGeneralProtectionFault0(pVCpu);
7386 }
7387
7388 /* Set the VMCS pointer to the location specified by the destination memory operand. */
7389 AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
7390 VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
7391 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
7392 {
7393 iemVmxVmSucceed(pVCpu);
7394 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7395 return rcStrict;
7396 }
7397
7398 Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand %#Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
7399 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_PtrMap;
7400 return rcStrict;
7401}
7402
7403
7404/**
7405 * VMPTRLD instruction execution worker.
7406 *
7407 * @returns Strict VBox status code.
7408 * @param pVCpu The cross context virtual CPU structure.
7409 * @param cbInstr The instruction length in bytes.
 * @param iEffSeg The effective segment register to use with @a GCPtrVmcs.
7410 * @param GCPtrVmcs The linear address of the current VMCS pointer.
7411 * @param pExitInfo Pointer to the VM-exit information struct. Optional, can
7412 * be NULL.
7413 *
7414 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
7415 * i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7416 */
7417IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmcs,
7418 PCVMXVEXITINFO pExitInfo)
7419{
7420 /* Nested-guest intercept. */
7421 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7422 {
7423 if (pExitInfo)
7424 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7425 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRLD, VMXINSTRID_NONE, cbInstr);
7426 }
7427
7428 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7429
7430 /* CPL. */
7431 if (pVCpu->iem.s.uCpl > 0)
7432 {
7433 Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7434 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_Cpl;
7435 return iemRaiseGeneralProtectionFault0(pVCpu);
7436 }
7437
7438 /* Get the VMCS pointer from the location specified by the source memory operand. */
7439 RTGCPHYS GCPhysVmcs;
7440 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
7441 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7442 {
7443 Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
7444 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrMap;
7445 return rcStrict;
7446 }
7447
7448 /* VMCS pointer alignment. */
7449 if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
7450 {
7451 Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
7452 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAlign;
7453 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
7454 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7455 return VINF_SUCCESS;
7456 }
7457
7458 /* VMCS physical-address width limits. */
7459 if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
7460 {
7461 Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
7462 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrWidth;
7463 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
7464 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7465 return VINF_SUCCESS;
7466 }
7467
7468 /* VMCS is not the VMXON region. */
7469 if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
7470 {
7471 Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
7472 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrVmxon;
7473 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
7474 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7475 return VINF_SUCCESS;
7476 }
7477
7478 /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
7479 restriction imposed by our implementation. */
7480 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
7481 {
7482 Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
7483 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAbnormal;
7484 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
7485 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7486 return VINF_SUCCESS;
7487 }
7488
7489 /* Read the VMCS revision ID from the VMCS. */
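 /* Only the first 32-bit field of the candidate VMCS is needed here: bits 30:0 hold the VMCS
    revision identifier and bit 31 is the shadow-VMCS indicator. */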
7490 VMXVMCSREVID VmcsRevId;
7491 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
7492 if (RT_FAILURE(rc))
7493 {
7494 Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
7495 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrReadPhys;
7496 return rc;
7497 }
7498
7499 /* Verify that the VMCS revision specified by the guest matches what we reported to the guest,
7500 and that the shadow-VMCS indicator is only set when VMCS shadowing is exposed to the guest. */
7501 if ( VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
7502 || ( VmcsRevId.n.fIsShadowVmcs
7503 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
7504 {
7505 if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
7506 {
7507 Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFail()\n", VMX_V_VMCS_REVISION_ID,
7508 VmcsRevId.n.u31RevisionId));
7509 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_VmcsRevId;
7510 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
7511 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7512 return VINF_SUCCESS;
7513 }
7514
7515 Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
7516 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_ShadowVmcs;
7517 iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
7518 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7519 return VINF_SUCCESS;
7520 }
7521
7522 /*
7523 * We only maintain the current VMCS in our virtual CPU context (CPUMCTX). Therefore,
7524 * VMPTRLD shall always flush any existing current VMCS back to guest memory before loading
7525 * a new VMCS as current.
7526 */
7527 if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
7528 {
7529 iemVmxCommitCurrentVmcsToMemory(pVCpu);
7530 IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
7531 }
7532
7533 iemVmxVmSucceed(pVCpu);
7534 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7535 return VINF_SUCCESS;
7536}
7537
7538
7539/**
7540 * VMXON instruction execution worker.
7541 *
7542 * @returns Strict VBox status code.
7543 * @param pVCpu The cross context virtual CPU structure.
7544 * @param cbInstr The instruction length in bytes.
7545 * @param iEffSeg The effective segment register to use with @a
7546 * GCPtrVmxon.
7547 * @param GCPtrVmxon The linear address of the VMXON pointer.
7548 * @param pExitInfo Pointer to the VM-exit instruction information struct.
7549 * Optional, can be NULL.
7550 *
7551 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
7552 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7553 */
7554IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPHYS GCPtrVmxon,
7555 PCVMXVEXITINFO pExitInfo)
7556{
7557#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
7558 RT_NOREF5(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
7559 return VINF_EM_RAW_EMULATE_INSTR;
7560#else
7561 if (!IEM_VMX_IS_ROOT_MODE(pVCpu))
7562 {
7563 /* CPL. */
7564 if (pVCpu->iem.s.uCpl > 0)
7565 {
7566 Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7567 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cpl;
7568 return iemRaiseGeneralProtectionFault0(pVCpu);
7569 }
7570
7571 /* A20M (A20 Masked) mode. */
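 /* Per the SDM's restrictions on VMX operation, VMXON is not allowed while the A20 gate is
    masked (A20M mode); it is rejected here with #GP(0). */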
7572 if (!PGMPhysIsA20Enabled(pVCpu))
7573 {
7574 Log(("vmxon: A20M mode -> #GP(0)\n"));
7575 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_A20M;
7576 return iemRaiseGeneralProtectionFault0(pVCpu);
7577 }
7578
7579 /* CR0. */
7580 {
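 /* IA32_VMX_CR0_FIXED0 reports the CR0 bits that must be 1 and IA32_VMX_CR0_FIXED1 the bits
    that are allowed to be 1 in VMX operation; VMXON #GPs if CR0 violates either. */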
7581 /* CR0 MB1 bits. */
7582 uint64_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
7583 if ((pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0) != uCr0Fixed0)
7584 {
7585 Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
7586 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed0;
7587 return iemRaiseGeneralProtectionFault0(pVCpu);
7588 }
7589
7590 /* CR0 MBZ bits. */
7591 uint64_t const uCr0Fixed1 = CPUMGetGuestIa32VmxCr0Fixed1(pVCpu);
7592 if (pVCpu->cpum.GstCtx.cr0 & ~uCr0Fixed1)
7593 {
7594 Log(("vmxon: CR0 fixed1 bits set -> #GP(0)\n"));
7595 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed1;
7596 return iemRaiseGeneralProtectionFault0(pVCpu);
7597 }
7598 }
7599
7600 /* CR4. */
7601 {
7602 /* CR4 MB1 bits. */
7603 uint64_t const uCr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
7604 if ((pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0) != uCr4Fixed0)
7605 {
7606 Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
7607 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed0;
7608 return iemRaiseGeneralProtectionFault0(pVCpu);
7609 }
7610
7611 /* CR4 MBZ bits. */
7612 uint64_t const uCr4Fixed1 = CPUMGetGuestIa32VmxCr4Fixed1(pVCpu);
7613 if (pVCpu->cpum.GstCtx.cr4 & ~uCr4Fixed1)
7614 {
7615 Log(("vmxon: CR4 fixed1 bits set -> #GP(0)\n"));
7616 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed1;
7617 return iemRaiseGeneralProtectionFault0(pVCpu);
7618 }
7619 }
7620
7621 /* Feature control MSR's LOCK and VMXON bits. */
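 /* VMXON (outside SMX operation) requires both the lock bit and the 'VMXON outside SMX' enable
    bit of IA32_FEATURE_CONTROL to be set; otherwise it raises #GP(0). */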
7622 uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
7623 if ( (uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
 != (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON))
7624 {
7625 Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
7626 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_MsrFeatCtl;
7627 return iemRaiseGeneralProtectionFault0(pVCpu);
7628 }
7629
7630 /* Get the VMXON pointer from the location specified by the source memory operand. */
7631 RTGCPHYS GCPhysVmxon;
7632 VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
7633 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
7634 {
7635 Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
7636 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrMap;
7637 return rcStrict;
7638 }
7639
7640 /* VMXON region pointer alignment. */
7641 if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
7642 {
7643 Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
7644 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAlign;
7645 iemVmxVmFailInvalid(pVCpu);
7646 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7647 return VINF_SUCCESS;
7648 }
7649
7650 /* VMXON physical-address width limits. */
7651 if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
7652 {
7653 Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
7654 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrWidth;
7655 iemVmxVmFailInvalid(pVCpu);
7656 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7657 return VINF_SUCCESS;
7658 }
7659
7660 /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
7661 restriction imposed by our implementation. */
7662 if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
7663 {
7664 Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
7665 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAbnormal;
7666 iemVmxVmFailInvalid(pVCpu);
7667 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7668 return VINF_SUCCESS;
7669 }
7670
7671 /* Read the VMCS revision ID from the VMXON region. */
7672 VMXVMCSREVID VmcsRevId;
7673 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
7674 if (RT_FAILURE(rc))
7675 {
7676 Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
7677 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrReadPhys;
7678 return rc;
7679 }
7680
7681 /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
7682 if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
7683 {
7684 /* Revision ID mismatch. */
7685 if (!VmcsRevId.n.fIsShadowVmcs)
7686 {
7687 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
7688 VmcsRevId.n.u31RevisionId));
7689 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmcsRevId;
7690 iemVmxVmFailInvalid(pVCpu);
7691 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7692 return VINF_SUCCESS;
7693 }
7694
7695 /* Shadow VMCS disallowed. */
7696 Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
7697 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_ShadowVmcs;
7698 iemVmxVmFailInvalid(pVCpu);
7699 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7700 return VINF_SUCCESS;
7701 }
7702
7703 /*
7704 * Record that we're in VMX root operation: INIT signals are blocked and A20M is disabled while in VMX operation.
7705 */
7706 pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon = GCPhysVmxon;
7707 IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
7708 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
7709
7710 /* Clear address-range monitoring. */
7711 EMMonitorWaitClear(pVCpu);
7712 /** @todo NSTVMX: Intel PT. */
7713
7714 iemVmxVmSucceed(pVCpu);
7715 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7716# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
7717 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
7718# else
7719 return VINF_SUCCESS;
7720# endif
7721 }
7722 else if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7723 {
7724 /* Nested-guest intercept. */
7725 if (pExitInfo)
7726 return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
7727 return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMXON, VMXINSTRID_NONE, cbInstr);
7728 }
7729
7730 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
7731
7732 /* CPL. */
7733 if (pVCpu->iem.s.uCpl > 0)
7734 {
7735 Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7736 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxRootCpl;
7737 return iemRaiseGeneralProtectionFault0(pVCpu);
7738 }
7739
7740 /* VMXON when already in VMX root mode. */
7741 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
7742 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxAlreadyRoot;
7743 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7744 return VINF_SUCCESS;
7745#endif
7746}
7747
7748
7749/**
7750 * Implements 'VMXOFF'.
7751 *
7752 * @remarks Common VMX instruction checks are already expected to have been done by the caller,
7753 * i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
7754 */
7755IEM_CIMPL_DEF_0(iemCImpl_vmxoff)
7756{
7757# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
7758 RT_NOREF2(pVCpu, cbInstr);
7759 return VINF_EM_RAW_EMULATE_INSTR;
7760# else
7761 /* Nested-guest intercept. */
7762 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7763 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMXOFF, cbInstr);
7764
7765 /* CPL. */
7766 if (pVCpu->iem.s.uCpl > 0)
7767 {
7768 Log(("vmxoff: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
7769 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxoff_Cpl;
7770 return iemRaiseGeneralProtectionFault0(pVCpu);
7771 }
7772
7773 /* Dual monitor treatment of SMIs and SMM. */
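 /* The SDM specifies that VMXOFF VMfails while the dual-monitor treatment of SMIs and SMM is
    active; this is approximated here by checking the SMM-monitor 'valid' bit of the MSR. */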
7774 uint64_t const fSmmMonitorCtl = CPUMGetGuestIa32SmmMonitorCtl(pVCpu);
7775 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VALID)
7776 {
7777 iemVmxVmFail(pVCpu, VMXINSTRERR_VMXOFF_DUAL_MON);
7778 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7779 return VINF_SUCCESS;
7780 }
7781
7782 /* Record that we're no longer in VMX root operation; INIT blocking and A20M masking no longer apply. */
7783 pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = false;
7784 Assert(!pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode);
7785
7786 if (fSmmMonitorCtl & MSR_IA32_SMM_MONITOR_VMXOFF_UNBLOCK_SMI)
7787 { /** @todo NSTVMX: Unblock SMI. */ }
7788
7789 EMMonitorWaitClear(pVCpu);
7790 /** @todo NSTVMX: Unblock and enable A20M. */
7791
7792 iemVmxVmSucceed(pVCpu);
7793 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7794# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
7795 return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
7796# else
7797 return VINF_SUCCESS;
7798# endif
7799# endif
7800}
7801
7802
7803/**
7804 * Implements 'VMXON'.
7805 */
7806IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
7807{
7808 return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
7809}
7810
7811
7812/**
7813 * Implements 'VMLAUNCH'.
7814 */
7815IEM_CIMPL_DEF_0(iemCImpl_vmlaunch)
7816{
7817 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMLAUNCH, NULL /* pExitInfo */);
7818}
7819
7820
7821/**
7822 * Implements 'VMRESUME'.
7823 */
7824IEM_CIMPL_DEF_0(iemCImpl_vmresume)
7825{
7826 return iemVmxVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMRESUME, NULL /* pExitInfo */);
7827}
7828
7829
7830/**
7831 * Implements 'VMPTRLD'.
7832 */
7833IEM_CIMPL_DEF_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
7834{
7835 return iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
7836}
7837
7838
7839/**
7840 * Implements 'VMPTRST'.
7841 */
7842IEM_CIMPL_DEF_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
7843{
7844 return iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
7845}
7846
7847
7848/**
7849 * Implements 'VMCLEAR'.
7850 */
7851IEM_CIMPL_DEF_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs)
7852{
7853 return iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, NULL /* pExitInfo */);
7854}
7855
7856
7857/**
7858 * Implements 'VMWRITE' register.
7859 */
7860IEM_CIMPL_DEF_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64FieldEnc)
7861{
7862 return iemVmxVmwrite(pVCpu, cbInstr, UINT8_MAX /* iEffSeg */, IEMMODE_64BIT /* N/A */, u64Val, u64FieldEnc,
7863 NULL /* pExitInfo */);
7864}
7865
7866
7867/**
7868 * Implements 'VMWRITE' memory.
7869 */
7870IEM_CIMPL_DEF_4(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrVal, uint32_t, u64FieldEnc)
7871{
7872 return iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrVal, u64FieldEnc, NULL /* pExitInfo */);
7873}
7874
7875
7876/**
7877 * Implements 'VMREAD' 64-bit register.
7878 */
7879IEM_CIMPL_DEF_2(iemCImpl_vmread64_reg, uint64_t *, pu64Dst, uint64_t, u64FieldEnc)
7880{
7881 return iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, NULL /* pExitInfo */);
7882}
7883
7884
7885/**
7886 * Implements 'VMREAD' 32-bit register.
7887 */
7888IEM_CIMPL_DEF_2(iemCImpl_vmread32_reg, uint32_t *, pu32Dst, uint32_t, u32FieldEnc)
7889{
7890 return iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u32FieldEnc, NULL /* pExitInfo */);
7891}
7892
7893
7894/**
7895 * Implements 'VMREAD' memory.
7896 */
7897IEM_CIMPL_DEF_4(iemCImpl_vmread_mem, uint8_t, iEffSeg, IEMMODE, enmEffAddrMode, RTGCPTR, GCPtrDst, uint32_t, u64FieldEnc)
7898{
7899 return iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, u64FieldEnc, NULL /* pExitInfo */);
7900}
7901
7902
7903/**
7904 * Implements the VMX-aware handling of 'PAUSE'.
7905 */
7906IEM_CIMPL_DEF_0(iemCImpl_vmx_pause)
7907{
7908 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7909 {
7910 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrPause(pVCpu, cbInstr);
7911 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
7912 return rcStrict;
7913 }
7914
7915 /*
7916 * Outside VMX non-root operation or if the PAUSE instruction does not cause
7917 * a VM-exit, the instruction operates normally.
7918 */
7919 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
7920 return VINF_SUCCESS;
7921}
7922
7923#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
7924
7925
7926/**
7927 * Implements 'VMCALL'.
7928 */
7929IEM_CIMPL_DEF_0(iemCImpl_vmcall)
7930{
7931#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
7932 /* Nested-guest intercept. */
7933 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
7934 return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMCALL, cbInstr);
7935#endif
7936
7937 /* Join forces with vmmcall. */
7938 return IEM_CIMPL_CALL_1(iemCImpl_Hypercall, OP_VMCALL);
7939}
7940