VirtualBox
source: vbox/trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp@74121

Last change on this file since 74121 was 74114, checked in by vboxsync:
VMM/IEM: Nested VMX: bugref:9180 vmlaunch/vmresume bits.

/* $Id: HMVMXAll.cpp 74114 2018-09-06 12:10:09Z vboxsync $ */
/** @file
 * HM VMX (VT-x) - All contexts.
 */

/*
 * Copyright (C) 2018 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/pdmapi.h>


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#define VMX_INSTR_DIAG_DESC(a_Def, a_Desc) #a_Def " - " #a_Desc
static const char * const g_apszVmxInstrDiagDesc[kVmxVInstrDiag_Last] =
{
    /* Internal processing errors. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_1, "Ipe_1"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_2, "Ipe_2"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_3, "Ipe_3"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_4, "Ipe_4"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_5, "Ipe_5"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_6, "Ipe_6"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_7, "Ipe_7"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_8, "Ipe_8"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_9, "Ipe_9"),
    /* VMXON. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_A20M, "A20M"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cpl, "Cpl"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cr0Fixed0, "Cr0Fixed0"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cr4Fixed0, "Cr4Fixed0"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Intercept, "Intercept"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_LongModeCS, "LongModeCS"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_MsrFeatCtl, "MsrFeatCtl"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrAbnormal, "PtrAbnormal"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrAlign, "PtrAlign"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrMap, "PtrMap"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrPhysRead, "PtrPhysRead"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrWidth, "PtrWidth"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Success, "Success"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_ShadowVmcs, "ShadowVmcs"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmxAlreadyRoot, "VmxAlreadyRoot"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Vmxe, "Vmxe"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmcsRevId, "VmcsRevId"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmxRootCpl, "VmxRootCpl"),
    /* VMXOFF. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Cpl, "Cpl"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Intercept, "Intercept"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_LongModeCS, "LongModeCS"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Success, "Success"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Vmxe, "Vmxe"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_VmxRoot, "VmxRoot"),
    /* VMPTRLD. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_Cpl, "Cpl"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_LongModeCS, "LongModeCS"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrAbnormal, "PtrAbnormal"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrAlign, "PtrAlign"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrMap, "PtrMap"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrReadPhys, "PtrReadPhys"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrVmxon, "PtrVmxon"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrWidth, "PtrWidth"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_ShadowVmcs, "ShadowVmcs"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_Success, "Success"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_VmcsRevId, "VmcsRevId"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_VmxRoot, "VmxRoot"),
    /* VMPTRST. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_Cpl, "Cpl"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_LongModeCS, "LongModeCS"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_PtrMap, "PtrMap"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_Success, "Success"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_VmxRoot, "VmxRoot"),
    /* VMCLEAR. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_Cpl, "Cpl"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_LongModeCS, "LongModeCS"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrAbnormal, "PtrAbnormal"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrAlign, "PtrAlign"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrMap, "PtrMap"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrReadPhys, "PtrReadPhys"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrVmxon, "PtrVmxon"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrWidth, "PtrWidth"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_Success, "Success"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_VmxRoot, "VmxRoot"),
    /* VMWRITE. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_Cpl, "Cpl"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_FieldInvalid, "FieldInvalid"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_FieldRo, "FieldRo"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_LinkPtrInvalid, "LinkPtrInvalid"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_LongModeCS, "LongModeCS"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_PtrInvalid, "PtrInvalid"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_PtrMap, "PtrMap"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_Success, "Success"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_VmxRoot, "VmxRoot"),
    /* VMREAD. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_Cpl, "Cpl"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_FieldInvalid, "FieldInvalid"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_LinkPtrInvalid, "LinkPtrInvalid"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_LongModeCS, "LongModeCS"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_PtrInvalid, "PtrInvalid"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_PtrMap, "PtrMap"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_Success, "Success"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_VmxRoot, "VmxRoot"),
    /* VMLAUNCH/VMRESUME. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrApicAccess, "AddrApicAccess"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrEntryMsrLoad, "AddrEntryMsrLoad"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrExitMsrLoad, "AddrExitMsrLoad"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrExitMsrStore, "AddrExitMsrStore"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrIoBitmapA, "AddrIoBitmapA"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrIoBitmapB, "AddrIoBitmapB"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrMsrBitmap, "AddrMsrBitmap"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrVirtApicPage, "AddrVirtApicPage"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrVmreadBitmap, "AddrVmreadBitmap"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrVmwriteBitmap, "AddrVmwriteBitmap"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ApicRegVirt, "ApicRegVirt"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_BlocKMovSS, "BlockMovSS"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_Cpl, "Cpl"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_Cr3TargetCount, "Cr3TargetCount"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryCtlsAllowed1, "EntryCtlsAllowed1"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryCtlsDisallowed0, "EntryCtlsDisallowed0"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostAddrSpace, "HostAddrSpace"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostCr0Fixed0, "HostCr0Fixed0"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostCr0Fixed1, "HostCr0Fixed1"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostCr3, "HostCr3"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostCr4Fixed0, "HostCr4Fixed0"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostCr4Fixed1, "HostCr4Fixed1"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostCr4Pae, "HostCr4Pae"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostCr4Pcide, "HostCr4Pcide"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostCsTr, "HostCsTr"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostEferMsr, "HostEferMsr"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostGuestLongMode, "HostGuestLongMode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostGuestLongModeNoCpu, "HostGuestLongModeNoCpu"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostLongMode, "HostLongMode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostPatMsr, "HostPatMsr"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostRip, "HostRip"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostRipRsvd, "HostRipRsvd"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostSel, "HostSel"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostSegBase, "HostSegBase"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostSs, "HostSs"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_HostSysenterEspEip, "HostSysenterEspEip"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryInstrLen, "EntryInstrLen"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryInstrLenZero, "EntryInstrLenZero"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryIntInfoErrCodePe, "EntryIntInfoErrCodePe"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryIntInfoErrCodeVec, "EntryIntInfoErrCodeVec"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryIntInfoTypeVecRsvd, "EntryIntInfoTypeVecRsvd"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryXcptErrCodeRsvd, "EntryXcptErrCodeRsvd"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ExitCtlsAllowed1, "ExitCtlsAllowed1"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ExitCtlsDisallowed0, "ExitCtlsDisallowed0"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_LongModeCS, "LongModeCS"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_NmiWindowExit, "NmiWindowExit"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_PinCtlsAllowed1, "PinCtlsAllowed1"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_PinCtlsDisallowed0, "PinCtlsDisallowed0"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ProcCtlsAllowed1, "ProcCtlsAllowed1"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ProcCtlsDisallowed0, "ProcCtlsDisallowed0"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ProcCtls2Allowed1, "ProcCtls2Allowed1"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ProcCtls2Disallowed0, "ProcCtls2Disallowed0"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_PtrInvalid, "PtrInvalid"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_PtrReadPhys, "PtrReadPhys"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_SavePreemptTimer, "SavePreemptTimer"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_Success, "Success"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_TprThreshold, "TprThreshold"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_TprThresholdVTpr, "TprThresholdVTpr"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VirtApicPagePtrReadPhys, "VirtApicPageReadPhys"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VirtIntDelivery, "VirtIntDelivery"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VirtNmi, "VirtNmi"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VirtX2ApicTprShadow, "VirtX2ApicTprShadow"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VirtX2ApicVirtApic, "VirtX2ApicVirtApic"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VmcsClear, "VmcsClear"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VmcsLaunch, "VmcsLaunch"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VmxRoot, "VmxRoot"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_Vpid, "Vpid")
    /* kVmxVInstrDiag_Last */
};
#undef VMX_INSTR_DIAG_DESC
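
/*
 * Note (illustrative, not part of the original file): because both macro
 * arguments are stringized, an entry such as
 * VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cpl, "Cpl") expands to the single
 * string literal "kVmxVInstrDiag_Vmxon_Cpl - \"Cpl\"".  The table entries are
 * laid out in exactly the same order as the VMXVINSTRDIAG enum, so an enum
 * value can be used directly as the array index (see HMVmxGetInstrDiagDesc
 * below).
 */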


/**
 * Gets a copy of the VMX host MSRs that were read by HM during ring-0
 * initialization.
 *
 * @return VBox status code.
 * @param   pVM        The cross context VM structure.
 * @param   pVmxMsrs   Where to store the VMXMSRS struct (only valid when
 *                     VINF_SUCCESS is returned).
 *
 * @remarks Caller needs to take care not to call this function too early. Call
 *          after HM initialization is fully complete.
 */
VMM_INT_DECL(int) HMVmxGetHostMsrs(PVM pVM, PVMXMSRS pVmxMsrs)
{
    AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pVmxMsrs, VERR_INVALID_PARAMETER);
    if (pVM->hm.s.vmx.fSupported)
    {
        *pVmxMsrs = pVM->hm.s.vmx.Msrs;
        return VINF_SUCCESS;
    }
    return VERR_VMX_NOT_SUPPORTED;
}
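
/*
 * Usage sketch (illustrative, not part of the original file; assumes a valid
 * PVM in scope): a caller snapshots the host VMX MSRs after HM initialization
 * has fully completed.  The VMXMSRS structure is copied by value, so the
 * caller owns the result.
 *
 * @code
 *     VMXMSRS VmxMsrs;
 *     int rc = HMVmxGetHostMsrs(pVM, &VmxMsrs);
 *     if (RT_SUCCESS(rc))
 *         LogRel(("IA32_VMX_BASIC: %#RX64\n", VmxMsrs.u64Basic));
 * @endcode
 */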


/**
 * Gets the specified VMX host MSR that was read by HM during ring-0
 * initialization.
 *
 * @return VBox status code.
 * @param   pVM       The cross context VM structure.
 * @param   idMsr     The MSR.
 * @param   puValue   Where to store the MSR value (only updated when
 *                    VINF_SUCCESS is returned).
 *
 * @remarks Caller needs to take care not to call this function too early. Call
 *          after HM initialization is fully complete.
 */
VMM_INT_DECL(int) HMVmxGetHostMsr(PVM pVM, uint32_t idMsr, uint64_t *puValue)
{
    AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
    AssertPtrReturn(puValue, VERR_INVALID_PARAMETER);

    if (!pVM->hm.s.vmx.fSupported)
        return VERR_VMX_NOT_SUPPORTED;

    PCVMXMSRS pVmxMsrs = &pVM->hm.s.vmx.Msrs;
    switch (idMsr)
    {
        case MSR_IA32_FEATURE_CONTROL:         *puValue = pVmxMsrs->u64FeatCtrl;     break;
        case MSR_IA32_VMX_BASIC:               *puValue = pVmxMsrs->u64Basic;        break;
        case MSR_IA32_VMX_PINBASED_CTLS:       *puValue = pVmxMsrs->PinCtls.u;       break;
        case MSR_IA32_VMX_PROCBASED_CTLS:      *puValue = pVmxMsrs->ProcCtls.u;      break;
        case MSR_IA32_VMX_PROCBASED_CTLS2:     *puValue = pVmxMsrs->ProcCtls2.u;     break;
        case MSR_IA32_VMX_EXIT_CTLS:           *puValue = pVmxMsrs->ExitCtls.u;      break;
        case MSR_IA32_VMX_ENTRY_CTLS:          *puValue = pVmxMsrs->EntryCtls.u;     break;
        case MSR_IA32_VMX_TRUE_PINBASED_CTLS:  *puValue = pVmxMsrs->TruePinCtls.u;   break;
        case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: *puValue = pVmxMsrs->TrueProcCtls.u;  break;
        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:     *puValue = pVmxMsrs->TrueEntryCtls.u; break;
        case MSR_IA32_VMX_TRUE_EXIT_CTLS:      *puValue = pVmxMsrs->TrueExitCtls.u;  break;
        case MSR_IA32_VMX_MISC:                *puValue = pVmxMsrs->u64Misc;         break;
        case MSR_IA32_VMX_CR0_FIXED0:          *puValue = pVmxMsrs->u64Cr0Fixed0;    break;
        case MSR_IA32_VMX_CR0_FIXED1:          *puValue = pVmxMsrs->u64Cr0Fixed1;    break;
        case MSR_IA32_VMX_CR4_FIXED0:          *puValue = pVmxMsrs->u64Cr4Fixed0;    break;
        case MSR_IA32_VMX_CR4_FIXED1:          *puValue = pVmxMsrs->u64Cr4Fixed1;    break;
        case MSR_IA32_VMX_VMCS_ENUM:           *puValue = pVmxMsrs->u64VmcsEnum;     break;
        case MSR_IA32_VMX_VMFUNC:              *puValue = pVmxMsrs->u64VmFunc;       break;
        case MSR_IA32_VMX_EPT_VPID_CAP:        *puValue = pVmxMsrs->u64EptVpidCaps;  break;
        default:
        {
            AssertMsgFailed(("Invalid MSR %#x\n", idMsr));
            return VERR_NOT_FOUND;
        }
    }
    return VINF_SUCCESS;
}
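
/*
 * Usage sketch (illustrative, not part of the original file; assumes a valid
 * PVM in scope): fetching a single host VMX MSR by its IA32 MSR index.
 * VERR_NOT_FOUND indicates an MSR outside the VMX set handled above.  The
 * revision-id extraction below follows the SDM layout of IA32_VMX_BASIC
 * (bits 30:0).
 *
 * @code
 *     uint64_t uVmxBasic = 0;
 *     int rc = HMVmxGetHostMsr(pVM, MSR_IA32_VMX_BASIC, &uVmxBasic);
 *     if (RT_SUCCESS(rc))
 *         LogRel(("VMCS revision id: %#x\n", (uint32_t)(uVmxBasic & UINT32_C(0x7fffffff))));
 * @endcode
 */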


/**
 * Gets the description of a VMX instruction diagnostic enum member.
 *
 * @returns The descriptive string.
 * @param   enmInstrDiag   The VMX instruction diagnostic.
 */
VMM_INT_DECL(const char *) HMVmxGetInstrDiagDesc(VMXVINSTRDIAG enmInstrDiag)
{
    if (RT_LIKELY((unsigned)enmInstrDiag < RT_ELEMENTS(g_apszVmxInstrDiagDesc)))
        return g_apszVmxInstrDiagDesc[enmInstrDiag];
    return "Unknown/invalid";
}
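
/*
 * Usage sketch (illustrative, not part of the original file): the bounds
 * check above means any value, even a corrupt one, can be passed safely.
 *
 * @code
 *     Log(("VMXON diag: %s\n", HMVmxGetInstrDiagDesc(kVmxVInstrDiag_Vmxon_Cpl)));
 * @endcode
 */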


/**
 * Checks if a code selector (CS) is suitable for execution using hardware-assisted
 * VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise false.
 * @param   pSel        Pointer to the selector to check (CS).
 * @param   uStackDpl   The CPL, aka the DPL of the stack segment.
 */
static bool hmVmxIsCodeSelectorOk(PCCPUMSELREG pSel, unsigned uStackDpl)
{
    /*
     * Segment must be an accessed code segment, it must be present and it must
     * be usable.
     * Note! These are all standard requirements and if CS holds anything else
     *       we've got buggy code somewhere!
     */
    AssertCompile(X86DESCATTR_TYPE == 0xf);
    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P | X86DESCATTR_UNUSABLE))
                    == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P),
                    ("%#x\n", pSel->Attr.u),
                    false);

    /* For conforming segments, CS.DPL must be <= SS.DPL, while CS.DPL
       must equal SS.DPL for non-conforming segments.
       Note! This is also a hard requirement like above. */
    AssertMsgReturn(   pSel->Attr.n.u4Type & X86_SEL_TYPE_CONF
                    ?  pSel->Attr.n.u2Dpl <= uStackDpl
                    :  pSel->Attr.n.u2Dpl == uStackDpl,
                    ("u4Type=%#x u2Dpl=%u uStackDpl=%u\n", pSel->Attr.n.u4Type, pSel->Attr.n.u2Dpl, uStackDpl),
                    false);

    /*
     * The following two requirements are VT-x specific:
     *   - G bit must be set if any high limit bits are set.
     *   - G bit must be clear if any low limit bits are clear.
     */
    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
        return true;
    return false;
}
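
/*
 * Worked example (illustrative, not part of the original file) of the VT-x
 * granularity rule checked above: with G=1 the limit is in 4K units, so the
 * low 12 bits of the effective limit are always set; with G=0 the limit is
 * in bytes and cannot reach beyond 1MB.  Hence:
 *     u32Limit=0x000fffff, G=1  ->  OK  (low 12 bits all set)
 *     u32Limit=0x000ff000, G=1  ->  bad (low 12 bits clear but G set)
 *     u32Limit=0x0000ffff, G=0  ->  OK  (no high limit bits set)
 *     u32Limit=0x00100000, G=0  ->  bad (high limit bits set but G clear)
 */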


/**
 * Checks if a data selector (DS/ES/FS/GS) is suitable for execution using
 * hardware-assisted VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise false.
 * @param   pSel   Pointer to the selector to check (DS/ES/FS/GS).
 */
static bool hmVmxIsDataSelectorOk(PCCPUMSELREG pSel)
{
    /*
     * Unusable segments are OK.  These days they should be marked as such, but
     * as an alternative, for old saved states and AMD<->VT-x migration, we also
     * treat segments with all the attributes cleared as unusable.
     */
    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
        return true;

    /** @todo tighten these checks. Will require CPUM load adjusting. */

    /* Segment must be accessed. */
    if (pSel->Attr.u & X86_SEL_TYPE_ACCESSED)
    {
        /* Code segments must also be readable. */
        if (  !(pSel->Attr.u & X86_SEL_TYPE_CODE)
            || (pSel->Attr.u & X86_SEL_TYPE_READ))
        {
            /* The S bit must be set. */
            if (pSel->Attr.n.u1DescType)
            {
                /* Except for conforming segments, DPL >= RPL. */
                if (   pSel->Attr.n.u2Dpl >= (pSel->Sel & X86_SEL_RPL)
                    || pSel->Attr.n.u4Type >= X86_SEL_TYPE_ER_ACC)
                {
                    /* Segment must be present. */
                    if (pSel->Attr.n.u1Present)
                    {
                        /*
                         * The following two requirements are VT-x specific:
                         *   - G bit must be set if any high limit bits are set.
                         *   - G bit must be clear if any low limit bits are clear.
                         */
                        if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
                            && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
                            return true;
                    }
                }
            }
        }
    }

    return false;
}


/**
 * Checks if the stack selector (SS) is suitable for execution using
 * hardware-assisted VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise false.
 * @param   pSel   Pointer to the selector to check (SS).
 */
static bool hmVmxIsStackSelectorOk(PCCPUMSELREG pSel)
{
    /*
     * Unusable segments are OK.  These days they should be marked as such, but
     * as an alternative, for old saved states and AMD<->VT-x migration, we also
     * treat segments with all the attributes cleared as unusable.
     */
    /** @todo r=bird: actually all zeroes isn't gonna cut it... SS.DPL == CPL. */
    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
        return true;

    /*
     * Segment must be an accessed writable segment, and it must be present.
     * Note! These are all standard requirements and if SS holds anything else
     *       we've got buggy code somewhere!
     */
    AssertCompile(X86DESCATTR_TYPE == 0xf);
    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P | X86_SEL_TYPE_CODE))
                    == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P),
                    ("%#x\n", pSel->Attr.u), false);

    /* DPL must equal RPL.
       Note! This is also a hard requirement like above. */
    AssertMsgReturn(pSel->Attr.n.u2Dpl == (pSel->Sel & X86_SEL_RPL),
                    ("u2Dpl=%u Sel=%#x\n", pSel->Attr.n.u2Dpl, pSel->Sel), false);

    /*
     * The following two requirements are VT-x specific:
     *   - G bit must be set if any high limit bits are set.
     *   - G bit must be clear if any low limit bits are clear.
     */
    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
        return true;
    return false;
}


/**
 * Checks if the guest is in a suitable state for hardware-assisted VMX execution.
 *
 * @returns @c true if it is suitable, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest CPU context.
 *
 * @remarks @a pCtx can be a partial context and thus may not necessarily be the
 *          same as pVCpu->cpum.GstCtx! Thus don't eliminate the @a pCtx parameter.
 *          Secondly, if additional checks are added that require more of the CPU
 *          state, make sure REM (which supplies a partial state) is updated.
 */
VMM_INT_DECL(bool) HMVmxCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(HMIsEnabled(pVM));
    Assert(!CPUMIsGuestVmxEnabled(pCtx));
    Assert(   ( pVM->hm.s.vmx.fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS)
           || (!pVM->hm.s.vmx.fUnrestrictedGuest &&  pVM->hm.s.vmx.pRealModeTSS));

    pVCpu->hm.s.fActive = false;

    bool const fSupportsRealMode = pVM->hm.s.vmx.fUnrestrictedGuest || PDMVmmDevHeapIsEnabled(pVM);
    if (!pVM->hm.s.vmx.fUnrestrictedGuest)
    {
        /*
         * The VMM device heap is a requirement for emulating real mode or protected mode
         * without paging when the unrestricted guest execution feature is missing (VT-x only).
         */
        if (fSupportsRealMode)
        {
            if (CPUMIsGuestInRealModeEx(pCtx))
            {
                /*
                 * In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
                 * bases and limits, i.e. limit must be 64K and base must be selector * 16.
                 * If this is not true, we cannot execute real mode as V86 and have to fall
                 * back to emulation.
                 */
                if (   pCtx->cs.Sel != (pCtx->cs.u64Base >> 4)
                    || pCtx->ds.Sel != (pCtx->ds.u64Base >> 4)
                    || pCtx->es.Sel != (pCtx->es.u64Base >> 4)
                    || pCtx->ss.Sel != (pCtx->ss.u64Base >> 4)
                    || pCtx->fs.Sel != (pCtx->fs.u64Base >> 4)
                    || pCtx->gs.Sel != (pCtx->gs.u64Base >> 4))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelBase);
                    return false;
                }
                if (   (pCtx->cs.u32Limit != 0xffff)
                    || (pCtx->ds.u32Limit != 0xffff)
                    || (pCtx->es.u32Limit != 0xffff)
                    || (pCtx->ss.u32Limit != 0xffff)
                    || (pCtx->fs.u32Limit != 0xffff)
                    || (pCtx->gs.u32Limit != 0xffff))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelLimit);
                    return false;
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckRmOk);
            }
            else
            {
                /*
                 * Verify the requirements for executing code in protected mode. VT-x can't
                 * handle the CPU state right after a switch from real to protected mode
                 * (all sorts of RPL & DPL assumptions).
                 */
                if (pVCpu->hm.s.vmx.fWasInRealMode)
                {
                    /** @todo If guest is in V86 mode, these checks should be different! */
                    if ((pCtx->cs.Sel & X86_SEL_RPL) != (pCtx->ss.Sel & X86_SEL_RPL))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRpl);
                        return false;
                    }
                    if (   !hmVmxIsCodeSelectorOk(&pCtx->cs, pCtx->ss.Attr.n.u2Dpl)
                        || !hmVmxIsDataSelectorOk(&pCtx->ds)
                        || !hmVmxIsDataSelectorOk(&pCtx->es)
                        || !hmVmxIsDataSelectorOk(&pCtx->fs)
                        || !hmVmxIsDataSelectorOk(&pCtx->gs)
                        || !hmVmxIsStackSelectorOk(&pCtx->ss))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
                        return false;
                    }
                }
                /* VT-x also chokes on invalid TR or LDTR selectors (minix). */
                if (pCtx->gdtr.cbGdt)
                {
                    if ((pCtx->tr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadTr);
                        return false;
                    }
                    else if ((pCtx->ldtr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadLdt);
                        return false;
                    }
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckPmOk);
            }
        }
        else
        {
            if (   !CPUMIsGuestInLongModeEx(pCtx)
                && !pVM->hm.s.vmx.fUnrestrictedGuest)
            {
                if (   !pVM->hm.s.fNestedPaging       /* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap. */
                    || CPUMIsGuestInRealModeEx(pCtx)) /* Requires a fake TSS for real mode - stored in the VMM device heap. */
                    return false;

                /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
                if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr.Sel == 0)
                    return false;

                /*
                 * The guest is about to complete the switch to protected mode. Wait a bit longer.
                 * Windows XP; switch to protected mode; all selectors are marked not present
                 * in the hidden registers (possible recompiler bug; see load_seg_vm).
                 */
                /** @todo Is this supposed recompiler bug still relevant with IEM? */
                if (pCtx->cs.Attr.n.u1Present == 0)
                    return false;
                if (pCtx->ss.Attr.n.u1Present == 0)
                    return false;

                /*
                 * Windows XP: possibly the same as above, but the new recompiler requires new
                 * heuristics? VT-x doesn't seem to like something about the guest state and
                 * this stuff avoids it.
                 */
                /** @todo This check is actually wrong, it doesn't take the direction of the
                 *        stack segment into account. But, it does the job for now. */
                if (pCtx->rsp >= pCtx->ss.u32Limit)
                    return false;
            }
        }
    }

    if (pVM->hm.s.vmx.fEnabled)
    {
        uint32_t uCr0Mask;

        /* If bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
        uCr0Mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr0Fixed0;

        /* We ignore the NE bit here on purpose; see HMR0.cpp for details. */
        uCr0Mask &= ~X86_CR0_NE;

        if (fSupportsRealMode)
        {
            /* We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
            uCr0Mask &= ~(X86_CR0_PG | X86_CR0_PE);
        }
        else
        {
            /* We support protected mode without paging using identity mapping. */
            uCr0Mask &= ~X86_CR0_PG;
        }
        if ((pCtx->cr0 & uCr0Mask) != uCr0Mask)
            return false;

        /* If bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
        if ((pCtx->cr0 & uCr0Mask) != 0)
            return false;

        /* If bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
        uCr0Mask  = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr4Fixed0;
        uCr0Mask &= ~X86_CR4_VMXE;
        if ((pCtx->cr4 & uCr0Mask) != uCr0Mask)
            return false;

        /* If bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
        if ((pCtx->cr4 & uCr0Mask) != 0)
            return false;

        pVCpu->hm.s.fActive = true;
        return true;
    }

    return false;
}
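
/*
 * Sketch (illustrative, not part of the original file): the CR0/CR4 checks
 * above instantiate the general VMX fixed-bit rule: every bit set in
 * IA32_VMX_CRx_FIXED0 must be 1 in the guest CRx, and every bit clear in
 * IA32_VMX_CRx_FIXED1 must be 0, with a few bits (NE, PE, PG, VMXE) masked
 * out above because HM emulates around them.  A minimal standalone form of
 * the raw rule (isCrValidForVmx is a hypothetical helper):
 *
 * @code
 *     // Returns true if uCr satisfies the raw VMX fixed-bit constraints;
 *     // uFixed0/uFixed1 are the IA32_VMX_CRx_FIXED0/FIXED1 MSR values.
 *     static bool isCrValidForVmx(uint64_t uCr, uint64_t uFixed0, uint64_t uFixed1)
 *     {
 *         return (uCr & uFixed0) == uFixed0   // required-1 bits must all be set
 *             && (uCr & ~uFixed1) == 0;       // required-0 bits must all be clear
 *     }
 * @endcode
 */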