VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp@74187

Last change on this file: r74187, checked in by vboxsync, 7 years ago:

VMM/IEM: Nested VMX: bugref:9180 vmlaunch/vmresume bits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision

File size: 46.6 KB
/* $Id: HMVMXAll.cpp 74187 2018-09-11 08:27:27Z vboxsync $ */
/** @file
 * HM VMX (VT-x) - All contexts.
 */

/*
 * Copyright (C) 2018 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/pdmapi.h>


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#define VMXV_DIAG_DESC(a_Def, a_Desc)   #a_Def " - " #a_Desc
static const char * const g_apszVmxVDiagDesc[] =
{
    /* Internal processing errors. */
    VMXV_DIAG_DESC(kVmxVDiag_Ipe_1, "Ipe_1"),
    VMXV_DIAG_DESC(kVmxVDiag_Ipe_2, "Ipe_2"),
    VMXV_DIAG_DESC(kVmxVDiag_Ipe_3, "Ipe_3"),
    VMXV_DIAG_DESC(kVmxVDiag_Ipe_4, "Ipe_4"),
    VMXV_DIAG_DESC(kVmxVDiag_Ipe_5, "Ipe_5"),
    VMXV_DIAG_DESC(kVmxVDiag_Ipe_6, "Ipe_6"),
    VMXV_DIAG_DESC(kVmxVDiag_Ipe_7, "Ipe_7"),
    VMXV_DIAG_DESC(kVmxVDiag_Ipe_8, "Ipe_8"),
    VMXV_DIAG_DESC(kVmxVDiag_Ipe_9, "Ipe_9"),
    /* VMXON. */
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_A20M, "A20M"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_Cpl, "Cpl"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_Cr0Fixed0, "Cr0Fixed0"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_Cr4Fixed0, "Cr4Fixed0"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_Intercept, "Intercept"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_LongModeCS, "LongModeCS"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_MsrFeatCtl, "MsrFeatCtl"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_PtrAbnormal, "PtrAbnormal"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_PtrAlign, "PtrAlign"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_PtrMap, "PtrMap"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_PtrReadPhys, "PtrReadPhys"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_PtrWidth, "PtrWidth"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_RealOrV86Mode, "RealOrV86Mode"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_ShadowVmcs, "ShadowVmcs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_Success, "Success"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_VmxAlreadyRoot, "VmxAlreadyRoot"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_Vmxe, "Vmxe"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_VmcsRevId, "VmcsRevId"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxon_VmxRootCpl, "VmxRootCpl"),
    /* VMXOFF. */
    VMXV_DIAG_DESC(kVmxVDiag_Vmxoff_Cpl, "Cpl"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxoff_Intercept, "Intercept"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxoff_LongModeCS, "LongModeCS"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxoff_RealOrV86Mode, "RealOrV86Mode"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxoff_Success, "Success"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxoff_Vmxe, "Vmxe"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmxoff_VmxRoot, "VmxRoot"),
    /* VMPTRLD. */
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_Cpl, "Cpl"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_LongModeCS, "LongModeCS"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_PtrAbnormal, "PtrAbnormal"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_PtrAlign, "PtrAlign"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_PtrMap, "PtrMap"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_PtrReadPhys, "PtrReadPhys"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_PtrVmxon, "PtrVmxon"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_PtrWidth, "PtrWidth"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_RealOrV86Mode, "RealOrV86Mode"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_ShadowVmcs, "ShadowVmcs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_Success, "Success"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_VmcsRevId, "VmcsRevId"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrld_VmxRoot, "VmxRoot"),
    /* VMPTRST. */
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrst_Cpl, "Cpl"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrst_LongModeCS, "LongModeCS"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrst_PtrMap, "PtrMap"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrst_RealOrV86Mode, "RealOrV86Mode"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrst_Success, "Success"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmptrst_VmxRoot, "VmxRoot"),
    /* VMCLEAR. */
    VMXV_DIAG_DESC(kVmxVDiag_Vmclear_Cpl, "Cpl"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmclear_LongModeCS, "LongModeCS"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmclear_PtrAbnormal, "PtrAbnormal"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmclear_PtrAlign, "PtrAlign"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmclear_PtrMap, "PtrMap"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmclear_PtrReadPhys, "PtrReadPhys"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmclear_PtrVmxon, "PtrVmxon"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmclear_PtrWidth, "PtrWidth"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmclear_RealOrV86Mode, "RealOrV86Mode"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmclear_Success, "Success"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmclear_VmxRoot, "VmxRoot"),
    /* VMWRITE. */
    VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_Cpl, "Cpl"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_FieldInvalid, "FieldInvalid"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_FieldRo, "FieldRo"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_LinkPtrInvalid, "LinkPtrInvalid"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_LongModeCS, "LongModeCS"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_PtrInvalid, "PtrInvalid"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_PtrMap, "PtrMap"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_RealOrV86Mode, "RealOrV86Mode"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_Success, "Success"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmwrite_VmxRoot, "VmxRoot"),
    /* VMREAD. */
    VMXV_DIAG_DESC(kVmxVDiag_Vmread_Cpl, "Cpl"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmread_FieldInvalid, "FieldInvalid"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmread_LinkPtrInvalid, "LinkPtrInvalid"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmread_LongModeCS, "LongModeCS"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmread_PtrInvalid, "PtrInvalid"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmread_PtrMap, "PtrMap"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmread_RealOrV86Mode, "RealOrV86Mode"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmread_Success, "Success"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmread_VmxRoot, "VmxRoot"),
    /* VMLAUNCH/VMRESUME. */
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrApicAccess, "AddrApicAccess"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrEntryMsrLoad, "AddrEntryMsrLoad"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrExitMsrLoad, "AddrExitMsrLoad"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrExitMsrStore, "AddrExitMsrStore"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrIoBitmapA, "AddrIoBitmapA"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrIoBitmapB, "AddrIoBitmapB"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrMsrBitmap, "AddrMsrBitmap"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrVirtApicPage, "AddrVirtApicPage"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrVmcsLinkPtr, "AddrVmcsLinkPtr"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrVmreadBitmap, "AddrVmreadBitmap"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_AddrVmwriteBitmap, "AddrVmwriteBitmap"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_ApicRegVirt, "ApicRegVirt"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_BlocKMovSS, "BlockMovSS"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_Cpl, "Cpl"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_Cr3TargetCount, "Cr3TargetCount"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EntryCtlsAllowed1, "EntryCtlsAllowed1"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EntryCtlsDisallowed0, "EntryCtlsDisallowed0"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EntryInstrLen, "EntryInstrLen"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EntryInstrLenZero, "EntryInstrLenZero"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EntryIntInfoErrCodePe, "EntryIntInfoErrCodePe"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EntryIntInfoErrCodeVec, "EntryIntInfoErrCodeVec"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EntryIntInfoTypeVecRsvd, "EntryIntInfoTypeVecRsvd"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_EntryXcptErrCodeRsvd, "EntryXcptErrCodeRsvd"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_ExitCtlsAllowed1, "ExitCtlsAllowed1"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_ExitCtlsDisallowed0, "ExitCtlsDisallowed0"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestActStateHlt, "GuestActStateHlt"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestActStateRsvd, "GuestActStateRsvd"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestActStateShutdown, "GuestActStateShutdown"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestActStateSsDpl, "GuestActStateSsDpl"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestActStateStiMovSs, "GuestActStateStiMovSs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestCr0Fixed0, "GuestCr0Fixed0"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestCr0Fixed1, "GuestCr0Fixed1"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestCr0PgPe, "GuestCr0PgPe"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestCr3, "GuestCr3"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestCr4Fixed0, "GuestCr4Fixed0"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestCr4Fixed1, "GuestCr4Fixed1"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestDebugCtl, "GuestDebugCtl"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestDr7, "GuestDr7"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestEferMsr, "GuestEferMsr"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestEferMsrRsvd, "GuestEferMsrRsvd"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestGdtrBase, "GuestGdtrBase"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestGdtrLimit, "GuestGdtrLimit"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIdtrBase, "GuestIdtrBase"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIdtrLimit, "GuestIdtrLimit"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIntStateEnclave, "GuestIntStateEnclave"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIntStateExtInt, "GuestIntStateExtInt"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIntStateNmi, "GuestIntStateNmi"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIntStateRFlagsSti, "GuestIntStateRFlagsSti"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIntStateRsvd, "GuestIntStateRsvd"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIntStateSmi, "GuestIntStateSmi"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIntStateStiMovSs, "GuestIntStateStiMovSs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestIntStateVirtNmi, "GuestIntStateVirtNmi"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPae, "GuestPae"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPatMsr, "GuestPatMsr"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPcide, "GuestPcide"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPdpteCr3ReadPhys, "GuestPdpteCr3ReadPhys"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPdpte0Rsvd, "GuestPdpte0Rsvd"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPdpte1Rsvd, "GuestPdpte1Rsvd"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPdpte2Rsvd, "GuestPdpte2Rsvd"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPdpte3Rsvd, "GuestPdpte3Rsvd"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPndDbgXcptBsNoTf, "GuestPndDbgXcptBsNoTf"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPndDbgXcptBsTf, "GuestPndDbgXcptBsTf"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPndDbgXcptRsvd, "GuestPndDbgXcptRsvd"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestPndDbgXcptRtm, "GuestPndDbgXcptRtm"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestRip, "GuestRip"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestRipRsvd, "GuestRipRsvd"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestRFlagsIf, "GuestRFlagsIf"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestRFlagsRsvd, "GuestRFlagsRsvd"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestRFlagsVm, "GuestRFlagsVm"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrCsDefBig, "GuestSegAttrCsDefBig"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrCsDplEqSs, "GuestSegAttrCsDplEqSs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrCsDplLtSs, "GuestSegAttrCsDplLtSs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrCsDplZero, "GuestSegAttrCsDplZero"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrCsType, "GuestSegAttrCsType"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrCsTypeRead, "GuestSegAttrCsTypeRead"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDescTypeCs, "GuestSegAttrDescTypeCs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDescTypeDs, "GuestSegAttrDescTypeDs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDescTypeEs, "GuestSegAttrDescTypeEs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDescTypeFs, "GuestSegAttrDescTypeFs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDescTypeGs, "GuestSegAttrDescTypeGs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDescTypeSs, "GuestSegAttrDescTypeSs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDplRplCs, "GuestSegAttrDplRplCs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDplRplDs, "GuestSegAttrDplRplDs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDplRplEs, "GuestSegAttrDplRplEs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDplRplFs, "GuestSegAttrDplRplFs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDplRplGs, "GuestSegAttrDplRplGs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrDplRplSs, "GuestSegAttrDplRplSs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrGranCs, "GuestSegAttrGranCs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrGranDs, "GuestSegAttrGranDs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrGranEs, "GuestSegAttrGranEs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrGranFs, "GuestSegAttrGranFs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrGranGs, "GuestSegAttrGranGs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrGranSs, "GuestSegAttrGranSs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrLdtrDescType, "GuestSegAttrLdtrDescType"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrLdtrGran, "GuestSegAttrLdtrGran"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrLdtrPresent, "GuestSegAttrLdtrPresent"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrLdtrRsvd, "GuestSegAttrLdtrRsvd"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrLdtrType, "GuestSegAttrLdtrType"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrPresentCs, "GuestSegAttrPresentCs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrPresentDs, "GuestSegAttrPresentDs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrPresentEs, "GuestSegAttrPresentEs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrPresentFs, "GuestSegAttrPresentFs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrPresentGs, "GuestSegAttrPresentGs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrPresentSs, "GuestSegAttrPresentSs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrRsvdCs, "GuestSegAttrRsvdCs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrRsvdDs, "GuestSegAttrRsvdDs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrRsvdEs, "GuestSegAttrRsvdEs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrRsvdFs, "GuestSegAttrRsvdFs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrRsvdGs, "GuestSegAttrRsvdGs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrRsvdSs, "GuestSegAttrRsvdSs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrSsDplEqRpl, "GuestSegAttrSsDplEqRpl"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrSsDplZero, "GuestSegAttrSsDplZero"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrSsType, "GuestSegAttrSsType"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTrDescType, "GuestSegAttrTrDescType"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTrGran, "GuestSegAttrTrGran"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTrPresent, "GuestSegAttrTrPresent"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTrRsvd, "GuestSegAttrTrRsvd"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTrType, "GuestSegAttrTrType"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTrUnusable, "GuestSegAttrTrUnusable"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTypeAccCs, "GuestSegAttrTypeAccCs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTypeAccDs, "GuestSegAttrTypeAccDs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTypeAccEs, "GuestSegAttrTypeAccEs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTypeAccFs, "GuestSegAttrTypeAccFs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTypeAccGs, "GuestSegAttrTypeAccGs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrTypeAccSs, "GuestSegAttrTypeAccSs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrV86Cs, "GuestSegAttrV86Cs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrV86Ds, "GuestSegAttrV86Ds"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrV86Es, "GuestSegAttrV86Es"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrV86Fs, "GuestSegAttrV86Fs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrV86Gs, "GuestSegAttrV86Gs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegAttrV86Ss, "GuestSegAttrV86Ss"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseCs, "GuestSegBaseCs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseDs, "GuestSegBaseDs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseEs, "GuestSegBaseEs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseFs, "GuestSegBaseFs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseGs, "GuestSegBaseGs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseLdtr, "GuestSegBaseLdtr"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseSs, "GuestSegBaseSs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseTr, "GuestSegBaseTr"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseV86Cs, "GuestSegBaseV86Cs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseV86Ds, "GuestSegBaseV86Ds"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseV86Es, "GuestSegBaseV86Es"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseV86Fs, "GuestSegBaseV86Fs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseV86Gs, "GuestSegBaseV86Gs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegBaseV86Ss, "GuestSegBaseV86Ss"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegLimitV86Cs, "GuestSegLimitV86Cs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegLimitV86Ds, "GuestSegLimitV86Ds"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegLimitV86Es, "GuestSegLimitV86Es"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegLimitV86Fs, "GuestSegLimitV86Fs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegLimitV86Gs, "GuestSegLimitV86Gs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegLimitV86Ss, "GuestSegLimitV86Ss"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegSelCsSsRpl, "GuestSegSelCsSsRpl"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegSelLdtr, "GuestSegSelLdtr"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSegSelTr, "GuestSegSelTr"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_GuestSysenterEspEip, "GuestSysenterEspEip"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmcsLinkPtrCurVmcs, "VmcsLinkPtrCurVmcs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmcsLinkPtrReadPhys, "VmcsLinkPtrReadPhys"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmcsLinkPtrRevId, "VmcsLinkPtrRevId"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmcsLinkPtrShadow, "VmcsLinkPtrShadow"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostCr0Fixed0, "HostCr0Fixed0"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostCr0Fixed1, "HostCr0Fixed1"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostCr3, "HostCr3"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostCr4Fixed0, "HostCr4Fixed0"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostCr4Fixed1, "HostCr4Fixed1"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostCr4Pae, "HostCr4Pae"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostCr4Pcide, "HostCr4Pcide"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostCsTr, "HostCsTr"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostEferMsr, "HostEferMsr"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostEferMsrRsvd, "HostEferMsrRsvd"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostGuestLongMode, "HostGuestLongMode"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostGuestLongModeNoCpu, "HostGuestLongModeNoCpu"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostLongMode, "HostLongMode"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostPatMsr, "HostPatMsr"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostRip, "HostRip"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostRipRsvd, "HostRipRsvd"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostSel, "HostSel"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostSegBase, "HostSegBase"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostSs, "HostSs"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_HostSysenterEspEip, "HostSysenterEspEip"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_LongModeCS, "LongModeCS"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_NmiWindowExit, "NmiWindowExit"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_PinCtlsAllowed1, "PinCtlsAllowed1"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_PinCtlsDisallowed0, "PinCtlsDisallowed0"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_ProcCtlsAllowed1, "ProcCtlsAllowed1"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_ProcCtlsDisallowed0, "ProcCtlsDisallowed0"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_ProcCtls2Allowed1, "ProcCtls2Allowed1"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_ProcCtls2Disallowed0, "ProcCtls2Disallowed0"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_PtrInvalid, "PtrInvalid"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_PtrReadPhys, "PtrReadPhys"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_RealOrV86Mode, "RealOrV86Mode"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_SavePreemptTimer, "SavePreemptTimer"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_Success, "Success"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_TprThresholdRsvd, "TprThresholdRsvd"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_TprThresholdVTpr, "TprThresholdVTpr"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VirtApicPagePtrReadPhys, "VirtApicPageReadPhys"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VirtIntDelivery, "VirtIntDelivery"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VirtNmi, "VirtNmi"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VirtX2ApicTprShadow, "VirtX2ApicTprShadow"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VirtX2ApicVirtApic, "VirtX2ApicVirtApic"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmcsClear, "VmcsClear"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmcsLaunch, "VmcsLaunch"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmreadBitmapPtrReadPhys, "VmreadBitmapPtrReadPhys"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmwriteBitmapPtrReadPhys, "VmwriteBitmapPtrReadPhys"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_VmxRoot, "VmxRoot"),
    VMXV_DIAG_DESC(kVmxVDiag_Vmentry_Vpid, "Vpid")
    /* kVmxVDiag_End */
};
AssertCompile(RT_ELEMENTS(g_apszVmxVDiagDesc) == kVmxVDiag_End);
#undef VMXV_DIAG_DESC
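
/*
 * Illustration (not part of the original sources): VMXV_DIAG_DESC stringizes both of
 * its arguments, so an entry such as
 *     VMXV_DIAG_DESC(kVmxVDiag_Vmxon_Cpl, "Cpl")
 * expands to the single string literal "kVmxVDiag_Vmxon_Cpl - \"Cpl\"".  Indexing
 * g_apszVmxVDiagDesc with a VMXVDIAG value therefore yields a readable description,
 * and the AssertCompile above keeps the table and the enum from drifting apart.
 */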


/**
 * Gets a copy of the VMX host MSRs that were read by HM during ring-0
 * initialization.
 *
 * @return VBox status code.
 * @param   pVM        The cross context VM structure.
 * @param   pVmxMsrs   Where to store the VMXMSRS struct (only valid when
 *                     VINF_SUCCESS is returned).
 *
 * @remarks Caller needs to take care not to call this function too early. Call
 *          after HM initialization is fully complete.
 */
VMM_INT_DECL(int) HMVmxGetHostMsrs(PVM pVM, PVMXMSRS pVmxMsrs)
{
    AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pVmxMsrs, VERR_INVALID_PARAMETER);
    if (pVM->hm.s.vmx.fSupported)
    {
        *pVmxMsrs = pVM->hm.s.vmx.Msrs;
        return VINF_SUCCESS;
    }
    return VERR_VMX_NOT_SUPPORTED;
}


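/*
 * Usage sketch added for illustration (not part of the original file): a caller that
 * runs after HM ring-0 initialization could snapshot the host VMX MSRs like this:
 *
 *     VMXMSRS VmxMsrs;
 *     int rc = HMVmxGetHostMsrs(pVM, &VmxMsrs);
 *     if (RT_SUCCESS(rc))
 *         LogRel(("IA32_VMX_BASIC: %#RX64\n", VmxMsrs.u64Basic));
 *
 * The surrounding caller and the log text are hypothetical.
 */
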
/**
 * Gets the specified VMX host MSR that was read by HM during ring-0
 * initialization.
 *
 * @return VBox status code.
 * @param   pVM       The cross context VM structure.
 * @param   idMsr     The MSR.
 * @param   puValue   Where to store the MSR value (only updated when VINF_SUCCESS
 *                    is returned).
 *
 * @remarks Caller needs to take care not to call this function too early. Call
 *          after HM initialization is fully complete.
 */
VMM_INT_DECL(int) HMVmxGetHostMsr(PVM pVM, uint32_t idMsr, uint64_t *puValue)
{
    AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
    AssertPtrReturn(puValue, VERR_INVALID_PARAMETER);

    if (pVM->hm.s.vmx.fSupported)
    {
        PCVMXMSRS pVmxMsrs = &pVM->hm.s.vmx.Msrs;
        switch (idMsr)
        {
            case MSR_IA32_FEATURE_CONTROL:          *puValue = pVmxMsrs->u64FeatCtrl;     break;
            case MSR_IA32_VMX_BASIC:                *puValue = pVmxMsrs->u64Basic;        break;
            case MSR_IA32_VMX_PINBASED_CTLS:        *puValue = pVmxMsrs->PinCtls.u;       break;
            case MSR_IA32_VMX_PROCBASED_CTLS:       *puValue = pVmxMsrs->ProcCtls.u;      break;
            case MSR_IA32_VMX_PROCBASED_CTLS2:      *puValue = pVmxMsrs->ProcCtls2.u;     break;
            case MSR_IA32_VMX_EXIT_CTLS:            *puValue = pVmxMsrs->ExitCtls.u;      break;
            case MSR_IA32_VMX_ENTRY_CTLS:           *puValue = pVmxMsrs->EntryCtls.u;     break;
            case MSR_IA32_VMX_TRUE_PINBASED_CTLS:   *puValue = pVmxMsrs->TruePinCtls.u;   break;
            case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:  *puValue = pVmxMsrs->TrueProcCtls.u;  break;
            case MSR_IA32_VMX_TRUE_ENTRY_CTLS:      *puValue = pVmxMsrs->TrueEntryCtls.u; break;
            case MSR_IA32_VMX_TRUE_EXIT_CTLS:       *puValue = pVmxMsrs->TrueExitCtls.u;  break;
            case MSR_IA32_VMX_MISC:                 *puValue = pVmxMsrs->u64Misc;         break;
            case MSR_IA32_VMX_CR0_FIXED0:           *puValue = pVmxMsrs->u64Cr0Fixed0;    break;
            case MSR_IA32_VMX_CR0_FIXED1:           *puValue = pVmxMsrs->u64Cr0Fixed1;    break;
            case MSR_IA32_VMX_CR4_FIXED0:           *puValue = pVmxMsrs->u64Cr4Fixed0;    break;
            case MSR_IA32_VMX_CR4_FIXED1:           *puValue = pVmxMsrs->u64Cr4Fixed1;    break;
            case MSR_IA32_VMX_VMCS_ENUM:            *puValue = pVmxMsrs->u64VmcsEnum;     break;
            case MSR_IA32_VMX_VMFUNC:               *puValue = pVmxMsrs->u64VmFunc;       break;
            case MSR_IA32_VMX_EPT_VPID_CAP:         *puValue = pVmxMsrs->u64EptVpidCaps;  break;
            default:
            {
                AssertMsgFailed(("Invalid MSR %#x\n", idMsr));
                return VERR_NOT_FOUND;
            }
        }
        return VINF_SUCCESS;
    }
    return VERR_VMX_NOT_SUPPORTED;
}


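/*
 * Illustrative counterpart for a single MSR (not part of the original file):
 *
 *     uint64_t uVmxBasic = 0;
 *     if (RT_SUCCESS(HMVmxGetHostMsr(pVM, MSR_IA32_VMX_BASIC, &uVmxBasic)))
 *         LogRel(("IA32_VMX_BASIC: %#RX64 (VMCS revision id in bits 30:0)\n", uVmxBasic));
 *
 * Unknown MSR indices trip the AssertMsgFailed above and return VERR_NOT_FOUND.
 */
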
/**
 * Gets the description of a VMX instruction/VM-exit diagnostic.
 *
 * @returns The descriptive string.
 * @param   enmDiag    The VMX diagnostic.
 */
VMM_INT_DECL(const char *) HMVmxGetDiagDesc(VMXVDIAG enmDiag)
{
    if (RT_LIKELY((unsigned)enmDiag < RT_ELEMENTS(g_apszVmxVDiagDesc)))
        return g_apszVmxVDiagDesc[enmDiag];
    return "Unknown/invalid";
}


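/*
 * Illustration (not part of the original file): a failure path can turn a VMXVDIAG
 * value into a readable string for logging, e.g.
 *
 *     Log(("Nested VMX: VM-entry failed: %s\n", HMVmxGetDiagDesc(kVmxVDiag_Vmentry_VmcsClear)));
 *
 * which, given the table above, prints "kVmxVDiag_Vmentry_VmcsClear - \"VmcsClear\"".
 */
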
/**
 * Checks if a code selector (CS) is suitable for execution using hardware-assisted
 * VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise false.
 * @param   pSel         Pointer to the selector to check (CS).
 * @param   uStackDpl    The CPL, aka the DPL of the stack segment.
 */
static bool hmVmxIsCodeSelectorOk(PCCPUMSELREG pSel, unsigned uStackDpl)
{
    /*
     * Segment must be an accessed code segment, it must be present and it must
     * be usable.
     * Note! These are all standard requirements and if CS holds anything else
     *       we've got buggy code somewhere!
     */
    AssertCompile(X86DESCATTR_TYPE == 0xf);
    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P | X86DESCATTR_UNUSABLE))
                    == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P),
                    ("%#x\n", pSel->Attr.u),
                    false);

    /* For conforming segments, CS.DPL must be <= SS.DPL, while CS.DPL
       must equal SS.DPL for non-conforming segments.
       Note! This is also a hard requirement like above. */
    AssertMsgReturn(   pSel->Attr.n.u4Type & X86_SEL_TYPE_CONF
                    ? pSel->Attr.n.u2Dpl <= uStackDpl
                    : pSel->Attr.n.u2Dpl == uStackDpl,
                    ("u4Type=%#x u2Dpl=%u uStackDpl=%u\n", pSel->Attr.n.u4Type, pSel->Attr.n.u2Dpl, uStackDpl),
                    false);

    /*
     * The following two requirements are VT-x specific:
     *   - G bit must be set if any high limit bits are set.
     *   - G bit must be clear if any low limit bits are clear.
     */
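    /* Worked example added for illustration: a flat 4 GiB segment (u32Limit=0xffffffff,
       G=1) passes both checks, whereas u32Limit=0x10000 with G=1 fails the second one
       because a page-granular limit always has its low 12 bits set; such a combination
       cannot come from a well-formed descriptor. */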
    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 || pSel->Attr.n.u1Granularity)
        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
        return true;
    return false;
}


/**
 * Checks if a data selector (DS/ES/FS/GS) is suitable for execution using
 * hardware-assisted VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise false.
 * @param   pSel    Pointer to the selector to check (DS/ES/FS/GS).
 */
static bool hmVmxIsDataSelectorOk(PCCPUMSELREG pSel)
{
    /*
     * Unusable segments are OK.  These days they should be marked as such, but as
     * an alternative, for old saved states and AMD<->VT-x migration, we also treat
     * segments with all the attributes cleared as unusable.
     */
    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
        return true;

    /** @todo tighten these checks. Will require CPUM load adjusting. */

    /* Segment must be accessed. */
    if (pSel->Attr.u & X86_SEL_TYPE_ACCESSED)
    {
        /* Code segments must also be readable. */
        if (  !(pSel->Attr.u & X86_SEL_TYPE_CODE)
            || (pSel->Attr.u & X86_SEL_TYPE_READ))
        {
            /* The S bit must be set. */
            if (pSel->Attr.n.u1DescType)
            {
                /* Except for conforming segments, DPL >= RPL. */
                if (   pSel->Attr.n.u2Dpl >= (pSel->Sel & X86_SEL_RPL)
                    || pSel->Attr.n.u4Type >= X86_SEL_TYPE_ER_ACC)
                {
                    /* Segment must be present. */
                    if (pSel->Attr.n.u1Present)
                    {
                        /*
                         * The following two requirements are VT-x specific:
                         *   - G bit must be set if any high limit bits are set.
                         *   - G bit must be clear if any low limit bits are clear.
                         */
                        if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 || pSel->Attr.n.u1Granularity)
                            && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
                            return true;
                    }
                }
            }
        }
    }

    return false;
}


/**
 * Checks if the stack selector (SS) is suitable for execution using
 * hardware-assisted VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise false.
 * @param   pSel    Pointer to the selector to check (SS).
 */
static bool hmVmxIsStackSelectorOk(PCCPUMSELREG pSel)
{
    /*
     * Unusable segments are OK.  These days they should be marked as such, but as
     * an alternative, for old saved states and AMD<->VT-x migration, we also treat
     * segments with all the attributes cleared as unusable.
     */
    /** @todo r=bird: actually all zeroes isn't gonna cut it... SS.DPL == CPL. */
    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
        return true;

    /*
     * Segment must be an accessed writable segment, it must be present.
     * Note! These are all standard requirements and if SS holds anything else
     *       we've got buggy code somewhere!
     */
    AssertCompile(X86DESCATTR_TYPE == 0xf);
    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P | X86_SEL_TYPE_CODE))
                    == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P),
                    ("%#x\n", pSel->Attr.u), false);

    /* DPL must equal RPL.
       Note! This is also a hard requirement like above. */
    AssertMsgReturn(pSel->Attr.n.u2Dpl == (pSel->Sel & X86_SEL_RPL),
                    ("u2Dpl=%u Sel=%#x\n", pSel->Attr.n.u2Dpl, pSel->Sel), false);

    /*
     * The following two requirements are VT-x specific:
     *   - G bit must be set if any high limit bits are set.
     *   - G bit must be clear if any low limit bits are clear.
     */
    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 || pSel->Attr.n.u1Granularity)
        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
        return true;
    return false;
}


/**
 * Checks if the guest is in a suitable state for hardware-assisted VMX execution.
 *
 * @returns @c true if it is suitable, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest CPU context.
 *
 * @remarks @a pCtx can be a partial context and thus may not be necessarily the
 *          same as pVCpu->cpum.GstCtx! Thus don't eliminate the @a pCtx parameter.
 *          Secondly, if additional checks are added that require more of the CPU
 *          state, make sure REM (which supplies a partial state) is updated.
 */
VMM_INT_DECL(bool) HMVmxCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(HMIsEnabled(pVM));
    Assert(!CPUMIsGuestVmxEnabled(pCtx));
    Assert(   ( pVM->hm.s.vmx.fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS)
           || (!pVM->hm.s.vmx.fUnrestrictedGuest &&  pVM->hm.s.vmx.pRealModeTSS));

    pVCpu->hm.s.fActive = false;

    bool const fSupportsRealMode = pVM->hm.s.vmx.fUnrestrictedGuest || PDMVmmDevHeapIsEnabled(pVM);
    if (!pVM->hm.s.vmx.fUnrestrictedGuest)
    {
        /*
         * The VMM device heap is a requirement for emulating real mode or protected mode
         * without paging when the unrestricted guest execution feature is missing (VT-x only).
         */
        if (fSupportsRealMode)
        {
            if (CPUMIsGuestInRealModeEx(pCtx))
            {
                /*
                 * In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
                 * bases and limits, i.e. limit must be 64K and base must be selector * 16.
                 * If this is not true, we cannot execute real mode as V86 and have to fall
                 * back to emulation.
                 */
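                /* Illustration added for clarity (not from the original sources): with
                   CS=0x1234 the check below requires cs.u64Base >> 4 == 0x1234, i.e. a
                   base of 0x12340, together with a 64K limit; exactly what a real-mode
                   far jump to 1234:xxxx would have loaded. */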
                if (   pCtx->cs.Sel != (pCtx->cs.u64Base >> 4)
                    || pCtx->ds.Sel != (pCtx->ds.u64Base >> 4)
                    || pCtx->es.Sel != (pCtx->es.u64Base >> 4)
                    || pCtx->ss.Sel != (pCtx->ss.u64Base >> 4)
                    || pCtx->fs.Sel != (pCtx->fs.u64Base >> 4)
                    || pCtx->gs.Sel != (pCtx->gs.u64Base >> 4))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelBase);
                    return false;
                }
                if (   (pCtx->cs.u32Limit != 0xffff)
                    || (pCtx->ds.u32Limit != 0xffff)
                    || (pCtx->es.u32Limit != 0xffff)
                    || (pCtx->ss.u32Limit != 0xffff)
                    || (pCtx->fs.u32Limit != 0xffff)
                    || (pCtx->gs.u32Limit != 0xffff))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelLimit);
                    return false;
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckRmOk);
            }
            else
            {
                /*
                 * Verify the requirements for executing code in protected mode. VT-x can't
                 * handle the CPU state right after a switch from real to protected mode
                 * (all sorts of RPL & DPL assumptions).
                 */
                if (pVCpu->hm.s.vmx.fWasInRealMode)
                {
                    /** @todo If guest is in V86 mode, these checks should be different! */
                    if ((pCtx->cs.Sel & X86_SEL_RPL) != (pCtx->ss.Sel & X86_SEL_RPL))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRpl);
                        return false;
                    }
                    if (   !hmVmxIsCodeSelectorOk(&pCtx->cs, pCtx->ss.Attr.n.u2Dpl)
                        || !hmVmxIsDataSelectorOk(&pCtx->ds)
                        || !hmVmxIsDataSelectorOk(&pCtx->es)
                        || !hmVmxIsDataSelectorOk(&pCtx->fs)
                        || !hmVmxIsDataSelectorOk(&pCtx->gs)
                        || !hmVmxIsStackSelectorOk(&pCtx->ss))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
                        return false;
                    }
                }
                /* VT-x also chokes on invalid TR or LDTR selectors (minix). */
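                /* Illustrative note (not from the original sources): OR-ing a selector
                   with X86_SEL_RPL_LDT (the low RPL/TI bits) gives the offset of the last
                   byte of its 8-byte descriptor, so the comparisons below reject TR/LDTR
                   values whose descriptor would lie beyond the GDT limit. */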
                if (pCtx->gdtr.cbGdt)
                {
                    if ((pCtx->tr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadTr);
                        return false;
                    }
                    else if ((pCtx->ldtr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadLdt);
                        return false;
                    }
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckPmOk);
            }
        }
        else
        {
            if (   !CPUMIsGuestInLongModeEx(pCtx)
                && !pVM->hm.s.vmx.fUnrestrictedGuest)
            {
                if (   !pVM->hm.s.fNestedPaging       /* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
                    || CPUMIsGuestInRealModeEx(pCtx)) /* Requires a fake TSS for real mode - stored in the VMM device heap */
                    return false;

                /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
                if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr.Sel == 0)
                    return false;

                /*
                 * The guest is about to complete the switch to protected mode. Wait a bit longer.
                 * Windows XP; switch to protected mode; all selectors are marked not present
                 * in the hidden registers (possible recompiler bug; see load_seg_vm).
                 */
                /** @todo Is this supposed recompiler bug still relevant with IEM? */
                if (pCtx->cs.Attr.n.u1Present == 0)
                    return false;
                if (pCtx->ss.Attr.n.u1Present == 0)
                    return false;

                /*
                 * Windows XP: possible same as above, but new recompiler requires new
                 * heuristics? VT-x doesn't seem to like something about the guest state and
                 * this stuff avoids it.
                 */
                /** @todo This check is actually wrong, it doesn't take the direction of the
                 *        stack segment into account. But, it does the job for now. */
                if (pCtx->rsp >= pCtx->ss.u32Limit)
                    return false;
            }
        }
    }

    if (pVM->hm.s.vmx.fEnabled)
    {
        uint32_t uCr0Mask;

        /* If bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
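        /* Illustration (not from the original sources): on typical hardware
           IA32_VMX_CR0_FIXED0 reports at least PE, NE and PG as must-be-one bits, which
           is why NE is ignored below and PE/PG are masked out when real or paging-less
           protected mode is being emulated.  Actual values vary by CPU. */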
        uCr0Mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr0Fixed0;

        /* We ignore the NE bit here on purpose; see HMR0.cpp for details. */
        uCr0Mask &= ~X86_CR0_NE;

        if (fSupportsRealMode)
        {
            /* We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
            uCr0Mask &= ~(X86_CR0_PG | X86_CR0_PE);
        }
        else
        {
            /* We support protected mode without paging using identity mapping. */
            uCr0Mask &= ~X86_CR0_PG;
        }
        if ((pCtx->cr0 & uCr0Mask) != uCr0Mask)
            return false;

        /* If bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
        if ((pCtx->cr0 & uCr0Mask) != 0)
            return false;

        /* If bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
        uCr0Mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr4Fixed0;
        uCr0Mask &= ~X86_CR4_VMXE;
        if ((pCtx->cr4 & uCr0Mask) != uCr0Mask)
            return false;

        /* If bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
        if ((pCtx->cr4 & uCr0Mask) != 0)
            return false;

        pVCpu->hm.s.fActive = true;
        return true;
    }

    return false;
}
