VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp@74065

Last change on this file since 74065 was 74065, checked in by vboxsync, 7 years ago

VMM/IEM, HM: Nested VMX: bugref:9180 vmlaunch/vmresume bits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 32.4 KB
/* $Id: HMVMXAll.cpp 74065 2018-09-04 12:38:25Z vboxsync $ */
/** @file
 * HM VMX (VT-x) - All contexts.
 */

/*
 * Copyright (C) 2018 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/pdmapi.h>


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#define VMX_INSTR_DIAG_DESC(a_Def, a_Desc) #a_Def " - " #a_Desc
static const char * const g_apszVmxInstrDiagDesc[kVmxVInstrDiag_Last] =
{
    /* Internal processing errors. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_1, "Ipe_1"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_2, "Ipe_2"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_3, "Ipe_3"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_4, "Ipe_4"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_5, "Ipe_5"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_6, "Ipe_6"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_7, "Ipe_7"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_8, "Ipe_8"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_9, "Ipe_9"),
    /* VMXON. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_A20M, "A20M"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cpl, "Cpl"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cr0Fixed0, "Cr0Fixed0"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cr4Fixed0, "Cr4Fixed0"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Intercept, "Intercept"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_LongModeCS, "LongModeCS"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_MsrFeatCtl, "MsrFeatCtl"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrAbnormal, "PtrAbnormal"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrAlign, "PtrAlign"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrMap, "PtrMap"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrPhysRead, "PtrPhysRead"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrWidth, "PtrWidth"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Success, "Success"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_ShadowVmcs, "ShadowVmcs"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmxAlreadyRoot, "VmxAlreadyRoot"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Vmxe, "Vmxe"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmcsRevId, "VmcsRevId"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmxRootCpl, "VmxRootCpl"),
    /* VMXOFF. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Cpl, "Cpl"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Intercept, "Intercept"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_LongModeCS, "LongModeCS"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Success, "Success"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Vmxe, "Vmxe"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_VmxRoot, "VmxRoot"),
    /* VMPTRLD. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_Cpl, "Cpl"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_LongModeCS, "LongModeCS"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrAbnormal, "PtrAbnormal"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrAlign, "PtrAlign"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrMap, "PtrMap"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrReadPhys, "PtrReadPhys"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrVmxon, "PtrVmxon"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrWidth, "PtrWidth"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_ShadowVmcs, "ShadowVmcs"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_Success, "Success"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_VmcsRevId, "VmcsRevId"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_VmxRoot, "VmxRoot"),
    /* VMPTRST. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_Cpl, "Cpl"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_LongModeCS, "LongModeCS"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_PtrMap, "PtrMap"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_Success, "Success"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_VmxRoot, "VmxRoot"),
    /* VMCLEAR. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_Cpl, "Cpl"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_LongModeCS, "LongModeCS"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrAbnormal, "PtrAbnormal"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrAlign, "PtrAlign"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrMap, "PtrMap"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrReadPhys, "PtrReadPhys"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrVmxon, "PtrVmxon"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrWidth, "PtrWidth"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_Success, "Success"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_VmxRoot, "VmxRoot"),
    /* VMWRITE. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_Cpl, "Cpl"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_FieldInvalid, "FieldInvalid"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_FieldRo, "FieldRo"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_LinkPtrInvalid, "LinkPtrInvalid"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_LongModeCS, "LongModeCS"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_PtrInvalid, "PtrInvalid"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_PtrMap, "PtrMap"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_Success, "Success"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_VmxRoot, "VmxRoot"),
    /* VMREAD. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_Cpl, "Cpl"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_FieldInvalid, "FieldInvalid"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_LinkPtrInvalid, "LinkPtrInvalid"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_LongModeCS, "LongModeCS"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_PtrInvalid, "PtrInvalid"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_PtrMap, "PtrMap"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_Success, "Success"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_VmxRoot, "VmxRoot"),
    /* VMLAUNCH/VMRESUME. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrApicAccess, "AddrApicAccess"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrEntryMsrLoad, "AddrEntryMsrLoad"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrExitMsrLoad, "AddrExitMsrLoad"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrExitMsrStore, "AddrExitMsrStore"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrIoBitmapA, "AddrIoBitmapA"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrIoBitmapB, "AddrIoBitmapB"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrMsrBitmap, "AddrMsrBitmap"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrVirtApicPage, "AddrVirtApicPage"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrVmreadBitmap, "AddrVmreadBitmap"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrVmwriteBitmap, "AddrVmwriteBitmap"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ApicRegVirt, "ApicRegVirt"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_BlocKMovSS, "BlockMovSS"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_Cpl, "Cpl"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_Cr3TargetCount, "Cr3TargetCount"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryCtlsAllowed1, "EntryCtlsAllowed1"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_EntryCtlsDisallowed0, "EntryCtlsDisallowed0"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ExitCtlsAllowed1, "ExitCtlsAllowed1"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ExitCtlsDisallowed0, "ExitCtlsDisallowed0"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_LongModeCS, "LongModeCS"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_NmiWindowExit, "NmiWindowExit"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_PinCtlsAllowed1, "PinCtlsAllowed1"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_PinCtlsDisallowed0, "PinCtlsDisallowed0"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ProcCtlsAllowed1, "ProcCtlsAllowed1"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ProcCtlsDisallowed0, "ProcCtlsDisallowed0"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ProcCtls2Allowed1, "ProcCtls2Allowed1"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ProcCtls2Disallowed0, "ProcCtls2Disallowed0"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_PtrInvalid, "PtrInvalid"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_PtrReadPhys, "PtrReadPhys"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_SavePreemptTimer, "SavePreemptTimer"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_Success, "Success"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_TprThreshold, "TprThreshold"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_TprThresholdVTpr, "TprThresholdVTpr"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VirtApicPagePtrReadPhys, "VirtApicPageReadPhys"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VirtIntDelivery, "VirtIntDelivery"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VirtNmi, "VirtNmi"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VirtX2ApicTprShadow, "VirtX2ApicTprShadow"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VirtX2ApicVirtApic, "VirtX2ApicVirtApic"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VmcsClear, "VmcsClear"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VmcsLaunch, "VmcsLaunch"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VmxRoot, "VmxRoot"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_Vpid, "Vpid")
    /* kVmxVInstrDiag_Last */
};
#undef VMX_INSTR_DIAG_DESC
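
/* For reference (illustrative, not part of the original file): the macro
   stringizes both of its arguments, so VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cpl, "Cpl")
   expands to the concatenated string literal "kVmxVInstrDiag_Vmxon_Cpl - \"Cpl\"". */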


/**
 * Gets a copy of the VMX host MSRs that were read by HM during ring-0
 * initialization.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVmxMsrs    Where to store the VMXMSRS struct (only valid when
 *                      VINF_SUCCESS is returned).
 *
 * @remarks Caller needs to take care not to call this function too early. Call
 *          after HM initialization is fully complete.
 */
VMM_INT_DECL(int) HMVmxGetHostMsrs(PVM pVM, PVMXMSRS pVmxMsrs)
{
    AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pVmxMsrs, VERR_INVALID_PARAMETER);
    if (pVM->hm.s.vmx.fSupported)
    {
        *pVmxMsrs = pVM->hm.s.vmx.Msrs;
        return VINF_SUCCESS;
    }
    return VERR_VMX_NOT_SUPPORTED;
}
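
/* Hypothetical usage sketch (not part of this file), assuming HM ring-0
   initialization has fully completed:

       VMXMSRS VmxMsrs;
       int rc = HMVmxGetHostMsrs(pVM, &VmxMsrs);
       if (RT_SUCCESS(rc))
           LogRel(("IA32_VMX_BASIC: %#RX64\n", VmxMsrs.u64Basic));
 */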


/**
 * Gets the specified VMX host MSR that was read by HM during ring-0
 * initialization.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   idMsr       The MSR.
 * @param   puValue     Where to store the MSR value (only updated when
 *                      VINF_SUCCESS is returned).
 *
 * @remarks Caller needs to take care not to call this function too early. Call
 *          after HM initialization is fully complete.
 */
VMM_INT_DECL(int) HMVmxGetHostMsr(PVM pVM, uint32_t idMsr, uint64_t *puValue)
{
    AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
    AssertPtrReturn(puValue, VERR_INVALID_PARAMETER);

    if (!pVM->hm.s.vmx.fSupported)
        return VERR_VMX_NOT_SUPPORTED;

    PCVMXMSRS pVmxMsrs = &pVM->hm.s.vmx.Msrs;
    switch (idMsr)
    {
        case MSR_IA32_FEATURE_CONTROL:         *puValue = pVmxMsrs->u64FeatCtrl;     break;
        case MSR_IA32_VMX_BASIC:               *puValue = pVmxMsrs->u64Basic;        break;
        case MSR_IA32_VMX_PINBASED_CTLS:       *puValue = pVmxMsrs->PinCtls.u;       break;
        case MSR_IA32_VMX_PROCBASED_CTLS:      *puValue = pVmxMsrs->ProcCtls.u;      break;
        case MSR_IA32_VMX_PROCBASED_CTLS2:     *puValue = pVmxMsrs->ProcCtls2.u;     break;
        case MSR_IA32_VMX_EXIT_CTLS:           *puValue = pVmxMsrs->ExitCtls.u;      break;
        case MSR_IA32_VMX_ENTRY_CTLS:          *puValue = pVmxMsrs->EntryCtls.u;     break;
        case MSR_IA32_VMX_TRUE_PINBASED_CTLS:  *puValue = pVmxMsrs->TruePinCtls.u;   break;
        case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: *puValue = pVmxMsrs->TrueProcCtls.u;  break;
        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:     *puValue = pVmxMsrs->TrueEntryCtls.u; break;
        case MSR_IA32_VMX_TRUE_EXIT_CTLS:      *puValue = pVmxMsrs->TrueExitCtls.u;  break;
        case MSR_IA32_VMX_MISC:                *puValue = pVmxMsrs->u64Misc;         break;
        case MSR_IA32_VMX_CR0_FIXED0:          *puValue = pVmxMsrs->u64Cr0Fixed0;    break;
        case MSR_IA32_VMX_CR0_FIXED1:          *puValue = pVmxMsrs->u64Cr0Fixed1;    break;
        case MSR_IA32_VMX_CR4_FIXED0:          *puValue = pVmxMsrs->u64Cr4Fixed0;    break;
        case MSR_IA32_VMX_CR4_FIXED1:          *puValue = pVmxMsrs->u64Cr4Fixed1;    break;
        case MSR_IA32_VMX_VMCS_ENUM:           *puValue = pVmxMsrs->u64VmcsEnum;     break;
        case MSR_IA32_VMX_VMFUNC:              *puValue = pVmxMsrs->u64VmFunc;       break;
        case MSR_IA32_VMX_EPT_VPID_CAP:        *puValue = pVmxMsrs->u64EptVpidCaps;  break;
        default:
        {
            AssertMsgFailed(("Invalid MSR %#x\n", idMsr));
            return VERR_NOT_FOUND;
        }
    }
    return VINF_SUCCESS;
}
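
/* Hypothetical usage sketch (not part of this file): fetching a single cached
   MSR, e.g. when building a nested guest's view of IA32_VMX_MISC:

       uint64_t uVmxMisc;
       int rc = HMVmxGetHostMsr(pVM, MSR_IA32_VMX_MISC, &uVmxMisc);
       AssertRCReturn(rc, rc);
 */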


/**
 * Gets the description of a VMX instruction diagnostic enum member.
 *
 * @returns The descriptive string.
 * @param   enmInstrDiag    The VMX instruction diagnostic.
 */
VMM_INT_DECL(const char *) HMVmxGetInstrDiagDesc(VMXVINSTRDIAG enmInstrDiag)
{
    if (RT_LIKELY((unsigned)enmInstrDiag < RT_ELEMENTS(g_apszVmxInstrDiagDesc)))
        return g_apszVmxInstrDiagDesc[enmInstrDiag];
    return "Unknown/invalid";
}
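
/* Hypothetical usage sketch (not part of this file): logging why a nested VMX
   instruction failed, for some diagnostic value enmDiag obtained elsewhere:

       Log(("VMXON failed: %s\n", HMVmxGetInstrDiagDesc(enmDiag)));
 */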


/**
 * Checks if a code selector (CS) is suitable for execution using
 * hardware-assisted VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise false.
 * @param   pSel        Pointer to the selector to check (CS).
 * @param   uStackDpl   The CPL, aka the DPL of the stack segment.
 */
static bool hmVmxIsCodeSelectorOk(PCCPUMSELREG pSel, unsigned uStackDpl)
{
    /*
     * Segment must be an accessed code segment, it must be present and it must
     * be usable.
     * Note! These are all standard requirements and if CS holds anything else
     *       we've got buggy code somewhere!
     */
    AssertCompile(X86DESCATTR_TYPE == 0xf);
    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P | X86DESCATTR_UNUSABLE))
                    == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P),
                    ("%#x\n", pSel->Attr.u),
                    false);

    /* For conforming segments, CS.DPL must be <= SS.DPL, while CS.DPL
       must equal SS.DPL for non-conforming segments.
       Note! This is also a hard requirement like above. */
    AssertMsgReturn(   pSel->Attr.n.u4Type & X86_SEL_TYPE_CONF
                    ?  pSel->Attr.n.u2Dpl <= uStackDpl
                    :  pSel->Attr.n.u2Dpl == uStackDpl,
                    ("u4Type=%#x u2Dpl=%u uStackDpl=%u\n", pSel->Attr.n.u4Type, pSel->Attr.n.u2Dpl, uStackDpl),
                    false);

    /*
     * The following two requirements are VT-x specific:
     *  - G bit must be set if any high limit bits are set.
     *  - G bit must be clear if any low limit bits are clear.
     */
    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
        return true;
    return false;
}
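
/* Worked example of the VT-x granularity rule above (illustrative values):
     u32Limit=0x000fffff, G=1  ->  OK  (all low 12 bits set, as G=1 implies)
     u32Limit=0x0000ffff, G=0  ->  OK  (no high limit bits set)
     u32Limit=0x00012345, G=1  ->  bad (low 12 bits not all set)
     u32Limit=0x00123456, G=0  ->  bad (high limit bits set without G)
   The same rule is enforced for data and stack selectors below. */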


/**
 * Checks if a data selector (DS/ES/FS/GS) is suitable for execution using
 * hardware-assisted VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise false.
 * @param   pSel    Pointer to the selector to check (DS/ES/FS/GS).
 */
static bool hmVmxIsDataSelectorOk(PCCPUMSELREG pSel)
{
    /*
     * Unusable segments are OK. These days they should be marked as such, but
     * as an alternative, for old saved states and for AMD<->VT-x migration,
     * we also treat segments with all the attributes cleared as unusable.
     */
    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
        return true;

    /** @todo tighten these checks. Will require CPUM load adjusting. */

    /* Segment must be accessed. */
    if (pSel->Attr.u & X86_SEL_TYPE_ACCESSED)
    {
        /* Code segments must also be readable. */
        if (  !(pSel->Attr.u & X86_SEL_TYPE_CODE)
            || (pSel->Attr.u & X86_SEL_TYPE_READ))
        {
            /* The S bit must be set. */
            if (pSel->Attr.n.u1DescType)
            {
                /* Except for conforming segments, DPL >= RPL. */
                if (   pSel->Attr.n.u2Dpl >= (pSel->Sel & X86_SEL_RPL)
                    || pSel->Attr.n.u4Type >= X86_SEL_TYPE_ER_ACC)
                {
                    /* Segment must be present. */
                    if (pSel->Attr.n.u1Present)
                    {
                        /*
                         * The following two requirements are VT-x specific:
                         *  - G bit must be set if any high limit bits are set.
                         *  - G bit must be clear if any low limit bits are clear.
                         */
                        if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
                            && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
                            return true;
                    }
                }
            }
        }
    }

    return false;
}


/**
 * Checks if the stack selector (SS) is suitable for execution using
 * hardware-assisted VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise false.
 * @param   pSel    Pointer to the selector to check (SS).
 */
static bool hmVmxIsStackSelectorOk(PCCPUMSELREG pSel)
{
    /*
     * Unusable segments are OK. These days they should be marked as such, but
     * as an alternative, for old saved states and for AMD<->VT-x migration,
     * we also treat segments with all the attributes cleared as unusable.
     */
    /** @todo r=bird: actually all zeroes isn't gonna cut it... SS.DPL == CPL. */
    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
        return true;

    /*
     * Segment must be an accessed writable segment, and it must be present.
     * Note! These are all standard requirements and if SS holds anything else
     *       we've got buggy code somewhere!
     */
    AssertCompile(X86DESCATTR_TYPE == 0xf);
    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P | X86_SEL_TYPE_CODE))
                    == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P),
                    ("%#x\n", pSel->Attr.u), false);

    /* DPL must equal RPL.
       Note! This is also a hard requirement like above. */
    AssertMsgReturn(pSel->Attr.n.u2Dpl == (pSel->Sel & X86_SEL_RPL),
                    ("u2Dpl=%u Sel=%#x\n", pSel->Attr.n.u2Dpl, pSel->Sel), false);

    /*
     * The following two requirements are VT-x specific:
     *  - G bit must be set if any high limit bits are set.
     *  - G bit must be clear if any low limit bits are clear.
     */
    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
        return true;
    return false;
}


/**
 * Checks if the guest is in a suitable state for hardware-assisted VMX execution.
 *
 * @returns @c true if it is suitable, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest CPU context.
 *
 * @remarks @a pCtx can be a partial context and thus may not necessarily be the
 *          same as pVCpu->cpum.GstCtx! Thus don't eliminate the @a pCtx parameter.
 *          Secondly, if additional checks are added that require more of the CPU
 *          state, make sure REM (which supplies a partial state) is updated.
 */
VMM_INT_DECL(bool) HMVmxCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(HMIsEnabled(pVM));
    Assert(!CPUMIsGuestVmxEnabled(pCtx));
    Assert(   ( pVM->hm.s.vmx.fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS)
           || (!pVM->hm.s.vmx.fUnrestrictedGuest &&  pVM->hm.s.vmx.pRealModeTSS));

    pVCpu->hm.s.fActive = false;

    bool const fSupportsRealMode = pVM->hm.s.vmx.fUnrestrictedGuest || PDMVmmDevHeapIsEnabled(pVM);
    if (!pVM->hm.s.vmx.fUnrestrictedGuest)
    {
        /*
         * The VMM device heap is a requirement for emulating real mode or protected mode
         * without paging when the unrestricted guest execution feature is missing (VT-x only).
         */
        if (fSupportsRealMode)
        {
            if (CPUMIsGuestInRealModeEx(pCtx))
            {
                /*
                 * In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
                 * bases and limits, i.e. limit must be 64K and base must be selector * 16.
                 * If this is not true, we cannot execute real mode as V86 and have to fall
                 * back to emulation.
                 */
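                /* Illustrative example of the rule above: CS=0x1234 requires
                   base 0x12340 (0x1234 * 16) and limit 0xffff; anything else
                   forces the fallback to software emulation. */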
                if (   pCtx->cs.Sel != (pCtx->cs.u64Base >> 4)
                    || pCtx->ds.Sel != (pCtx->ds.u64Base >> 4)
                    || pCtx->es.Sel != (pCtx->es.u64Base >> 4)
                    || pCtx->ss.Sel != (pCtx->ss.u64Base >> 4)
                    || pCtx->fs.Sel != (pCtx->fs.u64Base >> 4)
                    || pCtx->gs.Sel != (pCtx->gs.u64Base >> 4))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelBase);
                    return false;
                }
                if (   (pCtx->cs.u32Limit != 0xffff)
                    || (pCtx->ds.u32Limit != 0xffff)
                    || (pCtx->es.u32Limit != 0xffff)
                    || (pCtx->ss.u32Limit != 0xffff)
                    || (pCtx->fs.u32Limit != 0xffff)
                    || (pCtx->gs.u32Limit != 0xffff))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelLimit);
                    return false;
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckRmOk);
            }
            else
            {
                /*
                 * Verify the requirements for executing code in protected mode. VT-x can't
                 * handle the CPU state right after a switch from real to protected mode
                 * (all sorts of RPL & DPL assumptions).
                 */
                if (pVCpu->hm.s.vmx.fWasInRealMode)
                {
                    /** @todo If guest is in V86 mode, these checks should be different! */
                    if ((pCtx->cs.Sel & X86_SEL_RPL) != (pCtx->ss.Sel & X86_SEL_RPL))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRpl);
                        return false;
                    }
                    if (   !hmVmxIsCodeSelectorOk(&pCtx->cs, pCtx->ss.Attr.n.u2Dpl)
                        || !hmVmxIsDataSelectorOk(&pCtx->ds)
                        || !hmVmxIsDataSelectorOk(&pCtx->es)
                        || !hmVmxIsDataSelectorOk(&pCtx->fs)
                        || !hmVmxIsDataSelectorOk(&pCtx->gs)
                        || !hmVmxIsStackSelectorOk(&pCtx->ss))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
                        return false;
                    }
                }
                /* VT-x also chokes on invalid TR or LDTR selectors (minix). */
                if (pCtx->gdtr.cbGdt)
                {
                    if ((pCtx->tr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadTr);
                        return false;
                    }
                    else if ((pCtx->ldtr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadLdt);
                        return false;
                    }
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckPmOk);
            }
        }
        else
        {
            if (   !CPUMIsGuestInLongModeEx(pCtx)
                && !pVM->hm.s.vmx.fUnrestrictedGuest)
            {
                if (   !pVM->hm.s.fNestedPaging       /* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
                    || CPUMIsGuestInRealModeEx(pCtx)) /* Requires a fake TSS for real mode - stored in the VMM device heap */
                    return false;

                /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
                if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr.Sel == 0)
                    return false;

                /*
                 * The guest is about to complete the switch to protected mode. Wait a bit longer.
                 * Windows XP; switch to protected mode; all selectors are marked not present
                 * in the hidden registers (possible recompiler bug; see load_seg_vm).
                 */
                /** @todo Is this supposed recompiler bug still relevant with IEM? */
                if (pCtx->cs.Attr.n.u1Present == 0)
                    return false;
                if (pCtx->ss.Attr.n.u1Present == 0)
                    return false;

                /*
                 * Windows XP: possibly the same as above, but the new recompiler requires new
                 * heuristics? VT-x doesn't seem to like something about the guest state and
                 * this stuff avoids it.
                 */
                /** @todo This check is actually wrong, it doesn't take the direction of the
                 *        stack segment into account. But, it does the job for now. */
                if (pCtx->rsp >= pCtx->ss.u32Limit)
                    return false;
            }
        }
    }

    if (pVM->hm.s.vmx.fEnabled)
    {
        uint32_t uCr0Mask;

        /* If bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
        uCr0Mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr0Fixed0;

        /* We ignore the NE bit here on purpose; see HMR0.cpp for details. */
        uCr0Mask &= ~X86_CR0_NE;

        if (fSupportsRealMode)
        {
            /* We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
            uCr0Mask &= ~(X86_CR0_PG | X86_CR0_PE);
        }
        else
        {
            /* We support protected mode without paging using identity mapping. */
            uCr0Mask &= ~X86_CR0_PG;
        }
        if ((pCtx->cr0 & uCr0Mask) != uCr0Mask)
            return false;

        /* If bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
        if ((pCtx->cr0 & uCr0Mask) != 0)
            return false;
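
        /* Illustrative example with typical (assumed, not VM-specific) values:
           if u64Cr0Fixed0 = 0x80000021 (PG|NE|PE) and u64Cr0Fixed1 = 0xffffffff,
           then after masking out NE (and PG/PE when real-mode emulation is
           supported) no fixed-0 bits remain to enforce, and since ~0xffffffff
           is 0 no CR0 bit is required to be clear either. */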

        /* If bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
        uCr0Mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr4Fixed0;
        uCr0Mask &= ~X86_CR4_VMXE;
        if ((pCtx->cr4 & uCr0Mask) != uCr0Mask)
            return false;

        /* If bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
        if ((pCtx->cr4 & uCr0Mask) != 0)
            return false;

        pVCpu->hm.s.fActive = true;
        return true;
    }

    return false;
}

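/* Hypothetical caller sketch (not part of this file): a scheduler deciding
   whether the guest can currently run under hardware-assisted VMX or must
   fall back to software emulation:

       if (HMVmxCanExecuteGuest(pVCpu, &pVCpu->cpum.GstCtx))
           return EMSTATE_HM;
       return EMSTATE_IEM;
 */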