VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp@ 74054

Last change on this file since 74054 was 74054, checked in by vboxsync, 6 years ago

VMM: Nested VMX: bugref:9180 vmlaunch/vmresume bits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 31.6 KB
Line 
1/* $Id: HMVMXAll.cpp 74054 2018-09-04 05:23:52Z vboxsync $ */
2/** @file
3 * HM VMX (VT-x) - All contexts.
4 */
5
6/*
7 * Copyright (C) 2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_HM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include "HMInternal.h"
25#include <VBox/vmm/vm.h>
26#include <VBox/vmm/pdmapi.h>
27
28
29/*********************************************************************************************************************************
30* Global Variables *
31*********************************************************************************************************************************/
/** Expands a VMX instruction diagnostic enum member into a readable
 *  "EnumName - \"Description\"" string (both arguments are stringized). */
#define VMX_INSTR_DIAG_DESC(a_Def, a_Desc) #a_Def " - " #a_Desc
/**
 * Description strings for the VMX instruction diagnostic enum, indexed directly
 * by the enum value (see HMVmxGetInstrDiagDesc).
 *
 * @note The entries MUST be kept in the exact declaration order of the
 *       VMXVINSTRDIAG enum (bounded by kVmxVInstrDiag_Last); the array is
 *       indexed with the raw enum value, so any reordering here without a
 *       matching enum change silently returns the wrong description.
 */
static const char * const g_apszVmxInstrDiagDesc[kVmxVInstrDiag_Last] =
{
    /* Internal processing errors. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_1                            , "Ipe_1"                  ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_2                            , "Ipe_2"                  ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_3                            , "Ipe_3"                  ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_4                            , "Ipe_4"                  ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_5                            , "Ipe_5"                  ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_6                            , "Ipe_6"                  ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_7                            , "Ipe_7"                  ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_8                            , "Ipe_8"                  ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_9                            , "Ipe_9"                  ),
    /* VMXON. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_A20M                       , "A20M"                   ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cpl                        , "Cpl"                    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cr0Fixed0                  , "Cr0Fixed0"              ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cr4Fixed0                  , "Cr4Fixed0"              ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Intercept                  , "Intercept"              ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_LongModeCS                 , "LongModeCS"             ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_MsrFeatCtl                 , "MsrFeatCtl"             ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrAbnormal                , "PtrAbnormal"            ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrAlign                   , "PtrAlign"               ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrMap                     , "PtrMap"                 ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrPhysRead                , "PtrPhysRead"            ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrWidth                   , "PtrWidth"               ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_RealOrV86Mode              , "RealOrV86Mode"          ),
    /* Note: not strictly alphabetical here; order mirrors the enum declaration. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Success                    , "Success"                ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_ShadowVmcs                 , "ShadowVmcs"             ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmxAlreadyRoot             , "VmxAlreadyRoot"         ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Vmxe                       , "Vmxe"                   ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmcsRevId                  , "VmcsRevId"              ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmxRootCpl                 , "VmxRootCpl"             ),
    /* VMXOFF. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Cpl                       , "Cpl"                    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Intercept                 , "Intercept"              ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_LongModeCS                , "LongModeCS"             ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_RealOrV86Mode             , "RealOrV86Mode"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Success                   , "Success"                ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Vmxe                      , "Vmxe"                   ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_VmxRoot                   , "VmxRoot"                ),
    /* VMPTRLD. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_Cpl                      , "Cpl"                    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_LongModeCS               , "LongModeCS"             ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrAbnormal              , "PtrAbnormal"            ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrAlign                 , "PtrAlign"               ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrMap                   , "PtrMap"                 ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrReadPhys              , "PtrReadPhys"            ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrVmxon                 , "PtrVmxon"               ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrWidth                 , "PtrWidth"               ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_RealOrV86Mode            , "RealOrV86Mode"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_ShadowVmcs               , "ShadowVmcs"             ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_Success                  , "Success"                ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_VmcsRevId                , "VmcsRevId"              ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_VmxRoot                  , "VmxRoot"                ),
    /* VMPTRST. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_Cpl                      , "Cpl"                    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_LongModeCS               , "LongModeCS"             ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_PtrMap                   , "PtrMap"                 ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_RealOrV86Mode            , "RealOrV86Mode"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_Success                  , "Success"                ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_VmxRoot                  , "VmxRoot"                ),
    /* VMCLEAR. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_Cpl                      , "Cpl"                    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_LongModeCS               , "LongModeCS"             ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrAbnormal              , "PtrAbnormal"            ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrAlign                 , "PtrAlign"               ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrMap                   , "PtrMap"                 ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrReadPhys              , "PtrReadPhys"            ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrVmxon                 , "PtrVmxon"               ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrWidth                 , "PtrWidth"               ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_RealOrV86Mode            , "RealOrV86Mode"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_Success                  , "Success"                ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_VmxRoot                  , "VmxRoot"                ),
    /* VMWRITE. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_Cpl                      , "Cpl"                    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_FieldInvalid             , "FieldInvalid"           ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_FieldRo                  , "FieldRo"                ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_LinkPtrInvalid           , "LinkPtrInvalid"         ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_LongModeCS               , "LongModeCS"             ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_PtrInvalid               , "PtrInvalid"             ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_PtrMap                   , "PtrMap"                 ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_RealOrV86Mode            , "RealOrV86Mode"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_Success                  , "Success"                ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_VmxRoot                  , "VmxRoot"                ),
    /* VMREAD. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_Cpl                       , "Cpl"                    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_FieldInvalid              , "FieldInvalid"           ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_LinkPtrInvalid            , "LinkPtrInvalid"         ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_LongModeCS                , "LongModeCS"             ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_PtrInvalid                , "PtrInvalid"             ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_PtrMap                    , "PtrMap"                 ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_RealOrV86Mode             , "RealOrV86Mode"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_Success                   , "Success"                ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_VmxRoot                   , "VmxRoot"                ),
    /* VMLAUNCH/VMRESUME. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrApicAccess           , "AddrApicAccess"         ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrIoBitmapA            , "AddrIoBitmapA"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrIoBitmapB            , "AddrIoBitmapB"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrMsrBitmap            , "AddrMsrBitmap"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrVirtApicPage         , "AddrVirtApicPage"       ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrVmreadBitmap         , "AddrVmreadBitmap"       ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_AddrVmwriteBitmap        , "AddrVmwriteBitmap"      ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ApicRegVirt              , "ApicRegVirt"            ),
    /* Note: the enum member spells "BlocK" with a capital K; the description uses the intended spelling. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_BlocKMovSS               , "BlockMovSS"             ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_Cpl                      , "Cpl"                    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_Cr3TargetCount           , "Cr3TargetCount"         ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_LongModeCS               , "LongModeCS"             ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_NmiWindowExit            , "NmiWindowExit"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_PinCtlsAllowed1          , "PinCtlsAllowed1"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_PinCtlsDisallowed0       , "PinCtlsDisallowed0"     ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ProcCtlsAllowed1         , "ProcCtlsAllowed1"       ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ProcCtlsDisallowed0      , "ProcCtlsDisallowed0"    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ProcCtls2Allowed1        , "ProcCtls2Allowed1"      ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_ProcCtls2Disallowed0     , "ProcCtls2Disallowed0"   ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_PtrInvalid               , "PtrInvalid"             ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_PtrReadPhys              , "PtrReadPhys"            ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_RealOrV86Mode            , "RealOrV86Mode"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_Success                  , "Success"                ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_TprThreshold             , "TprThreshold"           ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_TprThresholdVTpr         , "TprThresholdVTpr"       ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VirtApicPagePtrReadPhys  , "VirtApicPageReadPhys"   ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VirtIntDelivery          , "VirtIntDelivery"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VirtNmi                  , "VirtNmi"                ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VirtX2ApicTprShadow      , "VirtX2ApicTprShadow"    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VirtX2ApicVirtApic       , "VirtX2ApicVirtApic"     ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VmcsClear                , "VmcsClear"              ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VmcsLaunch               , "VmcsLaunch"             ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_VmxRoot                  , "VmxRoot"                ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmentry_Vpid                     , "Vpid"                   )
    /* kVmxVInstrDiag_Last */
};
#undef VMX_INSTR_DIAG_DESC
165
166
167/**
168 * Gets a copy of the VMX host MSRs that were read by HM during ring-0
169 * initialization.
170 *
171 * @return VBox status code.
172 * @param pVM The cross context VM structure.
173 * @param pVmxMsrs Where to store the VMXMSRS struct (only valid when
174 * VINF_SUCCESS is returned).
175 *
176 * @remarks Caller needs to take care not to call this function too early. Call
177 * after HM initialization is fully complete.
178 */
179VMM_INT_DECL(int) HMVmxGetHostMsrs(PVM pVM, PVMXMSRS pVmxMsrs)
180{
181 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
182 AssertPtrReturn(pVmxMsrs, VERR_INVALID_PARAMETER);
183 if (pVM->hm.s.vmx.fSupported)
184 {
185 *pVmxMsrs = pVM->hm.s.vmx.Msrs;
186 return VINF_SUCCESS;
187 }
188 return VERR_VMX_NOT_SUPPORTED;
189}
190
191
/**
 * Gets the specified VMX host MSR that was read by HM during ring-0
 * initialization.
 *
 * @return VBox status code.
 * @retval VINF_SUCCESS and @a *puValue updated on success.
 * @retval VERR_VMX_NOT_SUPPORTED when VT-x is not usable on this host.
 * @retval VERR_NOT_FOUND when @a idMsr is not one of the cached VMX MSRs.
 * @param   pVM         The cross context VM structure.
 * @param   idMsr       The MSR.
 * @param   puValue     Where to store the MSR value (only updated when VINF_SUCCESS
 *                      is returned).
 *
 * @remarks Caller needs to take care not to call this function too early. Call
 *          after HM initialization is fully complete.
 */
VMM_INT_DECL(int) HMVmxGetHostMsr(PVM pVM, uint32_t idMsr, uint64_t *puValue)
{
    AssertPtrReturn(pVM,     VERR_INVALID_PARAMETER);
    AssertPtrReturn(puValue, VERR_INVALID_PARAMETER);

    if (!pVM->hm.s.vmx.fSupported)
        return VERR_VMX_NOT_SUPPORTED;

    /* Map the MSR id onto the corresponding member of the cached VMXMSRS copy. */
    PCVMXMSRS pVmxMsrs = &pVM->hm.s.vmx.Msrs;
    switch (idMsr)
    {
        case MSR_IA32_FEATURE_CONTROL:         *puValue = pVmxMsrs->u64FeatCtrl;    break;
        case MSR_IA32_VMX_BASIC:               *puValue = pVmxMsrs->u64Basic;       break;
        case MSR_IA32_VMX_PINBASED_CTLS:       *puValue = pVmxMsrs->PinCtls.u;      break;
        case MSR_IA32_VMX_PROCBASED_CTLS:      *puValue = pVmxMsrs->ProcCtls.u;     break;
        case MSR_IA32_VMX_PROCBASED_CTLS2:     *puValue = pVmxMsrs->ProcCtls2.u;    break;
        case MSR_IA32_VMX_EXIT_CTLS:           *puValue = pVmxMsrs->ExitCtls.u;     break;
        case MSR_IA32_VMX_ENTRY_CTLS:          *puValue = pVmxMsrs->EntryCtls.u;    break;
        case MSR_IA32_VMX_TRUE_PINBASED_CTLS:  *puValue = pVmxMsrs->TruePinCtls.u;  break;
        case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: *puValue = pVmxMsrs->TrueProcCtls.u; break;
        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:     *puValue = pVmxMsrs->TrueEntryCtls.u; break;
        case MSR_IA32_VMX_TRUE_EXIT_CTLS:      *puValue = pVmxMsrs->TrueExitCtls.u; break;
        case MSR_IA32_VMX_MISC:                *puValue = pVmxMsrs->u64Misc;        break;
        case MSR_IA32_VMX_CR0_FIXED0:          *puValue = pVmxMsrs->u64Cr0Fixed0;   break;
        case MSR_IA32_VMX_CR0_FIXED1:          *puValue = pVmxMsrs->u64Cr0Fixed1;   break;
        case MSR_IA32_VMX_CR4_FIXED0:          *puValue = pVmxMsrs->u64Cr4Fixed0;   break;
        case MSR_IA32_VMX_CR4_FIXED1:          *puValue = pVmxMsrs->u64Cr4Fixed1;   break;
        case MSR_IA32_VMX_VMCS_ENUM:           *puValue = pVmxMsrs->u64VmcsEnum;    break;
        case MSR_IA32_VMX_VMFUNC:              *puValue = pVmxMsrs->u64VmFunc;      break;
        case MSR_IA32_VMX_EPT_VPID_CAP:        *puValue = pVmxMsrs->u64EptVpidCaps; break;
        default:
        {
            /* Unknown id: assert in strict builds, report not-found to the caller. */
            AssertMsgFailed(("Invalid MSR %#x\n", idMsr));
            return VERR_NOT_FOUND;
        }
    }
    return VINF_SUCCESS;
}
243
244
245/**
246 * Gets the description of a VMX instruction diagnostic enum member.
247 *
248 * @returns The descriptive string.
249 * @param enmInstrDiag The VMX instruction diagnostic.
250 */
251VMM_INT_DECL(const char *) HMVmxGetInstrDiagDesc(VMXVINSTRDIAG enmInstrDiag)
252{
253 if (RT_LIKELY((unsigned)enmInstrDiag < RT_ELEMENTS(g_apszVmxInstrDiagDesc)))
254 return g_apszVmxInstrDiagDesc[enmInstrDiag];
255 return "Unknown/invalid";
256}
257
258
259/**
260 * Checks if a code selector (CS) is suitable for execution using hardware-assisted
261 * VMX when unrestricted execution isn't available.
262 *
263 * @returns true if selector is suitable for VMX, otherwise
264 * false.
265 * @param pSel Pointer to the selector to check (CS).
266 * @param uStackDpl The CPL, aka the DPL of the stack segment.
267 */
268static bool hmVmxIsCodeSelectorOk(PCCPUMSELREG pSel, unsigned uStackDpl)
269{
270 /*
271 * Segment must be an accessed code segment, it must be present and it must
272 * be usable.
273 * Note! These are all standard requirements and if CS holds anything else
274 * we've got buggy code somewhere!
275 */
276 AssertCompile(X86DESCATTR_TYPE == 0xf);
277 AssertMsgReturn( (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P | X86DESCATTR_UNUSABLE))
278 == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P),
279 ("%#x\n", pSel->Attr.u),
280 false);
281
282 /* For conforming segments, CS.DPL must be <= SS.DPL, while CS.DPL
283 must equal SS.DPL for non-confroming segments.
284 Note! This is also a hard requirement like above. */
285 AssertMsgReturn( pSel->Attr.n.u4Type & X86_SEL_TYPE_CONF
286 ? pSel->Attr.n.u2Dpl <= uStackDpl
287 : pSel->Attr.n.u2Dpl == uStackDpl,
288 ("u4Type=%#x u2Dpl=%u uStackDpl=%u\n", pSel->Attr.n.u4Type, pSel->Attr.n.u2Dpl, uStackDpl),
289 false);
290
291 /*
292 * The following two requirements are VT-x specific:
293 * - G bit must be set if any high limit bits are set.
294 * - G bit must be clear if any low limit bits are clear.
295 */
296 if ( ((pSel->u32Limit & 0xfff00000) == 0x00000000 || pSel->Attr.n.u1Granularity)
297 && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
298 return true;
299 return false;
300}
301
302
303/**
304 * Checks if a data selector (DS/ES/FS/GS) is suitable for execution using
305 * hardware-assisted VMX when unrestricted execution isn't available.
306 *
307 * @returns true if selector is suitable for VMX, otherwise
308 * false.
309 * @param pSel Pointer to the selector to check
310 * (DS/ES/FS/GS).
311 */
312static bool hmVmxIsDataSelectorOk(PCCPUMSELREG pSel)
313{
314 /*
315 * Unusable segments are OK. These days they should be marked as such, as
316 * but as an alternative we for old saved states and AMD<->VT-x migration
317 * we also treat segments with all the attributes cleared as unusable.
318 */
319 if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
320 return true;
321
322 /** @todo tighten these checks. Will require CPUM load adjusting. */
323
324 /* Segment must be accessed. */
325 if (pSel->Attr.u & X86_SEL_TYPE_ACCESSED)
326 {
327 /* Code segments must also be readable. */
328 if ( !(pSel->Attr.u & X86_SEL_TYPE_CODE)
329 || (pSel->Attr.u & X86_SEL_TYPE_READ))
330 {
331 /* The S bit must be set. */
332 if (pSel->Attr.n.u1DescType)
333 {
334 /* Except for conforming segments, DPL >= RPL. */
335 if ( pSel->Attr.n.u2Dpl >= (pSel->Sel & X86_SEL_RPL)
336 || pSel->Attr.n.u4Type >= X86_SEL_TYPE_ER_ACC)
337 {
338 /* Segment must be present. */
339 if (pSel->Attr.n.u1Present)
340 {
341 /*
342 * The following two requirements are VT-x specific:
343 * - G bit must be set if any high limit bits are set.
344 * - G bit must be clear if any low limit bits are clear.
345 */
346 if ( ((pSel->u32Limit & 0xfff00000) == 0x00000000 || pSel->Attr.n.u1Granularity)
347 && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
348 return true;
349 }
350 }
351 }
352 }
353 }
354
355 return false;
356}
357
358
359/**
360 * Checks if the stack selector (SS) is suitable for execution using
361 * hardware-assisted VMX when unrestricted execution isn't available.
362 *
363 * @returns true if selector is suitable for VMX, otherwise
364 * false.
365 * @param pSel Pointer to the selector to check (SS).
366 */
367static bool hmVmxIsStackSelectorOk(PCCPUMSELREG pSel)
368{
369 /*
370 * Unusable segments are OK. These days they should be marked as such, as
371 * but as an alternative we for old saved states and AMD<->VT-x migration
372 * we also treat segments with all the attributes cleared as unusable.
373 */
374 /** @todo r=bird: actually all zeroes isn't gonna cut it... SS.DPL == CPL. */
375 if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
376 return true;
377
378 /*
379 * Segment must be an accessed writable segment, it must be present.
380 * Note! These are all standard requirements and if SS holds anything else
381 * we've got buggy code somewhere!
382 */
383 AssertCompile(X86DESCATTR_TYPE == 0xf);
384 AssertMsgReturn( (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P | X86_SEL_TYPE_CODE))
385 == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P),
386 ("%#x\n", pSel->Attr.u), false);
387
388 /* DPL must equal RPL.
389 Note! This is also a hard requirement like above. */
390 AssertMsgReturn(pSel->Attr.n.u2Dpl == (pSel->Sel & X86_SEL_RPL),
391 ("u2Dpl=%u Sel=%#x\n", pSel->Attr.n.u2Dpl, pSel->Sel), false);
392
393 /*
394 * The following two requirements are VT-x specific:
395 * - G bit must be set if any high limit bits are set.
396 * - G bit must be clear if any low limit bits are clear.
397 */
398 if ( ((pSel->u32Limit & 0xfff00000) == 0x00000000 || pSel->Attr.n.u1Granularity)
399 && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
400 return true;
401 return false;
402}
403
404
/**
 * Checks if the guest is in a suitable state for hardware-assisted VMX execution.
 *
 * @returns @c true if it is suitable, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest CPU context.
 *
 * @remarks @a pCtx can be a partial context and thus may not be necessarily the
 *          same as pVCpu->cpum.GstCtx! Thus don't eliminate the @a pCtx parameter.
 *          Secondly, if additional checks are added that require more of the CPU
 *          state, make sure REM (which supplies a partial state) is updated.
 */
VMM_INT_DECL(bool) HMVmxCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(HMIsEnabled(pVM));
    Assert(!CPUMIsGuestVmxEnabled(pCtx));
    /* Exactly one of: unrestricted guest execution, or a real-mode TSS set up for emulation. */
    Assert(   ( pVM->hm.s.vmx.fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS)
           || (!pVM->hm.s.vmx.fUnrestrictedGuest && pVM->hm.s.vmx.pRealModeTSS));

    /* Pessimistic default; only set to true when every check below passes. */
    pVCpu->hm.s.fActive = false;

    bool const fSupportsRealMode = pVM->hm.s.vmx.fUnrestrictedGuest || PDMVmmDevHeapIsEnabled(pVM);
    if (!pVM->hm.s.vmx.fUnrestrictedGuest)
    {
        /*
         * The VMM device heap is a requirement for emulating real mode or protected mode without paging
         * when the unrestricted guest execution feature is missing (VT-x only).
         */
        if (fSupportsRealMode)
        {
            if (CPUMIsGuestInRealModeEx(pCtx))
            {
                /*
                 * In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
                 * bases and limits, i.e. limit must be 64K and base must be selector * 16.
                 * If this is not true, we cannot execute real mode as V86 and have to fall
                 * back to emulation.
                 */
                if (   pCtx->cs.Sel != (pCtx->cs.u64Base >> 4)
                    || pCtx->ds.Sel != (pCtx->ds.u64Base >> 4)
                    || pCtx->es.Sel != (pCtx->es.u64Base >> 4)
                    || pCtx->ss.Sel != (pCtx->ss.u64Base >> 4)
                    || pCtx->fs.Sel != (pCtx->fs.u64Base >> 4)
                    || pCtx->gs.Sel != (pCtx->gs.u64Base >> 4))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelBase);
                    return false;
                }
                /* All real-mode segment limits must be exactly 64K-1. */
                if (   (pCtx->cs.u32Limit != 0xffff)
                    || (pCtx->ds.u32Limit != 0xffff)
                    || (pCtx->es.u32Limit != 0xffff)
                    || (pCtx->ss.u32Limit != 0xffff)
                    || (pCtx->fs.u32Limit != 0xffff)
                    || (pCtx->gs.u32Limit != 0xffff))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelLimit);
                    return false;
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckRmOk);
            }
            else
            {
                /*
                 * Verify the requirements for executing code in protected mode. VT-x can't
                 * handle the CPU state right after a switch from real to protected mode
                 * (all sorts of RPL & DPL assumptions).
                 */
                if (pVCpu->hm.s.vmx.fWasInRealMode)
                {
                    /** @todo If guest is in V86 mode, these checks should be different! */
                    if ((pCtx->cs.Sel & X86_SEL_RPL) != (pCtx->ss.Sel & X86_SEL_RPL))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRpl);
                        return false;
                    }
                    /* CS/SS/DS/ES/FS/GS must each individually satisfy the VT-x selector rules. */
                    if (   !hmVmxIsCodeSelectorOk(&pCtx->cs, pCtx->ss.Attr.n.u2Dpl)
                        || !hmVmxIsDataSelectorOk(&pCtx->ds)
                        || !hmVmxIsDataSelectorOk(&pCtx->es)
                        || !hmVmxIsDataSelectorOk(&pCtx->fs)
                        || !hmVmxIsDataSelectorOk(&pCtx->gs)
                        || !hmVmxIsStackSelectorOk(&pCtx->ss))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
                        return false;
                    }
                }
                /* VT-x also chokes on invalid TR or LDTR selectors (minix). */
                if (pCtx->gdtr.cbGdt)
                {
                    /* OR-ing in X86_SEL_RPL_LDT gives the highest byte offset the selector can address. */
                    if ((pCtx->tr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadTr);
                        return false;
                    }
                    else if ((pCtx->ldtr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadLdt);
                        return false;
                    }
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckPmOk);
            }
        }
        else
        {
            /* NOTE(review): the second operand is redundant here -- we are already inside
               the !fUnrestrictedGuest branch -- but it is kept byte-identical. */
            if (   !CPUMIsGuestInLongModeEx(pCtx)
                && !pVM->hm.s.vmx.fUnrestrictedGuest)
            {
                if (   !pVM->hm.s.fNestedPaging        /* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
                    || CPUMIsGuestInRealModeEx(pCtx))  /* Requires a fake TSS for real mode - stored in the VMM device heap */
                    return false;

                /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
                if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr.Sel == 0)
                    return false;

                /*
                 * The guest is about to complete the switch to protected mode. Wait a bit longer.
                 * Windows XP; switch to protected mode; all selectors are marked not present
                 * in the hidden registers (possible recompiler bug; see load_seg_vm).
                 */
                /** @todo Is this supposed recompiler bug still relevant with IEM? */
                if (pCtx->cs.Attr.n.u1Present == 0)
                    return false;
                if (pCtx->ss.Attr.n.u1Present == 0)
                    return false;

                /*
                 * Windows XP: possible same as above, but new recompiler requires new
                 * heuristics? VT-x doesn't seem to like something about the guest state and
                 * this stuff avoids it.
                 */
                /** @todo This check is actually wrong, it doesn't take the direction of the
                 *        stack segment into account. But, it does the job for now. */
                if (pCtx->rsp >= pCtx->ss.u32Limit)
                    return false;
            }
        }
    }

    if (pVM->hm.s.vmx.fEnabled)
    {
        /* NOTE(review): uCr0Mask is reused for the CR4 fixed-bit checks further
           down despite its name. */
        uint32_t uCr0Mask;

        /* If bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
        uCr0Mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr0Fixed0;

        /* We ignore the NE bit here on purpose; see HMR0.cpp for details. */
        uCr0Mask &= ~X86_CR0_NE;

        if (fSupportsRealMode)
        {
            /* We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
            uCr0Mask &= ~(X86_CR0_PG|X86_CR0_PE);
        }
        else
        {
            /* We support protected mode without paging using identity mapping. */
            uCr0Mask &= ~X86_CR0_PG;
        }
        if ((pCtx->cr0 & uCr0Mask) != uCr0Mask)
            return false;

        /* If bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
        if ((pCtx->cr0 & uCr0Mask) != 0)
            return false;

        /* If bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
        uCr0Mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr4Fixed0;
        uCr0Mask &= ~X86_CR4_VMXE;
        if ((pCtx->cr4 & uCr0Mask) != uCr0Mask)
            return false;

        /* If bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
        if ((pCtx->cr4 & uCr0Mask) != 0)
            return false;

        /* All checks passed: hardware-assisted execution is possible. */
        pVCpu->hm.s.fActive = true;
        return true;
    }

    return false;
}
591
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette