VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp@ 73983

Last change on this file since 73983 was 73983, checked in by vboxsync, 6 years ago

VMM/IEM, HM: Nested VMX: bugref:9180 Implement VMREAD, added using decoded IEM APIs for
VMXON, VMREAD, VMWRITE in VMX R0 code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 25.2 KB
Line 
1/* $Id: HMVMXAll.cpp 73983 2018-08-31 08:17:31Z vboxsync $ */
2/** @file
3 * HM VMX (VT-x) - All contexts.
4 */
5
6/*
7 * Copyright (C) 2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_HM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include "HMInternal.h"
25#include <VBox/vmm/vm.h>
26#include <VBox/vmm/pdmapi.h>
27
28
29/*********************************************************************************************************************************
30* Global Variables *
31*********************************************************************************************************************************/
32#define VMX_INSTR_DIAG_DESC(a_Def, a_Desc) #a_Def " - " #a_Desc
33static const char * const g_apszVmxInstrDiagDesc[kVmxVInstrDiag_Last] =
34{
35 /* Internal processing errors. */
36 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_1 , "Ipe_1" ),
37 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_2 , "Ipe_2" ),
38 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_3 , "Ipe_3" ),
39 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_4 , "Ipe_4" ),
40 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_5 , "Ipe_5" ),
41 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_6 , "Ipe_6" ),
42 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_7 , "Ipe_7" ),
43 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_8 , "Ipe_8" ),
44 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_9 , "Ipe_9" ),
45 /* VMXON. */
46 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_A20M , "A20M" ),
47 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cpl , "Cpl" ),
48 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cr0Fixed0 , "Cr0Fixed0" ),
49 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cr4Fixed0 , "Cr4Fixed0" ),
50 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Intercept , "Intercept" ),
51 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_LongModeCS , "LongModeCS" ),
52 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_MsrFeatCtl , "MsrFeatCtl" ),
53 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrAbnormal , "PtrAbnormal" ),
54 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrAlign , "PtrAlign" ),
55 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrMap , "PtrMap" ),
56 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrPhysRead , "PtrPhysRead" ),
57 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrWidth , "PtrWidth" ),
58 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_RealOrV86Mode , "RealOrV86Mode" ),
59 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Success , "Success" ),
60 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_ShadowVmcs , "ShadowVmcs" ),
61 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Vmxe , "Vmxe" ),
62 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmcsRevId , "VmcsRevId" ),
63 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmxRoot , "VmxRoot" ),
64 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmxRootCpl , "VmxRootCpl" ),
65 /* VMXOFF. */
66 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Cpl , "Cpl" ),
67 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Intercept , "Intercept" ),
68 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_LongModeCS , "LongModeCS" ),
69 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_RealOrV86Mode , "RealOrV86Mode" ),
70 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Success , "Success" ),
71 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Vmxe , "Vmxe" ),
72 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_VmxRoot , "VmxRoot" ),
73 /* VMPTRLD. */
74 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_Cpl , "Cpl" ),
75 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrAbnormal , "PtrAbnormal" ),
76 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrAlign , "PtrAlign" ),
77 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrMap , "PtrMap" ),
78 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrReadPhys , "PtrReadPhys" ),
79 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrVmxon , "PtrVmxon" ),
80 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrWidth , "PtrWidth" ),
81 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_ShadowVmcs , "ShadowVmcs" ),
82 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_Success , "Success" ),
83 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_VmcsRevId , "VmcsRevId" )
84 /* VMPTRST. */
85 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_Cpl , "Cpl" ),
86 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_PtrMap , "PtrMap" ),
87 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_Success , "Success" ),
88 /* VMCLEAR. */
89 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_Cpl , "Cpl" ),
90 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrAbnormal , "PtrAbnormal" ),
91 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrAlign , "PtrAlign" ),
92 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrMap , "PtrMap" ),
93 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrReadPhys , "PtrReadPhys" ),
94 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrVmxon , "PtrVmxon" ),
95 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrWidth , "PtrWidth" ),
96 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_Success , "Success" ),
97 /* VMWRITE. */
98 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_Cpl , "Cpl" ),
99 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_FieldInvalid , "FieldInvalid" ),
100 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_FieldRo , "FieldRo" ),
101 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_LinkPtrInvalid, "LinkPtrInvalid"),
102 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_PtrInvalid , "PtrInvalid" ),
103 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_PtrMap , "PtrMap" ),
104 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_Success , "Success" ),
105 /* VMREAD. */
106 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_Cpl , "Cpl" ),
107 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_FieldInvalid , "FieldInvalid" ),
108 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_LinkPtrInvalid , "LinkPtrInvalid"),
109 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_PtrInvalid , "PtrInvalid" ),
110 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_PtrMap , "PtrMap" ),
111 VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmread_Success , "Success" )
112 /* kVmxVInstrDiag_Last */
113};
114#undef VMX_INSTR_DIAG_DESC
115
116
117/**
118 * Gets a copy of the VMX host MSRs that were read by HM during ring-0
119 * initialization.
120 *
121 * @return VBox status code.
122 * @param pVM The cross context VM structure.
123 * @param pVmxMsrs Where to store the VMXMSRS struct (only valid when
124 * VINF_SUCCESS is returned).
125 *
126 * @remarks Caller needs to take care not to call this function too early. Call
127 * after HM initialization is fully complete.
128 */
129VMM_INT_DECL(int) HMVmxGetHostMsrs(PVM pVM, PVMXMSRS pVmxMsrs)
130{
131 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
132 AssertPtrReturn(pVmxMsrs, VERR_INVALID_PARAMETER);
133 if (pVM->hm.s.vmx.fSupported)
134 {
135 *pVmxMsrs = pVM->hm.s.vmx.Msrs;
136 return VINF_SUCCESS;
137 }
138 return VERR_VMX_NOT_SUPPORTED;
139}
140
141
142/**
143 * Gets the specified VMX host MSR that was read by HM during ring-0
144 * initialization.
145 *
146 * @return VBox status code.
147 * @param pVM The cross context VM structure.
148 * @param idMsr The MSR.
149 * @param puValue Where to store the MSR value (only updated when VINF_SUCCESS
150 * is returned).
151 *
152 * @remarks Caller needs to take care not to call this function too early. Call
153 * after HM initialization is fully complete.
154 */
155VMM_INT_DECL(int) HMVmxGetHostMsr(PVM pVM, uint32_t idMsr, uint64_t *puValue)
156{
157 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
158 AssertPtrReturn(puValue, VERR_INVALID_PARAMETER);
159
160 if (!pVM->hm.s.vmx.fSupported)
161 return VERR_VMX_NOT_SUPPORTED;
162
163 PCVMXMSRS pVmxMsrs = &pVM->hm.s.vmx.Msrs;
164 switch (idMsr)
165 {
166 case MSR_IA32_FEATURE_CONTROL: *puValue = pVmxMsrs->u64FeatCtrl; break;
167 case MSR_IA32_VMX_BASIC: *puValue = pVmxMsrs->u64Basic; break;
168 case MSR_IA32_VMX_PINBASED_CTLS: *puValue = pVmxMsrs->PinCtls.u; break;
169 case MSR_IA32_VMX_PROCBASED_CTLS: *puValue = pVmxMsrs->ProcCtls.u; break;
170 case MSR_IA32_VMX_PROCBASED_CTLS2: *puValue = pVmxMsrs->ProcCtls2.u; break;
171 case MSR_IA32_VMX_EXIT_CTLS: *puValue = pVmxMsrs->ExitCtls.u; break;
172 case MSR_IA32_VMX_ENTRY_CTLS: *puValue = pVmxMsrs->EntryCtls.u; break;
173 case MSR_IA32_VMX_TRUE_PINBASED_CTLS: *puValue = pVmxMsrs->TruePinCtls.u; break;
174 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: *puValue = pVmxMsrs->TrueProcCtls.u; break;
175 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: *puValue = pVmxMsrs->TrueEntryCtls.u; break;
176 case MSR_IA32_VMX_TRUE_EXIT_CTLS: *puValue = pVmxMsrs->TrueExitCtls.u; break;
177 case MSR_IA32_VMX_MISC: *puValue = pVmxMsrs->u64Misc; break;
178 case MSR_IA32_VMX_CR0_FIXED0: *puValue = pVmxMsrs->u64Cr0Fixed0; break;
179 case MSR_IA32_VMX_CR0_FIXED1: *puValue = pVmxMsrs->u64Cr0Fixed1; break;
180 case MSR_IA32_VMX_CR4_FIXED0: *puValue = pVmxMsrs->u64Cr4Fixed0; break;
181 case MSR_IA32_VMX_CR4_FIXED1: *puValue = pVmxMsrs->u64Cr4Fixed1; break;
182 case MSR_IA32_VMX_VMCS_ENUM: *puValue = pVmxMsrs->u64VmcsEnum; break;
183 case MSR_IA32_VMX_VMFUNC: *puValue = pVmxMsrs->u64VmFunc; break;
184 case MSR_IA32_VMX_EPT_VPID_CAP: *puValue = pVmxMsrs->u64EptVpidCaps; break;
185 default:
186 {
187 AssertMsgFailed(("Invalid MSR %#x\n", idMsr));
188 return VERR_NOT_FOUND;
189 }
190 }
191 return VINF_SUCCESS;
192}
193
194
195/**
196 * Gets the description of a VMX instruction diagnostic enum member.
197 *
198 * @returns The descriptive string.
199 * @param enmInstrDiag The VMX instruction diagnostic.
200 */
201VMM_INT_DECL(const char *) HMVmxGetInstrDiagDesc(VMXVINSTRDIAG enmInstrDiag)
202{
203 if (RT_LIKELY((unsigned)enmInstrDiag < RT_ELEMENTS(g_apszVmxInstrDiagDesc)))
204 return g_apszVmxInstrDiagDesc[enmInstrDiag];
205 return "Unknown/invalid";
206}
207
208
209/**
210 * Checks if a code selector (CS) is suitable for execution using hardware-assisted
211 * VMX when unrestricted execution isn't available.
212 *
213 * @returns true if selector is suitable for VMX, otherwise
214 * false.
215 * @param pSel Pointer to the selector to check (CS).
216 * @param uStackDpl The CPL, aka the DPL of the stack segment.
217 */
218static bool hmVmxIsCodeSelectorOk(PCCPUMSELREG pSel, unsigned uStackDpl)
219{
220 /*
221 * Segment must be an accessed code segment, it must be present and it must
222 * be usable.
223 * Note! These are all standard requirements and if CS holds anything else
224 * we've got buggy code somewhere!
225 */
226 AssertCompile(X86DESCATTR_TYPE == 0xf);
227 AssertMsgReturn( (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P | X86DESCATTR_UNUSABLE))
228 == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P),
229 ("%#x\n", pSel->Attr.u),
230 false);
231
232 /* For conforming segments, CS.DPL must be <= SS.DPL, while CS.DPL
233 must equal SS.DPL for non-confroming segments.
234 Note! This is also a hard requirement like above. */
235 AssertMsgReturn( pSel->Attr.n.u4Type & X86_SEL_TYPE_CONF
236 ? pSel->Attr.n.u2Dpl <= uStackDpl
237 : pSel->Attr.n.u2Dpl == uStackDpl,
238 ("u4Type=%#x u2Dpl=%u uStackDpl=%u\n", pSel->Attr.n.u4Type, pSel->Attr.n.u2Dpl, uStackDpl),
239 false);
240
241 /*
242 * The following two requirements are VT-x specific:
243 * - G bit must be set if any high limit bits are set.
244 * - G bit must be clear if any low limit bits are clear.
245 */
246 if ( ((pSel->u32Limit & 0xfff00000) == 0x00000000 || pSel->Attr.n.u1Granularity)
247 && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
248 return true;
249 return false;
250}
251
252
253/**
254 * Checks if a data selector (DS/ES/FS/GS) is suitable for execution using
255 * hardware-assisted VMX when unrestricted execution isn't available.
256 *
257 * @returns true if selector is suitable for VMX, otherwise
258 * false.
259 * @param pSel Pointer to the selector to check
260 * (DS/ES/FS/GS).
261 */
262static bool hmVmxIsDataSelectorOk(PCCPUMSELREG pSel)
263{
264 /*
265 * Unusable segments are OK. These days they should be marked as such, as
266 * but as an alternative we for old saved states and AMD<->VT-x migration
267 * we also treat segments with all the attributes cleared as unusable.
268 */
269 if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
270 return true;
271
272 /** @todo tighten these checks. Will require CPUM load adjusting. */
273
274 /* Segment must be accessed. */
275 if (pSel->Attr.u & X86_SEL_TYPE_ACCESSED)
276 {
277 /* Code segments must also be readable. */
278 if ( !(pSel->Attr.u & X86_SEL_TYPE_CODE)
279 || (pSel->Attr.u & X86_SEL_TYPE_READ))
280 {
281 /* The S bit must be set. */
282 if (pSel->Attr.n.u1DescType)
283 {
284 /* Except for conforming segments, DPL >= RPL. */
285 if ( pSel->Attr.n.u2Dpl >= (pSel->Sel & X86_SEL_RPL)
286 || pSel->Attr.n.u4Type >= X86_SEL_TYPE_ER_ACC)
287 {
288 /* Segment must be present. */
289 if (pSel->Attr.n.u1Present)
290 {
291 /*
292 * The following two requirements are VT-x specific:
293 * - G bit must be set if any high limit bits are set.
294 * - G bit must be clear if any low limit bits are clear.
295 */
296 if ( ((pSel->u32Limit & 0xfff00000) == 0x00000000 || pSel->Attr.n.u1Granularity)
297 && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
298 return true;
299 }
300 }
301 }
302 }
303 }
304
305 return false;
306}
307
308
309/**
310 * Checks if the stack selector (SS) is suitable for execution using
311 * hardware-assisted VMX when unrestricted execution isn't available.
312 *
313 * @returns true if selector is suitable for VMX, otherwise
314 * false.
315 * @param pSel Pointer to the selector to check (SS).
316 */
317static bool hmVmxIsStackSelectorOk(PCCPUMSELREG pSel)
318{
319 /*
320 * Unusable segments are OK. These days they should be marked as such, as
321 * but as an alternative we for old saved states and AMD<->VT-x migration
322 * we also treat segments with all the attributes cleared as unusable.
323 */
324 /** @todo r=bird: actually all zeroes isn't gonna cut it... SS.DPL == CPL. */
325 if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
326 return true;
327
328 /*
329 * Segment must be an accessed writable segment, it must be present.
330 * Note! These are all standard requirements and if SS holds anything else
331 * we've got buggy code somewhere!
332 */
333 AssertCompile(X86DESCATTR_TYPE == 0xf);
334 AssertMsgReturn( (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P | X86_SEL_TYPE_CODE))
335 == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P),
336 ("%#x\n", pSel->Attr.u), false);
337
338 /* DPL must equal RPL.
339 Note! This is also a hard requirement like above. */
340 AssertMsgReturn(pSel->Attr.n.u2Dpl == (pSel->Sel & X86_SEL_RPL),
341 ("u2Dpl=%u Sel=%#x\n", pSel->Attr.n.u2Dpl, pSel->Sel), false);
342
343 /*
344 * The following two requirements are VT-x specific:
345 * - G bit must be set if any high limit bits are set.
346 * - G bit must be clear if any low limit bits are clear.
347 */
348 if ( ((pSel->u32Limit & 0xfff00000) == 0x00000000 || pSel->Attr.n.u1Granularity)
349 && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
350 return true;
351 return false;
352}
353
354
/**
 * Checks if the guest is in a suitable state for hardware-assisted VMX execution.
 *
 * @returns @c true if it is suitable, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest CPU context.
 *
 * @remarks @a pCtx can be a partial context and thus may not be necessarily the
 *          same as pVCpu->cpum.GstCtx! Thus don't eliminate the @a pCtx parameter.
 *          Secondly, if additional checks are added that require more of the CPU
 *          state, make sure REM (which supplies a partial state) is updated.
 */
VMM_INT_DECL(bool) HMVmxCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(HMIsEnabled(pVM));
    Assert(!CPUMIsGuestVmxEnabled(pCtx));
    /* Either unrestricted guest execution is on (no real-mode TSS needed) or
       a real-mode TSS must have been set up for the V86 fallback - never both. */
    Assert(   ( pVM->hm.s.vmx.fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS)
           || (!pVM->hm.s.vmx.fUnrestrictedGuest &&  pVM->hm.s.vmx.pRealModeTSS));

    /* Pessimistic default; only set true on the success path at the bottom. */
    pVCpu->hm.s.fActive = false;

    bool const fSupportsRealMode = pVM->hm.s.vmx.fUnrestrictedGuest || PDMVmmDevHeapIsEnabled(pVM);
    if (!pVM->hm.s.vmx.fUnrestrictedGuest)
    {
        /*
         * The VMM device heap is a requirement for emulating real mode or protected mode without paging with the unrestricted
         * guest execution feature is missing (VT-x only).
         */
        if (fSupportsRealMode)
        {
            if (CPUMIsGuestInRealModeEx(pCtx))
            {
                /*
                 * In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
                 * bases and limits, i.e. limit must be 64K and base must be selector * 16.
                 * If this is not true, we cannot execute real mode as V86 and have to fall
                 * back to emulation.
                 */
                if (   pCtx->cs.Sel != (pCtx->cs.u64Base >> 4)
                    || pCtx->ds.Sel != (pCtx->ds.u64Base >> 4)
                    || pCtx->es.Sel != (pCtx->es.u64Base >> 4)
                    || pCtx->ss.Sel != (pCtx->ss.u64Base >> 4)
                    || pCtx->fs.Sel != (pCtx->fs.u64Base >> 4)
                    || pCtx->gs.Sel != (pCtx->gs.u64Base >> 4))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelBase);
                    return false;
                }
                /* All limits must be exactly 64K-1 for V86-mode execution. */
                if (   (pCtx->cs.u32Limit != 0xffff)
                    || (pCtx->ds.u32Limit != 0xffff)
                    || (pCtx->es.u32Limit != 0xffff)
                    || (pCtx->ss.u32Limit != 0xffff)
                    || (pCtx->fs.u32Limit != 0xffff)
                    || (pCtx->gs.u32Limit != 0xffff))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelLimit);
                    return false;
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckRmOk);
            }
            else
            {
                /*
                 * Verify the requirements for executing code in protected mode. VT-x can't
                 * handle the CPU state right after a switch from real to protected mode
                 * (all sorts of RPL & DPL assumptions).
                 */
                if (pVCpu->hm.s.vmx.fWasInRealMode)
                {
                    /** @todo If guest is in V86 mode, these checks should be different! */
                    if ((pCtx->cs.Sel & X86_SEL_RPL) != (pCtx->ss.Sel & X86_SEL_RPL))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRpl);
                        return false;
                    }
                    /* Validate each segment register against VT-x guest-state rules
                       (see the hmVmxIs*SelectorOk helpers above). */
                    if (   !hmVmxIsCodeSelectorOk(&pCtx->cs, pCtx->ss.Attr.n.u2Dpl)
                        || !hmVmxIsDataSelectorOk(&pCtx->ds)
                        || !hmVmxIsDataSelectorOk(&pCtx->es)
                        || !hmVmxIsDataSelectorOk(&pCtx->fs)
                        || !hmVmxIsDataSelectorOk(&pCtx->gs)
                        || !hmVmxIsStackSelectorOk(&pCtx->ss))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
                        return false;
                    }
                }
                /* VT-x also chokes on invalid TR or LDTR selectors (minix). */
                if (pCtx->gdtr.cbGdt)
                {
                    /* OR-ing in X86_SEL_RPL_LDT yields the selector's highest byte
                       offset within the GDT, which must not exceed the GDT limit. */
                    if ((pCtx->tr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadTr);
                        return false;
                    }
                    else if ((pCtx->ldtr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadLdt);
                        return false;
                    }
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckPmOk);
            }
        }
        else
        {
            /* No VMM device heap available (and no unrestricted guest). */
            if (   !CPUMIsGuestInLongModeEx(pCtx)
                && !pVM->hm.s.vmx.fUnrestrictedGuest)
            {
                if (   !pVM->hm.s.fNestedPaging /* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
                    || CPUMIsGuestInRealModeEx(pCtx)) /* Requires a fake TSS for real mode - stored in the VMM device heap */
                    return false;

                /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
                if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr.Sel == 0)
                    return false;

                /*
                 * The guest is about to complete the switch to protected mode. Wait a bit longer.
                 * Windows XP; switch to protected mode; all selectors are marked not present
                 * in the hidden registers (possible recompiler bug; see load_seg_vm).
                 */
                /** @todo Is this supposed recompiler bug still relevant with IEM? */
                if (pCtx->cs.Attr.n.u1Present == 0)
                    return false;
                if (pCtx->ss.Attr.n.u1Present == 0)
                    return false;

                /*
                 * Windows XP: possible same as above, but new recompiler requires new
                 * heuristics? VT-x doesn't seem to like something about the guest state and
                 * this stuff avoids it.
                 */
                /** @todo This check is actually wrong, it doesn't take the direction of the
                 *        stack segment into account. But, it does the job for now. */
                if (pCtx->rsp >= pCtx->ss.u32Limit)
                    return false;
            }
        }
    }

    if (pVM->hm.s.vmx.fEnabled)
    {
        /* NOTE(review): uCr0Mask is reused below for the CR4 fixed-bit masks as
           well despite its name - presumably intentional reuse; consider renaming. */
        uint32_t uCr0Mask;

        /* If bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
        uCr0Mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr0Fixed0;

        /* We ignore the NE bit here on purpose; see HMR0.cpp for details. */
        uCr0Mask &= ~X86_CR0_NE;

        if (fSupportsRealMode)
        {
            /* We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
            uCr0Mask &= ~(X86_CR0_PG|X86_CR0_PE);
        }
        else
        {
            /* We support protected mode without paging using identity mapping. */
            uCr0Mask &= ~X86_CR0_PG;
        }
        if ((pCtx->cr0 & uCr0Mask) != uCr0Mask)
            return false;

        /* If bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
        if ((pCtx->cr0 & uCr0Mask) != 0)
            return false;

        /* If bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
        uCr0Mask  = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr4Fixed0;
        uCr0Mask &= ~X86_CR4_VMXE;
        if ((pCtx->cr4 & uCr0Mask) != uCr0Mask)
            return false;

        /* If bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
        if ((pCtx->cr4 & uCr0Mask) != 0)
            return false;

        /* All checks passed: hardware-assisted execution is possible. */
        pVCpu->hm.s.fActive = true;
        return true;
    }

    return false;
}
541
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette