VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp@ 73783

Last change on this file since 73783 was 73756, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 VMCLEAR skeleton.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 23.3 KB
/* $Id: HMVMXAll.cpp 73756 2018-08-18 05:13:26Z vboxsync $ */
/** @file
 * HM VMX (VT-x) - All contexts.
 */

/*
 * Copyright (C) 2018 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/pdmapi.h>


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#define VMX_INSTR_DIAG_DESC(a_Def, a_Desc)      #a_Def " - " #a_Desc
static const char * const g_apszVmxInstrDiagDesc[kVmxVInstrDiag_Last] =
{
    /* Internal processing errors. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_1               , "Ipe_1"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_2               , "Ipe_2"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_3               , "Ipe_3"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_4               , "Ipe_4"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_5               , "Ipe_5"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_6               , "Ipe_6"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_7               , "Ipe_7"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_8               , "Ipe_8"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_9               , "Ipe_9"        ),
    /* VMXON. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_A20M          , "A20M"         ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cpl           , "Cpl"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cr0Fixed0     , "Cr0Fixed0"    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cr4Fixed0     , "Cr4Fixed0"    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Intercept     , "Intercept"    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_LongModeCS    , "LongModeCS"   ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_MsrFeatCtl    , "MsrFeatCtl"   ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrAbnormal   , "PtrAbnormal"  ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrAlign      , "PtrAlign"     ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrMap        , "PtrMap"       ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrPhysRead   , "PtrPhysRead"  ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrWidth      , "PtrWidth"     ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_RealOrV86Mode , "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Success       , "Success"      ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_ShadowVmcs    , "ShadowVmcs"   ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Vmxe          , "Vmxe"         ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmcsRevId     , "VmcsRevId"    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmxRoot       , "VmxRoot"      ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmxRootCpl    , "VmxRootCpl"   ),
    /* VMXOFF. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Cpl          , "Cpl"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Intercept    , "Intercept"    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_LongModeCS   , "LongModeCS"   ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Success      , "Success"      ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Vmxe         , "Vmxe"         ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_VmxRoot      , "VmxRoot"      ),
    /* VMPTRLD. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_Cpl         , "Cpl"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrAbnormal , "PtrAbnormal"  ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrAlign    , "PtrAlign"     ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrMap      , "PtrMap"       ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrReadPhys , "PtrReadPhys"  ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrVmxon    , "PtrVmxon"     ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrWidth    , "PtrWidth"     ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_ShadowVmcs  , "ShadowVmcs"   ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_Success     , "Success"      ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_VmcsRevId   , "VmcsRevId"    ),
    /* VMPTRST. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_Cpl         , "Cpl"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_PtrMap      , "PtrMap"       ),
    /* VMCLEAR. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_Cpl         , "Cpl"          )
    /* kVmxVInstrDiag_Last */
};
#undef VMX_INSTR_DIAG_DESC
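/*
 * Note on the table above: VMX_INSTR_DIAG_DESC stringizes both of its
 * arguments, so an entry like VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cpl, "Cpl")
 * expands to the single string literal "kVmxVInstrDiag_Vmxon_Cpl - \"Cpl\"".
 * The array is indexed by the VMXVINSTRDIAG enum value, so the entries must
 * remain in enum order (see HMVmxGetInstrDiagDesc below).
 */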


/**
 * Gets a copy of the VMX host MSRs that were read by HM during ring-0
 * initialization.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVmxMsrs    Where to store the VMXMSRS struct (only valid when
 *                      VINF_SUCCESS is returned).
 *
 * @remarks Caller needs to take care not to call this function too early. Call
 *          after HM initialization is fully complete.
 */
VMM_INT_DECL(int) HMVmxGetHostMsrs(PVM pVM, PVMXMSRS pVmxMsrs)
{
    AssertPtrReturn(pVM,      VERR_INVALID_PARAMETER);
    AssertPtrReturn(pVmxMsrs, VERR_INVALID_PARAMETER);
    if (pVM->hm.s.vmx.fSupported)
    {
        *pVmxMsrs = pVM->hm.s.vmx.Msrs;
        return VINF_SUCCESS;
    }
    return VERR_VMX_NOT_SUPPORTED;
}
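/*
 * Illustrative usage sketch (hypothetical caller): copy out the whole host
 * MSR set once HM initialization has completed.
 *     VMXMSRS VmxMsrs;
 *     int rc = HMVmxGetHostMsrs(pVM, &VmxMsrs);
 *     if (RT_SUCCESS(rc))
 *         LogRel(("IA32_VMX_BASIC: %#RX64\n", VmxMsrs.u64Basic));
 */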


/**
 * Gets the specified VMX host MSR that was read by HM during ring-0
 * initialization.
 *
 * @returns VBox status code.
 * @param   pVM       The cross context VM structure.
 * @param   idMsr     The MSR.
 * @param   puValue   Where to store the MSR value (only updated when
 *                    VINF_SUCCESS is returned).
 *
 * @remarks Caller needs to take care not to call this function too early. Call
 *          after HM initialization is fully complete.
 */
VMM_INT_DECL(int) HMVmxGetHostMsr(PVM pVM, uint32_t idMsr, uint64_t *puValue)
{
    AssertPtrReturn(pVM,     VERR_INVALID_PARAMETER);
    AssertPtrReturn(puValue, VERR_INVALID_PARAMETER);

    if (!pVM->hm.s.vmx.fSupported)
        return VERR_VMX_NOT_SUPPORTED;

    PCVMXMSRS pVmxMsrs = &pVM->hm.s.vmx.Msrs;
    switch (idMsr)
    {
        case MSR_IA32_FEATURE_CONTROL:          *puValue = pVmxMsrs->u64FeatCtrl;     break;
        case MSR_IA32_VMX_BASIC:                *puValue = pVmxMsrs->u64Basic;        break;
        case MSR_IA32_VMX_PINBASED_CTLS:        *puValue = pVmxMsrs->PinCtls.u;       break;
        case MSR_IA32_VMX_PROCBASED_CTLS:       *puValue = pVmxMsrs->ProcCtls.u;      break;
        case MSR_IA32_VMX_PROCBASED_CTLS2:      *puValue = pVmxMsrs->ProcCtls2.u;     break;
        case MSR_IA32_VMX_EXIT_CTLS:            *puValue = pVmxMsrs->ExitCtls.u;      break;
        case MSR_IA32_VMX_ENTRY_CTLS:           *puValue = pVmxMsrs->EntryCtls.u;     break;
        case MSR_IA32_VMX_TRUE_PINBASED_CTLS:   *puValue = pVmxMsrs->TruePinCtls.u;   break;
        case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:  *puValue = pVmxMsrs->TrueProcCtls.u;  break;
        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:      *puValue = pVmxMsrs->TrueEntryCtls.u; break;
        case MSR_IA32_VMX_TRUE_EXIT_CTLS:       *puValue = pVmxMsrs->TrueExitCtls.u;  break;
        case MSR_IA32_VMX_MISC:                 *puValue = pVmxMsrs->u64Misc;         break;
        case MSR_IA32_VMX_CR0_FIXED0:           *puValue = pVmxMsrs->u64Cr0Fixed0;    break;
        case MSR_IA32_VMX_CR0_FIXED1:           *puValue = pVmxMsrs->u64Cr0Fixed1;    break;
        case MSR_IA32_VMX_CR4_FIXED0:           *puValue = pVmxMsrs->u64Cr4Fixed0;    break;
        case MSR_IA32_VMX_CR4_FIXED1:           *puValue = pVmxMsrs->u64Cr4Fixed1;    break;
        case MSR_IA32_VMX_VMCS_ENUM:            *puValue = pVmxMsrs->u64VmcsEnum;     break;
        case MSR_IA32_VMX_VMFUNC:               *puValue = pVmxMsrs->u64VmFunc;       break;
        case MSR_IA32_VMX_EPT_VPID_CAP:         *puValue = pVmxMsrs->u64EptVpidCaps;  break;
        default:
        {
            AssertMsgFailed(("Invalid MSR %#x\n", idMsr));
            return VERR_NOT_FOUND;
        }
    }
    return VINF_SUCCESS;
}
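/*
 * Illustrative usage sketch (hypothetical caller): query a single MSR rather
 * than copying the whole VMXMSRS struct.
 *     uint64_t uVmxMisc;
 *     int rc = HMVmxGetHostMsr(pVM, MSR_IA32_VMX_MISC, &uVmxMisc);
 *     if (RT_SUCCESS(rc))
 *         LogRel(("IA32_VMX_MISC: %#RX64\n", uVmxMisc));
 */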


/**
 * Gets the description of a VMX instruction diagnostic enum member.
 *
 * @returns The descriptive string.
 * @param   enmInstrDiag    The VMX instruction diagnostic.
 */
VMM_INT_DECL(const char *) HMVmxGetInstrDiagDesc(VMXVINSTRDIAG enmInstrDiag)
{
    if (RT_LIKELY((unsigned)enmInstrDiag < RT_ELEMENTS(g_apszVmxInstrDiagDesc)))
        return g_apszVmxInstrDiagDesc[enmInstrDiag];
    return "Unknown/invalid";
}
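/*
 * Illustrative usage sketch (hypothetical diagnostic value): log why a VMX
 * instruction was refused.
 *     Log(("VMXON failed: %s\n", HMVmxGetInstrDiagDesc(kVmxVInstrDiag_Vmxon_Cpl)));
 * Given the table above, this prints: VMXON failed: kVmxVInstrDiag_Vmxon_Cpl - "Cpl"
 */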


/**
 * Checks if a code selector (CS) is suitable for execution using
 * hardware-assisted VMX when unrestricted execution isn't available.
 *
 * @returns @c true if the selector is suitable for VMX, @c false otherwise.
 * @param   pSel        Pointer to the selector to check (CS).
 * @param   uStackDpl   The CPL, aka the DPL of the stack segment.
 */
static bool hmVmxIsCodeSelectorOk(PCCPUMSELREG pSel, unsigned uStackDpl)
{
    /*
     * Segment must be an accessed code segment, it must be present and it must
     * be usable.
     * Note! These are all standard requirements and if CS holds anything else
     *       we've got buggy code somewhere!
     */
    AssertCompile(X86DESCATTR_TYPE == 0xf);
    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P | X86DESCATTR_UNUSABLE))
                    == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P),
                    ("%#x\n", pSel->Attr.u),
                    false);

    /* For conforming segments, CS.DPL must be <= SS.DPL, while CS.DPL
       must equal SS.DPL for non-conforming segments.
       Note! This is also a hard requirement like above. */
    AssertMsgReturn(   pSel->Attr.n.u4Type & X86_SEL_TYPE_CONF
                    ?  pSel->Attr.n.u2Dpl <= uStackDpl
                    :  pSel->Attr.n.u2Dpl == uStackDpl,
                    ("u4Type=%#x u2Dpl=%u uStackDpl=%u\n", pSel->Attr.n.u4Type, pSel->Attr.n.u2Dpl, uStackDpl),
                    false);

    /*
     * The following two requirements are VT-x specific:
     *   - G bit must be set if any high limit bits are set.
     *   - G bit must be clear if any low limit bits are clear.
     */
    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
        return true;
    return false;
}
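/*
 * Worked examples of the granularity rule above (illustrative limit values):
 *   - u32Limit=0xffffffff: high bits set, so G must be 1 (flat 4G segment).
 *   - u32Limit=0x0000ffff: high bits clear and low 12 bits all set, so either
 *     G value is consistent (typical 64K real-mode style limit).
 *   - u32Limit=0x00012345: low 12 bits not all set, so G must be 0.
 */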


/**
 * Checks if a data selector (DS/ES/FS/GS) is suitable for execution using
 * hardware-assisted VMX when unrestricted execution isn't available.
 *
 * @returns @c true if the selector is suitable for VMX, @c false otherwise.
 * @param   pSel    Pointer to the selector to check (DS/ES/FS/GS).
 */
static bool hmVmxIsDataSelectorOk(PCCPUMSELREG pSel)
{
    /*
     * Unusable segments are OK.  These days they should be marked as such, but
     * as an alternative, for old saved states and AMD<->VT-x migration, we
     * also treat segments with all the attributes cleared as unusable.
     */
    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
        return true;

    /** @todo tighten these checks. Will require CPUM load adjusting. */

    /* Segment must be accessed. */
    if (pSel->Attr.u & X86_SEL_TYPE_ACCESSED)
    {
        /* Code segments must also be readable. */
        if (  !(pSel->Attr.u & X86_SEL_TYPE_CODE)
            || (pSel->Attr.u & X86_SEL_TYPE_READ))
        {
            /* The S bit must be set. */
            if (pSel->Attr.n.u1DescType)
            {
                /* Except for conforming segments, DPL >= RPL. */
                if (   pSel->Attr.n.u2Dpl  >= (pSel->Sel & X86_SEL_RPL)
                    || pSel->Attr.n.u4Type >= X86_SEL_TYPE_ER_ACC)
                {
                    /* Segment must be present. */
                    if (pSel->Attr.n.u1Present)
                    {
                        /*
                         * The following two requirements are VT-x specific:
                         *   - G bit must be set if any high limit bits are set.
                         *   - G bit must be clear if any low limit bits are clear.
                         */
                        if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
                            && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
                            return true;
                    }
                }
            }
        }
    }

    return false;
}
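/*
 * Worked example (illustrative attribute value): a flat ring-3 data segment
 * with Attr.u=0xc0f3 (type=3 accessed read/write data, S=1, DPL=3, P=1,
 * D/B=1, G=1), Sel RPL=3 and u32Limit=0xffffffff passes every check above.
 */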


/**
 * Checks if the stack selector (SS) is suitable for execution using
 * hardware-assisted VMX when unrestricted execution isn't available.
 *
 * @returns @c true if the selector is suitable for VMX, @c false otherwise.
 * @param   pSel    Pointer to the selector to check (SS).
 */
static bool hmVmxIsStackSelectorOk(PCCPUMSELREG pSel)
{
    /*
     * Unusable segments are OK.  These days they should be marked as such, but
     * as an alternative, for old saved states and AMD<->VT-x migration, we
     * also treat segments with all the attributes cleared as unusable.
     */
    /** @todo r=bird: actually all zeroes isn't gonna cut it... SS.DPL == CPL. */
    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
        return true;

    /*
     * Segment must be an accessed writable segment and it must be present.
     * Note! These are all standard requirements and if SS holds anything else
     *       we've got buggy code somewhere!
     */
    AssertCompile(X86DESCATTR_TYPE == 0xf);
    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P | X86_SEL_TYPE_CODE))
                    == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P),
                    ("%#x\n", pSel->Attr.u), false);

    /* DPL must equal RPL.
       Note! This is also a hard requirement like above. */
    AssertMsgReturn(pSel->Attr.n.u2Dpl == (pSel->Sel & X86_SEL_RPL),
                    ("u2Dpl=%u Sel=%#x\n", pSel->Attr.n.u2Dpl, pSel->Sel), false);

    /*
     * The following two requirements are VT-x specific:
     *   - G bit must be set if any high limit bits are set.
     *   - G bit must be clear if any low limit bits are clear.
     */
    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
        return true;
    return false;
}
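/*
 * Worked example (illustrative): SS.Sel=0x0023 (RPL=3) needs Attr.n.u2Dpl=3;
 * the same selector with DPL 0 trips the DPL==RPL assertion and is rejected.
 */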


/**
 * Checks if the guest is in a suitable state for hardware-assisted VMX execution.
 *
 * @returns @c true if it is suitable, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest CPU context.
 *
 * @remarks @a pCtx can be a partial context and thus may not necessarily be the
 *          same as pVCpu->cpum.GstCtx! Thus don't eliminate the @a pCtx parameter.
 *          Secondly, if additional checks are added that require more of the CPU
 *          state, make sure REM (which supplies a partial state) is updated.
 */
VMM_INT_DECL(bool) HMVmxCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(HMIsEnabled(pVM));
    Assert(!CPUMIsGuestVmxEnabled(pCtx));
    Assert(   ( pVM->hm.s.vmx.fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS)
           || (!pVM->hm.s.vmx.fUnrestrictedGuest &&  pVM->hm.s.vmx.pRealModeTSS));

    pVCpu->hm.s.fActive = false;

    bool const fSupportsRealMode = pVM->hm.s.vmx.fUnrestrictedGuest || PDMVmmDevHeapIsEnabled(pVM);
    if (!pVM->hm.s.vmx.fUnrestrictedGuest)
    {
        /*
         * The VMM device heap is a requirement for emulating real mode or protected mode
         * without paging when the unrestricted guest execution feature is missing (VT-x only).
         */
        if (fSupportsRealMode)
        {
            if (CPUMIsGuestInRealModeEx(pCtx))
            {
                /*
                 * In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
                 * bases and limits, i.e. limit must be 64K and base must be selector * 16.
                 * If this is not true, we cannot execute real mode as V86 and have to fall
                 * back to emulation.
                 */
                if (   pCtx->cs.Sel != (pCtx->cs.u64Base >> 4)
                    || pCtx->ds.Sel != (pCtx->ds.u64Base >> 4)
                    || pCtx->es.Sel != (pCtx->es.u64Base >> 4)
                    || pCtx->ss.Sel != (pCtx->ss.u64Base >> 4)
                    || pCtx->fs.Sel != (pCtx->fs.u64Base >> 4)
                    || pCtx->gs.Sel != (pCtx->gs.u64Base >> 4))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelBase);
                    return false;
                }
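                /* E.g. for the base check above: CS.Sel=0x1234 requires
                   CS.u64Base=0x12340, i.e. Sel * 16. */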
                if (   (pCtx->cs.u32Limit != 0xffff)
                    || (pCtx->ds.u32Limit != 0xffff)
                    || (pCtx->es.u32Limit != 0xffff)
                    || (pCtx->ss.u32Limit != 0xffff)
                    || (pCtx->fs.u32Limit != 0xffff)
                    || (pCtx->gs.u32Limit != 0xffff))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelLimit);
                    return false;
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckRmOk);
            }
            else
            {
                /*
                 * Verify the requirements for executing code in protected mode. VT-x can't
                 * handle the CPU state right after a switch from real to protected mode
                 * (all sorts of RPL & DPL assumptions).
                 */
                if (pVCpu->hm.s.vmx.fWasInRealMode)
                {
                    /** @todo If guest is in V86 mode, these checks should be different! */
                    if ((pCtx->cs.Sel & X86_SEL_RPL) != (pCtx->ss.Sel & X86_SEL_RPL))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRpl);
                        return false;
                    }
                    if (   !hmVmxIsCodeSelectorOk(&pCtx->cs, pCtx->ss.Attr.n.u2Dpl)
                        || !hmVmxIsDataSelectorOk(&pCtx->ds)
                        || !hmVmxIsDataSelectorOk(&pCtx->es)
                        || !hmVmxIsDataSelectorOk(&pCtx->fs)
                        || !hmVmxIsDataSelectorOk(&pCtx->gs)
                        || !hmVmxIsStackSelectorOk(&pCtx->ss))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
                        return false;
                    }
                }
                /* VT-x also chokes on invalid TR or LDTR selectors (minix). */
                if (pCtx->gdtr.cbGdt)
                {
                    if ((pCtx->tr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadTr);
                        return false;
                    }
                    else if ((pCtx->ldtr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadLdt);
                        return false;
                    }
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckPmOk);
            }
        }
        else
        {
            if (   !CPUMIsGuestInLongModeEx(pCtx)
                && !pVM->hm.s.vmx.fUnrestrictedGuest)
            {
                if (   !pVM->hm.s.fNestedPaging       /* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap. */
                    || CPUMIsGuestInRealModeEx(pCtx)) /* Requires a fake TSS for real mode - stored in the VMM device heap. */
                    return false;

                /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
                if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr.Sel == 0)
                    return false;

                /*
                 * The guest is about to complete the switch to protected mode. Wait a bit longer.
                 * Windows XP: during the switch to protected mode, all selectors are marked not
                 * present in the hidden registers (possible recompiler bug; see load_seg_vm).
                 */
                /** @todo Is this supposed recompiler bug still relevant with IEM? */
                if (pCtx->cs.Attr.n.u1Present == 0)
                    return false;
                if (pCtx->ss.Attr.n.u1Present == 0)
                    return false;

                /*
                 * Windows XP: possibly the same as above, but the new recompiler requires new
                 * heuristics?  VT-x doesn't seem to like something about the guest state and
                 * this stuff avoids it.
                 */
                /** @todo This check is actually wrong, it doesn't take the direction of the
                 *        stack segment into account. But, it does the job for now. */
                if (pCtx->rsp >= pCtx->ss.u32Limit)
                    return false;
            }
        }
    }

    if (pVM->hm.s.vmx.fEnabled)
    {
        /* If bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
        uint32_t uCr0Mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr0Fixed0;

        /* We ignore the NE bit here on purpose; see HMR0.cpp for details. */
        uCr0Mask &= ~X86_CR0_NE;

        if (fSupportsRealMode)
        {
            /* We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
            uCr0Mask &= ~(X86_CR0_PG | X86_CR0_PE);
        }
        else
        {
            /* We support protected mode without paging using identity mapping. */
            uCr0Mask &= ~X86_CR0_PG;
        }
        if ((pCtx->cr0 & uCr0Mask) != uCr0Mask)
            return false;
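
        /* Worked example (typical values, not architecturally guaranteed):
           cr0_fixed0 is often 0x80000021 (PE|NE|PG).  After clearing NE, and
           PE/PG too when real-mode emulation is supported, the mask is empty
           and any guest CR0 passes; otherwise the guest must have PE and PG set. */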

        /* If bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
        if ((pCtx->cr0 & uCr0Mask) != 0)
            return false;

        /* If bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
        uint32_t uCr4Mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr4Fixed0;
        uCr4Mask &= ~X86_CR4_VMXE;
        if ((pCtx->cr4 & uCr4Mask) != uCr4Mask)
            return false;

        /* If bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
        uCr4Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
        if ((pCtx->cr4 & uCr4Mask) != 0)
            return false;

        pVCpu->hm.s.fActive = true;
        return true;
    }

    return false;
}
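/*
 * Illustrative usage sketch (hypothetical call site): a scheduler would
 * consult this check before deciding how to run the VCPU, e.g.:
 *     if (HMVmxCanExecuteGuest(pVCpu, &pVCpu->cpum.GstCtx))
 *         rcStrict = VINF_EM_RESCHEDULE_HM;   // run with hardware assistance
 *     else
 *         rcStrict = VINF_EM_RESCHEDULE_REM;  // fall back to emulation
 */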