source: vbox/trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp@73937

Last change on this file since 73937 was 73937, checked in by vboxsync, 6 years ago

VMM/IEM, HM: Nested VMX: bugref:9180 Implemented VMWRITE instruction.

/* $Id: HMVMXAll.cpp 73937 2018-08-29 06:12:35Z vboxsync $ */
/** @file
 * HM VMX (VT-x) - All contexts.
 */

/*
 * Copyright (C) 2018 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/pdmapi.h>


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#define VMX_INSTR_DIAG_DESC(a_Def, a_Desc)  #a_Def " - " #a_Desc
static const char * const g_apszVmxInstrDiagDesc[kVmxVInstrDiag_Last] =
{
    /* Internal processing errors. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_1                 , "Ipe_1"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_2                 , "Ipe_2"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_3                 , "Ipe_3"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_4                 , "Ipe_4"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_5                 , "Ipe_5"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_6                 , "Ipe_6"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_7                 , "Ipe_7"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_8                 , "Ipe_8"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_9                 , "Ipe_9"          ),
    /* VMXON. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_A20M            , "A20M"           ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cpl             , "Cpl"            ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cr0Fixed0       , "Cr0Fixed0"      ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cr4Fixed0       , "Cr4Fixed0"      ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Intercept       , "Intercept"      ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_LongModeCS      , "LongModeCS"     ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_MsrFeatCtl      , "MsrFeatCtl"     ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrAbnormal     , "PtrAbnormal"    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrAlign        , "PtrAlign"       ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrMap          , "PtrMap"         ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrPhysRead     , "PtrPhysRead"    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrWidth        , "PtrWidth"       ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_RealOrV86Mode   , "RealOrV86Mode"  ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Success         , "Success"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_ShadowVmcs      , "ShadowVmcs"     ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Vmxe            , "Vmxe"           ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmcsRevId       , "VmcsRevId"      ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmxRoot         , "VmxRoot"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmxRootCpl      , "VmxRootCpl"     ),
    /* VMXOFF. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Cpl            , "Cpl"            ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Intercept      , "Intercept"      ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_LongModeCS     , "LongModeCS"     ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_RealOrV86Mode  , "RealOrV86Mode"  ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Success        , "Success"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Vmxe           , "Vmxe"           ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_VmxRoot        , "VmxRoot"        ),
    /* VMPTRLD. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_Cpl           , "Cpl"            ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrAbnormal   , "PtrAbnormal"    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrAlign      , "PtrAlign"       ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrMap        , "PtrMap"         ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrReadPhys   , "PtrReadPhys"    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrVmxon      , "PtrVmxon"       ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_PtrWidth      , "PtrWidth"       ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_ShadowVmcs    , "ShadowVmcs"     ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_Success       , "Success"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrld_VmcsRevId     , "VmcsRevId"      ),
    /* VMPTRST. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_Cpl           , "Cpl"            ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_PtrMap        , "PtrMap"         ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_Success       , "Success"        ),
    /* VMCLEAR. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_Cpl           , "Cpl"            ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrAbnormal   , "PtrAbnormal"    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrAlign      , "PtrAlign"       ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrMap        , "PtrMap"         ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrReadPhys   , "PtrReadPhys"    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrVmxon      , "PtrVmxon"       ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrWidth      , "PtrWidth"       ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_Success       , "Success"        ),
    /* VMWRITE. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_Cpl           , "Cpl"            ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_FieldInvalid  , "FieldInvalid"   ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_FieldRo       , "FieldRo"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_LinkPtrInvalid, "LinkPtrInvalid" ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_PtrInvalid    , "PtrInvalid"     ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_PtrMap        , "PtrMap"         ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmwrite_Success       , "Success"        )
    /* kVmxVInstrDiag_Last */
};
#undef VMX_INSTR_DIAG_DESC
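
/* Note (illustrative, not part of the original file): VMX_INSTR_DIAG_DESC
   stringizes both of its arguments, so e.g.
       VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cpl, "Cpl")
   expands to the concatenated literal
       "kVmxVInstrDiag_Vmxon_Cpl" " - " "\"Cpl\""
   i.e. the table entry reads: kVmxVInstrDiag_Vmxon_Cpl - "Cpl". */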


/**
 * Gets a copy of the VMX host MSRs that were read by HM during ring-0
 * initialization.
 *
 * @return VBox status code.
 * @param   pVM        The cross context VM structure.
 * @param   pVmxMsrs   Where to store the VMXMSRS struct (only valid when
 *                     VINF_SUCCESS is returned).
 *
 * @remarks Caller needs to take care not to call this function too early. Call
 *          after HM initialization is fully complete.
 */
VMM_INT_DECL(int) HMVmxGetHostMsrs(PVM pVM, PVMXMSRS pVmxMsrs)
{
    AssertPtrReturn(pVM,      VERR_INVALID_PARAMETER);
    AssertPtrReturn(pVmxMsrs, VERR_INVALID_PARAMETER);
    if (pVM->hm.s.vmx.fSupported)
    {
        *pVmxMsrs = pVM->hm.s.vmx.Msrs;
        return VINF_SUCCESS;
    }
    return VERR_VMX_NOT_SUPPORTED;
}
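
/* Usage sketch (illustrative, not part of the original file; assumes HM
   initialization has completed and pVM is valid):

       VMXMSRS VmxMsrs;
       int rc = HMVmxGetHostMsrs(pVM, &VmxMsrs);
       if (RT_SUCCESS(rc))
           LogRel(("IA32_VMX_BASIC: %#RX64\n", VmxMsrs.u64Basic));
*/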


/**
 * Gets the specified VMX host MSR that was read by HM during ring-0
 * initialization.
 *
 * @return VBox status code.
 * @param   pVM       The cross context VM structure.
 * @param   idMsr     The MSR.
 * @param   puValue   Where to store the MSR value (only updated when
 *                    VINF_SUCCESS is returned).
 *
 * @remarks Caller needs to take care not to call this function too early. Call
 *          after HM initialization is fully complete.
 */
VMM_INT_DECL(int) HMVmxGetHostMsr(PVM pVM, uint32_t idMsr, uint64_t *puValue)
{
    AssertPtrReturn(pVM,     VERR_INVALID_PARAMETER);
    AssertPtrReturn(puValue, VERR_INVALID_PARAMETER);

    if (!pVM->hm.s.vmx.fSupported)
        return VERR_VMX_NOT_SUPPORTED;

    PCVMXMSRS pVmxMsrs = &pVM->hm.s.vmx.Msrs;
    switch (idMsr)
    {
        case MSR_IA32_FEATURE_CONTROL:          *puValue = pVmxMsrs->u64FeatCtrl;     break;
        case MSR_IA32_VMX_BASIC:                *puValue = pVmxMsrs->u64Basic;        break;
        case MSR_IA32_VMX_PINBASED_CTLS:        *puValue = pVmxMsrs->PinCtls.u;       break;
        case MSR_IA32_VMX_PROCBASED_CTLS:       *puValue = pVmxMsrs->ProcCtls.u;      break;
        case MSR_IA32_VMX_PROCBASED_CTLS2:      *puValue = pVmxMsrs->ProcCtls2.u;     break;
        case MSR_IA32_VMX_EXIT_CTLS:            *puValue = pVmxMsrs->ExitCtls.u;      break;
        case MSR_IA32_VMX_ENTRY_CTLS:           *puValue = pVmxMsrs->EntryCtls.u;     break;
        case MSR_IA32_VMX_TRUE_PINBASED_CTLS:   *puValue = pVmxMsrs->TruePinCtls.u;   break;
        case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:  *puValue = pVmxMsrs->TrueProcCtls.u;  break;
        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:      *puValue = pVmxMsrs->TrueEntryCtls.u; break;
        case MSR_IA32_VMX_TRUE_EXIT_CTLS:       *puValue = pVmxMsrs->TrueExitCtls.u;  break;
        case MSR_IA32_VMX_MISC:                 *puValue = pVmxMsrs->u64Misc;         break;
        case MSR_IA32_VMX_CR0_FIXED0:           *puValue = pVmxMsrs->u64Cr0Fixed0;    break;
        case MSR_IA32_VMX_CR0_FIXED1:           *puValue = pVmxMsrs->u64Cr0Fixed1;    break;
        case MSR_IA32_VMX_CR4_FIXED0:           *puValue = pVmxMsrs->u64Cr4Fixed0;    break;
        case MSR_IA32_VMX_CR4_FIXED1:           *puValue = pVmxMsrs->u64Cr4Fixed1;    break;
        case MSR_IA32_VMX_VMCS_ENUM:            *puValue = pVmxMsrs->u64VmcsEnum;     break;
        case MSR_IA32_VMX_VMFUNC:               *puValue = pVmxMsrs->u64VmFunc;       break;
        case MSR_IA32_VMX_EPT_VPID_CAP:         *puValue = pVmxMsrs->u64EptVpidCaps;  break;
        default:
        {
            AssertMsgFailed(("Invalid MSR %#x\n", idMsr));
            return VERR_NOT_FOUND;
        }
    }
    return VINF_SUCCESS;
}
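
/* Usage sketch (illustrative, not part of the original file): querying a
   single cached MSR instead of copying the whole VMXMSRS struct:

       uint64_t uVmxBasic;
       int rc = HMVmxGetHostMsr(pVM, MSR_IA32_VMX_BASIC, &uVmxBasic);
       if (rc == VERR_NOT_FOUND)
           LogRel(("MSR is not part of the cached VMX set\n"));
*/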


/**
 * Gets the description of a VMX instruction diagnostic enum member.
 *
 * @returns The descriptive string.
 * @param   enmInstrDiag   The VMX instruction diagnostic.
 */
VMM_INT_DECL(const char *) HMVmxGetInstrDiagDesc(VMXVINSTRDIAG enmInstrDiag)
{
    if (RT_LIKELY((unsigned)enmInstrDiag < RT_ELEMENTS(g_apszVmxInstrDiagDesc)))
        return g_apszVmxInstrDiagDesc[enmInstrDiag];
    return "Unknown/invalid";
}
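
/* Usage sketch (illustrative, not part of the original file): turning a
   diagnostic produced by the nested VMX instruction emulation into a log
   line; 'enmDiag' is a hypothetical local of type VMXVINSTRDIAG:

       Log(("VMWRITE failed: %s\n", HMVmxGetInstrDiagDesc(enmDiag)));
*/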


/**
 * Checks if a code selector (CS) is suitable for execution using hardware-assisted
 * VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise false.
 * @param   pSel        Pointer to the selector to check (CS).
 * @param   uStackDpl   The CPL, aka the DPL of the stack segment.
 */
static bool hmVmxIsCodeSelectorOk(PCCPUMSELREG pSel, unsigned uStackDpl)
{
    /*
     * Segment must be an accessed code segment, it must be present and it must
     * be usable.
     * Note! These are all standard requirements and if CS holds anything else
     *       we've got buggy code somewhere!
     */
    AssertCompile(X86DESCATTR_TYPE == 0xf);
    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P | X86DESCATTR_UNUSABLE))
                    == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P),
                    ("%#x\n", pSel->Attr.u),
                    false);
    /* For conforming segments, CS.DPL must be <= SS.DPL, while CS.DPL
       must equal SS.DPL for non-conforming segments.
       Note! This is also a hard requirement like above. */
    AssertMsgReturn(   pSel->Attr.n.u4Type & X86_SEL_TYPE_CONF
                    ?  pSel->Attr.n.u2Dpl <= uStackDpl
                    :  pSel->Attr.n.u2Dpl == uStackDpl,
                    ("u4Type=%#x u2Dpl=%u uStackDpl=%u\n", pSel->Attr.n.u4Type, pSel->Attr.n.u2Dpl, uStackDpl),
                    false);

    /*
     * The following two requirements are VT-x specific:
     *   - G bit must be set if any high limit bits are set.
     *   - G bit must be clear if any low limit bits are clear.
     */
    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
        return true;
    return false;
}
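
/* Worked example for the granularity checks above (illustrative, not part of
   the original file): a flat 4 GiB code segment has u32Limit = 0xffffffff with
   G=1 and passes both tests; u32Limit = 0x1234 with G=0 also passes (no high
   limit bits set, G clear); but u32Limit = 0x1234 with G=1 fails because the
   low 12 bits are not all ones. */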


/**
 * Checks if a data selector (DS/ES/FS/GS) is suitable for execution using
 * hardware-assisted VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise false.
 * @param   pSel   Pointer to the selector to check (DS/ES/FS/GS).
 */
static bool hmVmxIsDataSelectorOk(PCCPUMSELREG pSel)
{
    /*
     * Unusable segments are OK. These days they should be marked as such, but
     * as an alternative, for old saved states and AMD<->VT-x migration, we
     * also treat segments with all the attributes cleared as unusable.
     */
    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
        return true;

    /** @todo tighten these checks. Will require CPUM load adjusting. */

    /* Segment must be accessed. */
    if (pSel->Attr.u & X86_SEL_TYPE_ACCESSED)
    {
        /* Code segments must also be readable. */
        if (  !(pSel->Attr.u & X86_SEL_TYPE_CODE)
            || (pSel->Attr.u & X86_SEL_TYPE_READ))
        {
            /* The S bit must be set. */
            if (pSel->Attr.n.u1DescType)
            {
                /* Except for conforming segments, DPL >= RPL. */
                if (   pSel->Attr.n.u2Dpl >= (pSel->Sel & X86_SEL_RPL)
                    || pSel->Attr.n.u4Type >= X86_SEL_TYPE_ER_ACC)
                {
                    /* Segment must be present. */
                    if (pSel->Attr.n.u1Present)
                    {
                        /*
                         * The following two requirements are VT-x specific:
                         *   - G bit must be set if any high limit bits are set.
                         *   - G bit must be clear if any low limit bits are clear.
                         */
                        if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
                            && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
                            return true;
                    }
                }
            }
        }
    }

    return false;
}


/**
 * Checks if the stack selector (SS) is suitable for execution using
 * hardware-assisted VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise false.
 * @param   pSel   Pointer to the selector to check (SS).
 */
static bool hmVmxIsStackSelectorOk(PCCPUMSELREG pSel)
{
    /*
     * Unusable segments are OK. These days they should be marked as such, but
     * as an alternative, for old saved states and AMD<->VT-x migration, we
     * also treat segments with all the attributes cleared as unusable.
     */
    /** @todo r=bird: actually all zeroes isn't gonna cut it... SS.DPL == CPL. */
    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
        return true;

    /*
     * Segment must be an accessed writable segment, it must be present.
     * Note! These are all standard requirements and if SS holds anything else
     *       we've got buggy code somewhere!
     */
    AssertCompile(X86DESCATTR_TYPE == 0xf);
    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P | X86_SEL_TYPE_CODE))
                    == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P),
                    ("%#x\n", pSel->Attr.u), false);

    /* DPL must equal RPL.
       Note! This is also a hard requirement like above. */
    AssertMsgReturn(pSel->Attr.n.u2Dpl == (pSel->Sel & X86_SEL_RPL),
                    ("u2Dpl=%u Sel=%#x\n", pSel->Attr.n.u2Dpl, pSel->Sel), false);

    /*
     * The following two requirements are VT-x specific:
     *   - G bit must be set if any high limit bits are set.
     *   - G bit must be clear if any low limit bits are clear.
     */
    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
        return true;
    return false;
}


/**
 * Checks if the guest is in a suitable state for hardware-assisted VMX execution.
 *
 * @returns @c true if it is suitable, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest CPU context.
 *
 * @remarks @a pCtx can be a partial context and thus may not necessarily be the
 *          same as pVCpu->cpum.GstCtx! Thus don't eliminate the @a pCtx parameter.
 *          Secondly, if additional checks are added that require more of the CPU
 *          state, make sure REM (which supplies a partial state) is updated.
 */
VMM_INT_DECL(bool) HMVmxCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(HMIsEnabled(pVM));
    Assert(!CPUMIsGuestVmxEnabled(pCtx));
    Assert(   ( pVM->hm.s.vmx.fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS)
           || (!pVM->hm.s.vmx.fUnrestrictedGuest &&  pVM->hm.s.vmx.pRealModeTSS));

    pVCpu->hm.s.fActive = false;

    bool const fSupportsRealMode = pVM->hm.s.vmx.fUnrestrictedGuest || PDMVmmDevHeapIsEnabled(pVM);
    if (!pVM->hm.s.vmx.fUnrestrictedGuest)
    {
        /*
         * The VMM device heap is a requirement for emulating real mode or protected
         * mode without paging when the unrestricted guest execution feature is
         * missing (VT-x only).
         */
        if (fSupportsRealMode)
        {
            if (CPUMIsGuestInRealModeEx(pCtx))
            {
                /*
                 * In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
                 * bases and limits, i.e. limit must be 64K and base must be selector * 16.
                 * If this is not true, we cannot execute real mode as V86 and have to fall
                 * back to emulation.
                 */
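                /* (Illustrative example, not part of the original comment: with
                    CS = 0x1234, a real-mode compatible base is 0x1234 * 16 =
                    0x12340, which is what the '>> 4' comparisons below verify,
                    alongside the 0xffff limit checks.) */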
                if (   pCtx->cs.Sel != (pCtx->cs.u64Base >> 4)
                    || pCtx->ds.Sel != (pCtx->ds.u64Base >> 4)
                    || pCtx->es.Sel != (pCtx->es.u64Base >> 4)
                    || pCtx->ss.Sel != (pCtx->ss.u64Base >> 4)
                    || pCtx->fs.Sel != (pCtx->fs.u64Base >> 4)
                    || pCtx->gs.Sel != (pCtx->gs.u64Base >> 4))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelBase);
                    return false;
                }
                if (   (pCtx->cs.u32Limit != 0xffff)
                    || (pCtx->ds.u32Limit != 0xffff)
                    || (pCtx->es.u32Limit != 0xffff)
                    || (pCtx->ss.u32Limit != 0xffff)
                    || (pCtx->fs.u32Limit != 0xffff)
                    || (pCtx->gs.u32Limit != 0xffff))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelLimit);
                    return false;
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckRmOk);
            }
            else
            {
                /*
                 * Verify the requirements for executing code in protected mode. VT-x can't
                 * handle the CPU state right after a switch from real to protected mode
                 * (all sorts of RPL & DPL assumptions).
                 */
                if (pVCpu->hm.s.vmx.fWasInRealMode)
                {
                    /** @todo If guest is in V86 mode, these checks should be different! */
                    if ((pCtx->cs.Sel & X86_SEL_RPL) != (pCtx->ss.Sel & X86_SEL_RPL))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRpl);
                        return false;
                    }
                    if (   !hmVmxIsCodeSelectorOk(&pCtx->cs, pCtx->ss.Attr.n.u2Dpl)
                        || !hmVmxIsDataSelectorOk(&pCtx->ds)
                        || !hmVmxIsDataSelectorOk(&pCtx->es)
                        || !hmVmxIsDataSelectorOk(&pCtx->fs)
                        || !hmVmxIsDataSelectorOk(&pCtx->gs)
                        || !hmVmxIsStackSelectorOk(&pCtx->ss))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
                        return false;
                    }
                }
                /* VT-x also chokes on invalid TR or LDTR selectors (minix). */
                if (pCtx->gdtr.cbGdt)
                {
                    if ((pCtx->tr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadTr);
                        return false;
                    }
                    else if ((pCtx->ldtr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadLdt);
                        return false;
                    }
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckPmOk);
            }
        }
        else
        {
            if (   !CPUMIsGuestInLongModeEx(pCtx)
                && !pVM->hm.s.vmx.fUnrestrictedGuest)
            {
                if (   !pVM->hm.s.fNestedPaging       /* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
                    || CPUMIsGuestInRealModeEx(pCtx)) /* Requires a fake TSS for real mode - stored in the VMM device heap */
                    return false;

                /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
                if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr.Sel == 0)
                    return false;

                /*
                 * The guest is about to complete the switch to protected mode. Wait a bit longer.
                 * Windows XP; switch to protected mode; all selectors are marked not present
                 * in the hidden registers (possible recompiler bug; see load_seg_vm).
                 */
                /** @todo Is this supposed recompiler bug still relevant with IEM? */
                if (pCtx->cs.Attr.n.u1Present == 0)
                    return false;
                if (pCtx->ss.Attr.n.u1Present == 0)
                    return false;

                /*
                 * Windows XP: possible same as above, but new recompiler requires new
                 * heuristics? VT-x doesn't seem to like something about the guest state and
                 * this stuff avoids it.
                 */
                /** @todo This check is actually wrong, it doesn't take the direction of the
                 *        stack segment into account. But, it does the job for now. */
                if (pCtx->rsp >= pCtx->ss.u32Limit)
                    return false;
            }
        }
    }

    if (pVM->hm.s.vmx.fEnabled)
    {
        uint32_t uCr0Mask;

        /* If bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
        uCr0Mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr0Fixed0;

        /* We ignore the NE bit here on purpose; see HMR0.cpp for details. */
        uCr0Mask &= ~X86_CR0_NE;

        if (fSupportsRealMode)
        {
            /* We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
            uCr0Mask &= ~(X86_CR0_PG | X86_CR0_PE);
        }
        else
        {
            /* We support protected mode without paging using identity mapping. */
            uCr0Mask &= ~X86_CR0_PG;
        }
        if ((pCtx->cr0 & uCr0Mask) != uCr0Mask)
            return false;

        /* If bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
        if ((pCtx->cr0 & uCr0Mask) != 0)
            return false;

        /* If bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
        uCr0Mask  = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr4Fixed0;
        uCr0Mask &= ~X86_CR4_VMXE;
        if ((pCtx->cr4 & uCr0Mask) != uCr0Mask)
            return false;

        /* If bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
        if ((pCtx->cr4 & uCr0Mask) != 0)
            return false;

        pVCpu->hm.s.fActive = true;
        return true;
    }

    return false;
}
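
/* Worked example for the fixed-bit checks above (illustrative values, not from
   the original file): if IA32_VMX_CR0_FIXED0 = 0x80000021 (PG, NE, PE) and
   IA32_VMX_CR0_FIXED1 = 0xffffffff, then with fSupportsRealMode the mask
   reduces to 0x80000021 & ~(NE | PG | PE) = 0, so any guest CR0 passes the
   fixed0 check; without real-mode support the mask is PE, i.e. the guest
   would still need CR0.PE set. */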