VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp@73617

Last change on this file since 73617 was 73617, checked in by vboxsync, 6 years ago

VMM/HMVMXR0: Use IEMExecOne() rather than manually interpreting a select few instructions in the
real-on-v86 mode when unrestricted-guest execution is not allowed.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.3 KB
/* $Id: HMVMXAll.cpp 73617 2018-08-10 14:09:55Z vboxsync $ */
/** @file
 * HM VMX (VT-x) - All contexts.
 */

/*
 * Copyright (C) 2018 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/pdmapi.h>


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#define VMX_INSTR_DIAG_DESC(a_Def, a_Desc)      #a_Def " - " #a_Desc
static const char * const g_apszVmxInstrDiagDesc[kVmxVInstrDiag_Last] =
{
    /* Internal processing errors. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_1              , "Ipe_1"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_2              , "Ipe_2"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_3              , "Ipe_3"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_4              , "Ipe_4"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_5              , "Ipe_5"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_6              , "Ipe_6"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_7              , "Ipe_7"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_8              , "Ipe_8"        ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Ipe_9              , "Ipe_9"        ),
    /* VMXON. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_A20M         , "A20M"         ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cpl          , "Cpl"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cr0Fixed0    , "Cr0Fixed0"    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cr4Fixed0    , "Cr4Fixed0"    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Intercept    , "Intercept"    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_LongModeCS   , "LongModeCS"   ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_MsrFeatCtl   , "MsrFeatCtl"   ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrAlign     , "PtrAlign"     ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrAbnormal  , "PtrAbnormal"  ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrMap       , "PtrMap"       ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrPhysRead  , "PtrPhysRead"  ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_PtrWidth     , "PtrWidth"     ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Success      , "Success"      ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_ShadowVmcs   , "ShadowVmcs"   ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Vmxe         , "Vmxe"         ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmcsRevId    , "VmcsRevId"    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmxRoot      , "VmxRoot"      ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_VmxRootCpl   , "VmxRootCpl"   ),
    /* VMXOFF. */
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Cpl          , "Cpl"          ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Intercept    , "Intercept"    ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_LongModeCS   , "LongModeCS"   ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_RealOrV86Mode, "RealOrV86Mode"),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Success      , "Success"      ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_Vmxe         , "Vmxe"         ),
    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxoff_VmxRoot      , "VmxRoot"      )
    /* kVmxVInstrDiag_Last */
};
#undef VMX_INSTR_DIAG_DESC
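
/*
 * Illustrative note (not part of the original file): the stringizing macro
 * above turns each table entry into a single string literal. For instance,
 *     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmxon_Cpl, "Cpl")
 * expands to
 *     "kVmxVInstrDiag_Vmxon_Cpl" " - " "\"Cpl\""
 * (note that # applied to a string literal escapes its quotes), which the
 * compiler concatenates to: kVmxVInstrDiag_Vmxon_Cpl - "Cpl"
 */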


/**
 * Gets a copy of the VMX host MSRs that were read by HM during ring-0
 * initialization.
 *
 * @returns VBox status code.
 * @param   pVM        The cross context VM structure.
 * @param   pVmxMsrs   Where to store the VMXMSRS struct (only valid when
 *                     VINF_SUCCESS is returned).
 *
 * @remarks Caller needs to take care not to call this function too early. Call
 *          after HM initialization is fully complete.
 */
VMM_INT_DECL(int) HMVmxGetHostMsrs(PVM pVM, PVMXMSRS pVmxMsrs)
{
    AssertPtrReturn(pVM,      VERR_INVALID_PARAMETER);
    AssertPtrReturn(pVmxMsrs, VERR_INVALID_PARAMETER);
    if (pVM->hm.s.vmx.fSupported)
    {
        *pVmxMsrs = pVM->hm.s.vmx.Msrs;
        return VINF_SUCCESS;
    }
    return VERR_VMX_NOT_SUPPORTED;
}
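
/*
 * Illustrative usage sketch (not part of the original file): how a caller
 * might fetch the cached host VMX MSRs once HM initialization has completed.
 * The helper name, the logging statements and the calling context are
 * assumptions for demonstration purposes only.
 */
#if 0 /* example only */
static void hmVmxExampleDumpHostMsrs(PVM pVM)
{
    VMXMSRS VmxMsrs;
    int rc = HMVmxGetHostMsrs(pVM, &VmxMsrs);
    if (RT_SUCCESS(rc))
        LogRel(("VMX basic MSR: %#RX64\n", VmxMsrs.u64Basic));   /* Whole struct is valid on success. */
    else
        LogRel(("VMX not supported (rc=%Rrc)\n", rc));           /* VERR_VMX_NOT_SUPPORTED expected here. */
}
#endif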


/**
 * Gets the specified VMX host MSR that was read by HM during ring-0
 * initialization.
 *
 * @returns VBox status code.
 * @param   pVM       The cross context VM structure.
 * @param   idMsr     The MSR.
 * @param   puValue   Where to store the MSR value (only updated when
 *                    VINF_SUCCESS is returned).
 *
 * @remarks Caller needs to take care not to call this function too early. Call
 *          after HM initialization is fully complete.
 */
VMM_INT_DECL(int) HMVmxGetHostMsr(PVM pVM, uint32_t idMsr, uint64_t *puValue)
{
    AssertPtrReturn(pVM,     VERR_INVALID_PARAMETER);
    AssertPtrReturn(puValue, VERR_INVALID_PARAMETER);

    if (!pVM->hm.s.vmx.fSupported)
        return VERR_VMX_NOT_SUPPORTED;

    PCVMXMSRS pVmxMsrs = &pVM->hm.s.vmx.Msrs;
    switch (idMsr)
    {
        case MSR_IA32_FEATURE_CONTROL:         *puValue = pVmxMsrs->u64FeatCtrl;    break;
        case MSR_IA32_VMX_BASIC:               *puValue = pVmxMsrs->u64Basic;       break;
        case MSR_IA32_VMX_PINBASED_CTLS:       *puValue = pVmxMsrs->PinCtls.u;      break;
        case MSR_IA32_VMX_PROCBASED_CTLS:      *puValue = pVmxMsrs->ProcCtls.u;     break;
        case MSR_IA32_VMX_PROCBASED_CTLS2:     *puValue = pVmxMsrs->ProcCtls2.u;    break;
        case MSR_IA32_VMX_EXIT_CTLS:           *puValue = pVmxMsrs->ExitCtls.u;     break;
        case MSR_IA32_VMX_ENTRY_CTLS:          *puValue = pVmxMsrs->EntryCtls.u;    break;
        case MSR_IA32_VMX_TRUE_PINBASED_CTLS:  *puValue = pVmxMsrs->TruePinCtls.u;  break;
        case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: *puValue = pVmxMsrs->TrueProcCtls.u; break;
        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:     *puValue = pVmxMsrs->TrueEntryCtls.u; break;
        case MSR_IA32_VMX_TRUE_EXIT_CTLS:      *puValue = pVmxMsrs->TrueExitCtls.u; break;
        case MSR_IA32_VMX_MISC:                *puValue = pVmxMsrs->u64Misc;        break;
        case MSR_IA32_VMX_CR0_FIXED0:          *puValue = pVmxMsrs->u64Cr0Fixed0;   break;
        case MSR_IA32_VMX_CR0_FIXED1:          *puValue = pVmxMsrs->u64Cr0Fixed1;   break;
        case MSR_IA32_VMX_CR4_FIXED0:          *puValue = pVmxMsrs->u64Cr4Fixed0;   break;
        case MSR_IA32_VMX_CR4_FIXED1:          *puValue = pVmxMsrs->u64Cr4Fixed1;   break;
        case MSR_IA32_VMX_VMCS_ENUM:           *puValue = pVmxMsrs->u64VmcsEnum;    break;
        case MSR_IA32_VMX_VMFUNC:              *puValue = pVmxMsrs->u64VmFunc;      break;
        case MSR_IA32_VMX_EPT_VPID_CAP:        *puValue = pVmxMsrs->u64EptVpidCaps; break;
        default:
        {
            AssertMsgFailed(("Invalid MSR %#x\n", idMsr));
            return VERR_NOT_FOUND;
        }
    }
    return VINF_SUCCESS;
}
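
/*
 * Illustrative sketch (not part of the original file): querying a single
 * cached host VMX MSR, here MSR_IA32_VMX_BASIC. The wrapper function is an
 * assumption for demonstration purposes only.
 */
#if 0 /* example only */
static uint64_t hmVmxExampleGetBasicMsr(PVM pVM)
{
    uint64_t uBasic = 0;
    int rc = HMVmxGetHostMsr(pVM, MSR_IA32_VMX_BASIC, &uBasic);
    AssertRCReturn(rc, 0); /* Returns 0 when VMX is unsupported or the MSR id is unknown. */
    return uBasic;
}
#endif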


/**
 * Gets the description of a VMX instruction diagnostic enum member.
 *
 * @returns The descriptive string.
 * @param   enmInstrDiag    The VMX instruction diagnostic.
 */
VMM_INT_DECL(const char *) HMVmxGetInstrDiagDesc(VMXVINSTRDIAG enmInstrDiag)
{
    if (RT_LIKELY((unsigned)enmInstrDiag < RT_ELEMENTS(g_apszVmxInstrDiagDesc)))
        return g_apszVmxInstrDiagDesc[enmInstrDiag];
    return "Unknown/invalid";
}
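
/*
 * Illustrative sketch (not part of the original file): using the diagnostic
 * description in a log statement. The enum member is a real entry from the
 * table above; the logging context is an assumption.
 */
#if 0 /* example only */
    Log(("VMXON failed: %s\n", HMVmxGetInstrDiagDesc(kVmxVInstrDiag_Vmxon_Cpl)));
#endif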


/**
 * Checks if a code selector (CS) is suitable for execution using
 * hardware-assisted VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise false.
 * @param   pSel        Pointer to the selector to check (CS).
 * @param   uStackDpl   The CPL, aka the DPL of the stack segment.
 */
static bool hmVmxIsCodeSelectorOk(PCCPUMSELREG pSel, unsigned uStackDpl)
{
    /*
     * Segment must be an accessed code segment, it must be present and it must
     * be usable.
     * Note! These are all standard requirements and if CS holds anything else
     *       we've got buggy code somewhere!
     */
    AssertCompile(X86DESCATTR_TYPE == 0xf);
    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P | X86DESCATTR_UNUSABLE))
                    == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P),
                    ("%#x\n", pSel->Attr.u),
                    false);

    /* For conforming segments, CS.DPL must be <= SS.DPL, while CS.DPL
       must equal SS.DPL for non-conforming segments.
       Note! This is also a hard requirement like above. */
    AssertMsgReturn(   pSel->Attr.n.u4Type & X86_SEL_TYPE_CONF
                    ?  pSel->Attr.n.u2Dpl <= uStackDpl
                    :  pSel->Attr.n.u2Dpl == uStackDpl,
                    ("u4Type=%#x u2Dpl=%u uStackDpl=%u\n", pSel->Attr.n.u4Type, pSel->Attr.n.u2Dpl, uStackDpl),
                    false);

    /*
     * The following two requirements are VT-x specific:
     *  - G bit must be set if any high limit bits are set.
     *  - G bit must be clear if any low limit bits are clear.
     */
    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
        return true;
    return false;
}
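
/*
 * Worked example of the VT-x granularity rule above (illustrative, not part
 * of the original file):
 *  - u32Limit = 0x0000ffff: limit bits 31:20 are all clear and bits 11:0 are
 *    all set, so the check passes with either G=0 or G=1.
 *  - u32Limit = 0xffffffff: high limit bits are set, so G must be 1 (the
 *    limit is expressed in 4K pages).
 *  - u32Limit = 0x00000ffe with G=1 fails: a page-granular limit always has
 *    its low 12 bits set once expanded ((limit << 12) | 0xfff), so any clear
 *    low bit forces G=0.
 */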


/**
 * Checks if a data selector (DS/ES/FS/GS) is suitable for execution using
 * hardware-assisted VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise false.
 * @param   pSel    Pointer to the selector to check (DS/ES/FS/GS).
 */
static bool hmVmxIsDataSelectorOk(PCCPUMSELREG pSel)
{
    /*
     * Unusable segments are OK. These days they should be marked as such, but
     * as an alternative, for old saved states and AMD<->VT-x migration, we also
     * treat segments with all the attributes cleared as unusable.
     */
    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
        return true;

    /** @todo tighten these checks. Will require CPUM load adjusting. */

    /* Segment must be accessed. */
    if (pSel->Attr.u & X86_SEL_TYPE_ACCESSED)
    {
        /* Code segments must also be readable. */
        if (  !(pSel->Attr.u & X86_SEL_TYPE_CODE)
            || (pSel->Attr.u & X86_SEL_TYPE_READ))
        {
            /* The S bit must be set. */
            if (pSel->Attr.n.u1DescType)
            {
                /* Except for conforming segments, DPL >= RPL. */
                if (   pSel->Attr.n.u2Dpl >= (pSel->Sel & X86_SEL_RPL)
                    || pSel->Attr.n.u4Type >= X86_SEL_TYPE_ER_ACC)
                {
                    /* Segment must be present. */
                    if (pSel->Attr.n.u1Present)
                    {
                        /*
                         * The following two requirements are VT-x specific:
                         *  - G bit must be set if any high limit bits are set.
                         *  - G bit must be clear if any low limit bits are clear.
                         */
                        if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
                            && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
                            return true;
                    }
                }
            }
        }
    }

    return false;
}


/**
 * Checks if the stack selector (SS) is suitable for execution using
 * hardware-assisted VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise false.
 * @param   pSel    Pointer to the selector to check (SS).
 */
static bool hmVmxIsStackSelectorOk(PCCPUMSELREG pSel)
{
    /*
     * Unusable segments are OK. These days they should be marked as such, but
     * as an alternative, for old saved states and AMD<->VT-x migration, we also
     * treat segments with all the attributes cleared as unusable.
     */
    /** @todo r=bird: actually all zeroes isn't gonna cut it... SS.DPL == CPL. */
    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
        return true;

    /*
     * Segment must be an accessed writable segment and it must be present.
     * Note! These are all standard requirements and if SS holds anything else
     *       we've got buggy code somewhere!
     */
    AssertCompile(X86DESCATTR_TYPE == 0xf);
    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P | X86_SEL_TYPE_CODE))
                    == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P),
                    ("%#x\n", pSel->Attr.u), false);

    /* DPL must equal RPL.
       Note! This is also a hard requirement like above. */
    AssertMsgReturn(pSel->Attr.n.u2Dpl == (pSel->Sel & X86_SEL_RPL),
                    ("u2Dpl=%u Sel=%#x\n", pSel->Attr.n.u2Dpl, pSel->Sel), false);

    /*
     * The following two requirements are VT-x specific:
     *  - G bit must be set if any high limit bits are set.
     *  - G bit must be clear if any low limit bits are clear.
     */
    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
        return true;
    return false;
}


/**
 * Checks if the guest is in a suitable state for hardware-assisted VMX execution.
 *
 * @returns @c true if it is suitable, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest CPU context.
 *
 * @remarks @a pCtx can be a partial context and thus may not necessarily be the
 *          same as pVCpu->cpum.GstCtx! Thus don't eliminate the @a pCtx parameter.
 *          Secondly, if additional checks are added that require more of the CPU
 *          state, make sure REM (which supplies a partial state) is updated.
 */
VMM_INT_DECL(bool) HMVmxCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(HMIsEnabled(pVM));
    Assert(!CPUMIsGuestVmxEnabled(pCtx));
    Assert(   ( pVM->hm.s.vmx.fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS)
           || (!pVM->hm.s.vmx.fUnrestrictedGuest &&  pVM->hm.s.vmx.pRealModeTSS));

    pVCpu->hm.s.fActive = false;

    bool const fSupportsRealMode = pVM->hm.s.vmx.fUnrestrictedGuest || PDMVmmDevHeapIsEnabled(pVM);
    if (!pVM->hm.s.vmx.fUnrestrictedGuest)
    {
        /*
         * The VMM device heap is a requirement for emulating real mode or protected mode
         * without paging when the unrestricted guest execution feature is missing (VT-x only).
         */
        if (fSupportsRealMode)
        {
            if (CPUMIsGuestInRealModeEx(pCtx))
            {
                /*
                 * In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
                 * bases and limits, i.e. limit must be 64K and base must be selector * 16.
                 * If this is not true, we cannot execute real mode as V86 and have to fall
                 * back to emulation.
                 */
                if (   pCtx->cs.Sel != (pCtx->cs.u64Base >> 4)
                    || pCtx->ds.Sel != (pCtx->ds.u64Base >> 4)
                    || pCtx->es.Sel != (pCtx->es.u64Base >> 4)
                    || pCtx->ss.Sel != (pCtx->ss.u64Base >> 4)
                    || pCtx->fs.Sel != (pCtx->fs.u64Base >> 4)
                    || pCtx->gs.Sel != (pCtx->gs.u64Base >> 4))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelBase);
                    return false;
                }
                if (   (pCtx->cs.u32Limit != 0xffff)
                    || (pCtx->ds.u32Limit != 0xffff)
                    || (pCtx->es.u32Limit != 0xffff)
                    || (pCtx->ss.u32Limit != 0xffff)
                    || (pCtx->fs.u32Limit != 0xffff)
                    || (pCtx->gs.u32Limit != 0xffff))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelLimit);
                    return false;
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckRmOk);
            }
            else
            {
                /*
                 * Verify the requirements for executing code in protected mode. VT-x can't
                 * handle the CPU state right after a switch from real to protected mode
                 * (all sorts of RPL & DPL assumptions).
                 */
                if (pVCpu->hm.s.vmx.fWasInRealMode)
                {
                    /** @todo If guest is in V86 mode, these checks should be different! */
                    if ((pCtx->cs.Sel & X86_SEL_RPL) != (pCtx->ss.Sel & X86_SEL_RPL))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRpl);
                        return false;
                    }
                    if (   !hmVmxIsCodeSelectorOk(&pCtx->cs, pCtx->ss.Attr.n.u2Dpl)
                        || !hmVmxIsDataSelectorOk(&pCtx->ds)
                        || !hmVmxIsDataSelectorOk(&pCtx->es)
                        || !hmVmxIsDataSelectorOk(&pCtx->fs)
                        || !hmVmxIsDataSelectorOk(&pCtx->gs)
                        || !hmVmxIsStackSelectorOk(&pCtx->ss))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
                        return false;
                    }
                }
                /* VT-x also chokes on invalid TR or LDTR selectors (minix). */
                if (pCtx->gdtr.cbGdt)
                {
                    if ((pCtx->tr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadTr);
                        return false;
                    }
                    else if ((pCtx->ldtr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadLdt);
                        return false;
                    }
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckPmOk);
            }
        }
        else
        {
            if (   !CPUMIsGuestInLongModeEx(pCtx)
                && !pVM->hm.s.vmx.fUnrestrictedGuest)
            {
                if (   !pVM->hm.s.fNestedPaging   /* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap. */
                    || CPUMIsGuestInRealModeEx(pCtx)) /* Requires a fake TSS for real mode - stored in the VMM device heap. */
                    return false;

                /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
                if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr.Sel == 0)
                    return false;

                /*
                 * The guest is about to complete the switch to protected mode. Wait a bit longer.
                 * Windows XP; switch to protected mode; all selectors are marked not present
                 * in the hidden registers (possible recompiler bug; see load_seg_vm).
                 */
                /** @todo Is this supposed recompiler bug still relevant with IEM? */
                if (pCtx->cs.Attr.n.u1Present == 0)
                    return false;
                if (pCtx->ss.Attr.n.u1Present == 0)
                    return false;

                /*
                 * Windows XP: possibly the same as above, but the new recompiler requires new
                 * heuristics? VT-x doesn't seem to like something about the guest state and
                 * this stuff avoids it.
                 */
                /** @todo This check is actually wrong, it doesn't take the direction of the
                 *        stack segment into account. But, it does the job for now. */
                if (pCtx->rsp >= pCtx->ss.u32Limit)
                    return false;
            }
        }
    }

    if (pVM->hm.s.vmx.fEnabled)
    {
        /* If bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
        uint32_t uCr0Mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr0Fixed0;

        /* We ignore the NE bit here on purpose; see HMR0.cpp for details. */
        uCr0Mask &= ~X86_CR0_NE;

        if (fSupportsRealMode)
        {
            /* We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
            uCr0Mask &= ~(X86_CR0_PG | X86_CR0_PE);
        }
        else
        {
            /* We support protected mode without paging using identity mapping. */
            uCr0Mask &= ~X86_CR0_PG;
        }
        if ((pCtx->cr0 & uCr0Mask) != uCr0Mask)
            return false;

        /* If bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
        if ((pCtx->cr0 & uCr0Mask) != 0)
            return false;

        /* If bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
        uint32_t uCr4Mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr4Fixed0;
        uCr4Mask &= ~X86_CR4_VMXE;
        if ((pCtx->cr4 & uCr4Mask) != uCr4Mask)
            return false;

        /* If bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
        uCr4Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
        if ((pCtx->cr4 & uCr4Mask) != 0)
            return false;

        pVCpu->hm.s.fActive = true;
        return true;
    }

    return false;
}
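
/*
 * Worked example of the fixed-bit checks above (illustrative, not part of
 * the original file; the MSR values shown are typical but not guaranteed):
 *  - CR0_FIXED0 is commonly 0x80000021 (PG, NE and PE must be 1). With NE
 *    always masked out here, and PE/PG also masked when real-mode emulation
 *    is available, little or nothing may remain to enforce in guest CR0.
 *  - CR0_FIXED1 is commonly 0xffffffff, i.e. no CR0 bit is forced to zero.
 *  - CR4_FIXED0 is commonly 0x00002000 (VMXE must be 1), which is masked out
 *    above because the guest need not have VMXE set for us to use VT-x.
 */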