- Timestamp: Sep 5, 2019 10:40:31 AM (6 years ago)
- svn:sync-xref-src-repo-rev: 133123
- Files: 1 edited
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r80598 → r80602:

 }

+/**
  * Exports the guest state with appropriate VM-entry and VM-exit controls in the
 …
  */
     if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
+    {
+        Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
         fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
+    }
     else
         Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));

 …

     {
         /*
+         * EFER.LME is written by software, while EFER.LMA is set by the CPU to (CR0.PG & EFER.LME).
+         * This means a guest can set EFER.LME=1 while CR0.PG=0 and EFER.LMA can remain 0.
+         * VT-x requires that "IA-32e mode guest" VM-entry control must be identical to EFER.LMA
+         * and to CR0.PG. Without unrestricted execution, CR0.PG (used for VT-x, not the shadow)
+         * must always be 1. This forces us to effectively clear both EFER.LMA and EFER.LME until
+         * the guest has also set CR0.PG=1. Otherwise, we would run into an invalid-guest state
+         * during VM-entry.
+         */
+        uint64_t uGuestEferMsr = pCtx->msrEFER;
+        if (!pVM->hm.s.vmx.fUnrestrictedGuest)
+        {
+            if (!(pCtx->msrEFER & MSR_K6_EFER_LMA))
+                uGuestEferMsr &= ~MSR_K6_EFER_LME;
+            else
+                Assert((pCtx->msrEFER & (MSR_K6_EFER_LMA | MSR_K6_EFER_LME)) == (MSR_K6_EFER_LMA | MSR_K6_EFER_LME));
+        }
+
+        /*
          * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
          * but to use the auto-load store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
          */
         if (pVM->hm.s.vmx.fSupportsVmcsEfer)
         {
-            int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pCtx->msrEFER);
+            int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, uGuestEferMsr);
             AssertRC(rc);
         }
 …
              * continue to intercept guest read and write accesses to it, see @bugref{7386#c16}.
              */
-            int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER, pCtx->msrEFER,
+            int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER, uGuestEferMsr,
                                                 false /* fSetReadWrite */, false /* fUpdateHostMsr */);
             AssertRCReturn(rc, rc);
         }
+
+        Log4Func(("efer=%#RX64 shadow=%#RX64\n", uGuestEferMsr, pCtx->msrEFER));
     }
     else if (!pVM->hm.s.vmx.fSupportsVmcsEfer)

 …

     {
         case 0:
-            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
+            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
+                                                     | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
             Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
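The core of the change is the new comment and sanitization block in the second hunk: a guest may set EFER.LME=1 while CR0.PG=0, leaving EFER.LMA=0, yet VT-x requires the "IA-32e mode guest" VM-entry control, EFER.LMA and CR0.PG to agree, and without unrestricted guest execution CR0.PG is forced to 1. The changeset therefore masks LME out of the EFER value actually loaded for the guest until LMA becomes set. The following is a minimal standalone C sketch of that rule, for illustration only; EFER_LME, EFER_LMA, SanitizeGuestEfer and fUnrestrictedGuest are hypothetical stand-ins for MSR_K6_EFER_LME, MSR_K6_EFER_LMA and pVM->hm.s.vmx.fUnrestrictedGuest, not VirtualBox APIs.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFER_LME  (UINT64_C(1) << 8)    /* Long Mode Enable: written by guest software.           */
#define EFER_LMA  (UINT64_C(1) << 10)   /* Long Mode Active: set by the CPU to CR0.PG & EFER.LME. */

/* Hide EFER.LME from the value loaded on VM-entry until the guest also enables
 * paging (i.e. until the CPU would have set EFER.LMA), mirroring the changeset's
 * logic for CPUs without unrestricted guest execution, where CR0.PG is forced
 * to 1 and a loaded LME=1/LMA=0 EFER would cause an invalid-guest-state entry. */
static uint64_t SanitizeGuestEfer(uint64_t fShadowEfer, bool fUnrestrictedGuest)
{
    uint64_t fLoadEfer = fShadowEfer;
    if (!fUnrestrictedGuest)
    {
        if (!(fShadowEfer & EFER_LMA))
            fLoadEfer &= ~EFER_LME;
        else /* LMA can only become set while LME is set; anything else is a bug. */
            assert((fShadowEfer & (EFER_LMA | EFER_LME)) == (EFER_LMA | EFER_LME));
    }
    return fLoadEfer;
}

int main(void)
{
    /* Guest wrote EFER.LME=1 but has not yet set CR0.PG, so LMA is still 0. */
    uint64_t fShadowEfer = EFER_LME;
    printf("loaded=%#llx shadow=%#llx\n",
           (unsigned long long)SanitizeGuestEfer(fShadowEfer, false /* fUnrestrictedGuest */),
           (unsigned long long)fShadowEfer);
    return 0;
}

In this sketch the sanitized value (LME masked out) is what would be handed to VMX_VMCS64_GUEST_EFER_FULL or the auto-load MSR area, while the guest-visible shadow EFER keeps LME set; that split also explains the added Log4Func line, which logs both values.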