VirtualBox

Changeset 80602 in vbox for trunk/src


Timestamp: Sep 5, 2019 10:40:31 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 133123
Message:

VMM/HMVMXR0: Nested VMX: bugref:9180: Clear EFER.LME (in the VMCS) when the guest has not yet set CR0.PG. This is required when Unrestricted Guest execution is not available, since that forces the VMCS CR0.PG to 1.
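
The constraint behind the change, stated briefly: the CPU derives EFER.LMA as CR0.PG & EFER.LME, and VT-x requires the "IA-32e mode guest" VM-entry control to agree with both EFER.LMA and the guest CR0.PG in the VMCS. Without Unrestricted Guest, the CR0.PG used for VT-x is forced to 1, so loading the guest's raw EFER while the guest still has CR0.PG=0 would make those fields disagree and fail VM-entry. The following self-contained C sketch (illustrative only; the helper name and layout are not taken from HMVMXR0.cpp) shows the masking decision the patch introduces:

    #include <stdbool.h>
    #include <stdint.h>

    /* Architectural EFER bits (same values as VirtualBox's MSR_K6_EFER_LME/LMA). */
    #define EFER_LME  (UINT64_C(1) << 8)   /* Long Mode Enable, written by software.          */
    #define EFER_LMA  (UINT64_C(1) << 10)  /* Long Mode Active, set by the CPU: CR0.PG & LME. */

    /*
     * Illustrative helper: pick the EFER value to load into the VMCS guest-state area.
     * Without Unrestricted Guest the VMCS guest CR0.PG is forced to 1, so a guest EFER
     * with LME=1 but LMA=0 (the guest has not enabled paging yet) must have LME masked
     * off, otherwise the "IA-32e mode guest" control, EFER.LMA and CR0.PG would
     * disagree at VM-entry.
     */
    static uint64_t effectiveGuestEfer(uint64_t uGuestEfer, bool fUnrestrictedGuest)
    {
        if (!fUnrestrictedGuest && !(uGuestEfer & EFER_LMA))
            uGuestEfer &= ~EFER_LME;
        return uGuestEfer;
    }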

File: 1 edited

  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r80598)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r80602)
@@ -4600,4 +4600,5 @@
 }
 
+
 /**
  * Exports the guest state with appropriate VM-entry and VM-exit controls in the
@@ -4646,5 +4647,8 @@
              */
             if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.GstCtx))
+            {
+                Assert(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LME);
                 fVal |= VMX_ENTRY_CTLS_IA32E_MODE_GUEST;
+            }
             else
                 Assert(!(fVal & VMX_ENTRY_CTLS_IA32E_MODE_GUEST));
@@ -6441,4 +6445,22 @@
         {
             /*
+             * EFER.LME is written by software, while EFER.LMA is set by the CPU to (CR0.PG & EFER.LME).
+             * This means a guest can set EFER.LME=1 while CR0.PG=0 and EFER.LMA can remain 0.
+             * VT-x requires that "IA-32e mode guest" VM-entry control must be identical to EFER.LMA
+             * and to CR0.PG. Without unrestricted execution, CR0.PG (used for VT-x, not the shadow)
+             * must always be 1. This forces us to effectively clear both EFER.LMA and EFER.LME until
+             * the guest has also set CR0.PG=1. Otherwise, we would run into an invalid-guest state
+             * during VM-entry.
+             */
+            uint64_t uGuestEferMsr = pCtx->msrEFER;
+            if (!pVM->hm.s.vmx.fUnrestrictedGuest)
+            {
+                if (!(pCtx->msrEFER & MSR_K6_EFER_LMA))
+                    uGuestEferMsr &= ~MSR_K6_EFER_LME;
+                else
+                    Assert((pCtx->msrEFER & (MSR_K6_EFER_LMA | MSR_K6_EFER_LME)) == (MSR_K6_EFER_LMA | MSR_K6_EFER_LME));
+            }
+
+            /*
              * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
              * but to use the auto-load store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
@@ -6446,5 +6468,5 @@
             if (pVM->hm.s.vmx.fSupportsVmcsEfer)
             {
-                int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pCtx->msrEFER);
+                int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, uGuestEferMsr);
                 AssertRC(rc);
             }
@@ -6455,8 +6477,10 @@
                  * continue to intercept guest read and write accesses to it, see @bugref{7386#c16}.
                  */
-                int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER, pCtx->msrEFER,
+                int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, pVmxTransient, MSR_K6_EFER, uGuestEferMsr,
                                                     false /* fSetReadWrite */, false /* fUpdateHostMsr */);
                 AssertRCReturn(rc, rc);
             }
+
+            Log4Func(("efer=%#RX64 shadow=%#RX64\n", uGuestEferMsr, pCtx->msrEFER));
         }
         else if (!pVM->hm.s.vmx.fSupportsVmcsEfer)
@@ -13412,5 +13436,6 @@
     {
         case 0:
-            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0);
+            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS | HM_CHANGED_GUEST_CR0
+                                                     | HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_EXIT_CTLS);
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write);
             Log4Func(("CR0 write. rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cr0));
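
For context, the window this patch addresses is the canonical long-mode bring-up sequence a guest performs: enable CR4.PAE, point CR3 at a PML4, set EFER.LME via WRMSR, and only then set CR0.PG (at which point the CPU sets EFER.LMA). Between the WRMSR and the CR0.PG write, a non-unrestricted guest runs with the VMCS CR0.PG forced to 1, so the VMM must keep LME out of the VMCS until the guest has really enabled paging. The sketch below uses hypothetical guest_* wrappers standing in for the MOV CRn/WRMSR instructions; it is not VirtualBox code:

    #include <stdint.h>

    /* Hypothetical guest-side wrappers; a real guest uses MOV CRn and WRMSR directly. */
    extern uint64_t guest_read_cr0(void);
    extern uint64_t guest_read_cr4(void);
    extern void     guest_write_cr0(uint64_t uVal);
    extern void     guest_write_cr3(uint64_t uVal);
    extern void     guest_write_cr4(uint64_t uVal);
    extern void     guest_wrmsr(uint32_t uMsr, uint64_t uVal);

    #define MSR_EFER   UINT32_C(0xC0000080)
    #define EFER_LME   (UINT64_C(1) << 8)
    #define CR0_PG     (UINT64_C(1) << 31)
    #define CR4_PAE    (UINT64_C(1) << 5)

    /* Canonical long-mode activation. Between steps 3 and 4 the guest has EFER.LME=1
     * while CR0.PG=0; this is exactly the window in which the patch keeps LME out of
     * the VMCS when Unrestricted Guest execution is not available. */
    static void guest_enable_long_mode(uint64_t uPml4Phys)
    {
        guest_write_cr4(guest_read_cr4() | CR4_PAE);    /* 1. enable PAE                      */
        guest_write_cr3(uPml4Phys);                     /* 2. point CR3 at the PML4           */
        guest_wrmsr(MSR_EFER, EFER_LME);                /* 3. set EFER.LME                    */
        guest_write_cr0(guest_read_cr0() | CR0_PG);     /* 4. set CR0.PG -> CPU sets EFER.LMA */
    }

When that final MOV to CR0 is intercepted (the last hunk above), the handler now also flags HM_CHANGED_GUEST_EFER_MSR and HM_CHANGED_VMX_ENTRY_EXIT_CTLS, so the next VM-entry re-exports EFER (with LME restored) and the "IA-32e mode guest" entry control together with the new CR0.PG.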