VirtualBox

Changeset 51238 in vbox for trunk


Timestamp:
May 13, 2014 11:36:49 AM
Author:
vboxsync
Message:

VMM/HMVMXR0: Fixed invalid EFER swapping when the guest is using PAE paging (when VT-x unrestricted guest execution, "UX", is not in effect).
Also fixes EFER swapping for 32-bit PAE hosts running 32-bit PAE guests whose EFER.NXE differs from the host's.
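
The substance of the change is the condition for swapping EFER between host and guest: PAE paging is only in effect when both CR4.PAE and CR0.PG are set, and EFER.NXE only affects the guest's page tables in that case. The following is a rough, self-contained illustration of that rule; the struct and function are simplified stand-ins rather than the actual VirtualBox CPUMCTX/hmR0VmxShouldSwapEferMsr definitions, and the bit values are the architectural ones.

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_CR0_PG       UINT64_C(0x80000000)   /* CR0.PG - paging enabled */
    #define X86_CR4_PAE      UINT64_C(0x00000020)   /* CR4.PAE - PAE paging */
    #define MSR_K6_EFER_NXE  UINT64_C(0x00000800)   /* EFER.NXE - no-execute enable */

    /* Simplified stand-in for the guest register context. */
    typedef struct GUESTCTX
    {
        uint64_t cr0;
        uint64_t cr4;
        uint64_t msrEFER;
    } GUESTCTX;

    /* Swap EFER only when the guest actually uses PAE paging (CR4.PAE and CR0.PG
       both set) and its EFER.NXE differs from the host's, since NXE changes the
       interpretation of PAE/long-mode page-table entries. */
    static bool shouldSwapEferForPae(const GUESTCTX *pCtx, uint64_t u64HostEfer)
    {
        return (pCtx->cr4 & X86_CR4_PAE)
            && (pCtx->cr0 & X86_CR0_PG)
            && (pCtx->msrEFER & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE);
    }

The r51238 change adds the CR0.PG test, so a guest that has enabled PAE in CR4 but has not yet turned paging on no longer triggers a spurious EFER swap.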

File: 1 edited

  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r51230)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r51238)
@@ -1574,6 +1574,4 @@
 static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu)
 {
-    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-
     uint32_t u32Val;
     int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
     
@@ -3176,12 +3174,9 @@
      * Otherwise it's done as part of auto-load/store MSR area in the VMCS, see hmR0VmxLoadGuestMsrs().
      */
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-    if (   HMVMX_IS_64BIT_HOST_MODE()
-        && pVM->hm.s.vmx.fSupportsVmcsEfer)
+    if (pVM->hm.s.vmx.fSupportsVmcsEfer)
     {
         rc = VMXWriteVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, pVM->hm.s.vmx.u64HostEfer);
         AssertRCReturn(rc, rc);
     }
-#endif

     /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see
     
@@ -3215,4 +3210,11 @@
     return true;
 #endif
+
+#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */
+    if (CPUMIsGuestInLongMode(pVCpu))
+        return false;
+#endif
+
     PVM      pVM          = pVCpu->CTX_SUFF(pVM);
     uint64_t u64HostEfer  = pVM->hm.s.vmx.u64HostEfer;
     
@@ -3230,10 +3232,14 @@

     /*
-     * If the guest uses PAE and EFER.NXE bit differs, we need to swap as it affects guest paging.
-     * 64-bit paging implies CR4.PAE as well. See Intel spec. 4.5 "IA32e Paging".
+     * If the guest uses PAE and EFER.NXE bit differs, we need to swap EFER as it
+     * affects guest paging. 64-bit paging implies CR4.PAE as well.
+     * See Intel spec. 4.5 "IA32e Paging" and Intel spec. 4.1.1 "Three Paging Modes".
      */
     if (   (pMixedCtx->cr4 & X86_CR4_PAE)
+        && (pMixedCtx->cr0 & X86_CR0_PG)
         && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
     {
+        /* Assert that host is PAE capable. */
+        Assert(pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_NX);
         return true;
     }
     
@@ -3281,13 +3287,10 @@

         /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. */
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-        if (   HMVMX_IS_64BIT_HOST_MODE()
-            && pVM->hm.s.vmx.fSupportsVmcsEfer
+        if (   pVM->hm.s.vmx.fSupportsVmcsEfer
             && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
         {
             val |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;
-            Log4(("Load: VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n"));
-        }
-#endif
+            Log4(("Load: VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n"));
+        }

         /*
     
@@ -3354,16 +3357,8 @@
             val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
             Log4(("Load: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n"));
-
-            /* If the newer VMCS fields for managing EFER exists, use it. */
-            if (   pVM->hm.s.vmx.fSupportsVmcsEfer
-                && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
-            {
-                val |=   VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
-                       | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;
-            }
         }
         else
             Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
-#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
+#else
         if (CPUMIsGuestInLongModeEx(pMixedCtx))
         {
     
@@ -3374,5 +3369,14 @@
         else
             Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
-#endif
+#endif  /* HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
+
+        /* If the newer VMCS fields for managing EFER exists, use it. */
+        if (   pVM->hm.s.vmx.fSupportsVmcsEfer
+            && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
+        {
+            val |=   VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
+                   | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;
+            Log4(("Load: VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR, VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR\n"));
+        }

         /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
     
@@ -4720,6 +4724,5 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
     {
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-        if (HMVMX_IS_64BIT_HOST_MODE())
+        if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
         {
             /*
     
@@ -4729,28 +4732,19 @@
             if (pVM->hm.s.vmx.fSupportsVmcsEfer)
             {
-                /* Not strictly necessary to check hmR0VmxShouldSwapEferMsr() here, but it avoids
-                   one VM-write when we're a nested guest. */
-                if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
-                {
-                    int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
-                    AssertRCReturn(rc,rc);
-                    Log4(("Load: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pMixedCtx->msrEFER));
-                }
+                int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
+                AssertRCReturn(rc,rc);
+                Log4(("Load: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pMixedCtx->msrEFER));
             }
             else
             {
-                if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
-                {
-                    hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */);
-                    /* We need to intercept reads too, see @bugref{7386} comment #16. */
-                    hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
-                    Log4(("Load: MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pMixedCtx->msrEFER,
-                          pVCpu->hm.s.vmx.cMsrs));
-                }
-                else
-                    hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
+                hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */);
+                /* We need to intercept reads too, see @bugref{7386} comment #16. */
+                hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
+                Log4(("Load: MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pMixedCtx->msrEFER,
+                      pVCpu->hm.s.vmx.cMsrs));
             }
         }
-#endif
+        else if (!pVM->hm.s.vmx.fSupportsVmcsEfer)
+            hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
     }
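
For the last two hunks, the reworked flow first asks whether EFER needs swapping at all and only then picks the mechanism: the dedicated VMCS EFER fields when the CPU supports them, otherwise the auto-load/store MSR area, with a stale auto-load/store entry removed when no swap is needed. A minimal sketch of that decision follows; the stub functions are illustrative stand-ins, not the real VMX helpers.

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stubs for the two EFER-loading mechanisms. */
    static void loadEferViaVmcsField(uint64_t uGuestEfer)
    {
        printf("VMWRITE guest EFER VMCS field = %#" PRIx64 "\n", uGuestEfer);
    }

    static void loadEferViaAutoLoadStoreArea(uint64_t uGuestEfer)
    {
        printf("add EFER=%#" PRIx64 " to the auto-load/store MSR area\n", uGuestEfer);
    }

    static void removeEferFromAutoLoadStoreArea(void)
    {
        printf("remove EFER from the auto-load/store MSR area\n");
    }

    /* Mirrors the reworked HM_CHANGED_GUEST_EFER_MSR handling in simplified form. */
    static void loadGuestEfer(bool fShouldSwapEfer, bool fSupportsVmcsEfer, uint64_t uGuestEfer)
    {
        if (fShouldSwapEfer)
        {
            if (fSupportsVmcsEfer)
                loadEferViaVmcsField(uGuestEfer);           /* newer CPUs: VMCS EFER controls */
            else
                loadEferViaAutoLoadStoreArea(uGuestEfer);   /* fallback: MSR load/store area */
        }
        else if (!fSupportsVmcsEfer)
            removeEferFromAutoLoadStoreArea();              /* drop a stale area entry */
    }

    int main(void)
    {
        loadGuestEfer(true, false, UINT64_C(0x800));        /* e.g. guest with only NXE set */
        return 0;
    }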