
Changeset 51220 in vbox for trunk/src/VBox/VMM


Timestamp: May 9, 2014 1:51:16 AM (11 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 93632
Message:

VMM/HMVMXR0: Implemented EFER swapping using VMCS controls.
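
For orientation, here is a minimal standalone C sketch of the decision encoded by the new hmR0VmxShouldSwapEferMsr() in the HMVMXR0.cpp diff below. The names shouldSwapEfer, EFER_SCE and EFER_NXE are hypothetical and not part of the changeset; this is a sketch of the idea, not VirtualBox code. EFER only needs to be swapped through the VMCS controls when a bit the guest actually relies on differs between host and guest values (SCE for 64-bit-capable guests, or NXE when the guest uses PAE paging); LMA/LME are handled by the VM-entry controls.

    /* Hedged sketch only: mirrors the host/guest EFER comparison described above. */
    #include <stdbool.h>
    #include <stdint.h>

    #define EFER_SCE  UINT64_C(0x0001)   /* bit 0: SYSCALL enable */
    #define EFER_NXE  UINT64_C(0x0800)   /* bit 11: no-execute enable */

    static bool shouldSwapEfer(uint64_t uHostEfer, uint64_t uGuestEfer,
                               bool fAllow64BitGuests, bool fGuestPae)
    {
        /* SYSCALL behaviour would differ between host and guest. */
        if (fAllow64BitGuests && ((uHostEfer ^ uGuestEfer) & EFER_SCE))
            return true;

        /* NX behaviour would differ and the guest uses PAE (or long-mode) paging. */
        if (fGuestPae && ((uHostEfer ^ uGuestEfer) & EFER_NXE))
            return true;

        return false;   /* Otherwise skip the relatively expensive swap. */
    }

The changeset additionally gates the swap on fSupportsVmcsEfer, i.e. whether the CPU exposes the VM-entry load-guest-EFER and VM-exit load-host-EFER/save-guest-EFER controls, which is probed once during VMX setup in the HMVMXR0.cpp diff.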

Location: trunk/src/VBox/VMM
Files: 4 edited

Legend:

  ' ' Unmodified
  '+' Added
  '-' Removed
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r51186 → r51220

@@ -1359 +1359 @@
      * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
      */
-    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_SVM_GUEST_EFER_MSR))
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
     {
         pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
         pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
-        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_SVM_GUEST_EFER_MSR);
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
     }
 

@@ -4433 +4433 @@
         }
         else if (pCtx->ecx == MSR_K6_EFER)
-            HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_EFER_MSR);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
         else if (pCtx->ecx == MSR_IA32_TSC)
             pSvmTransient->fUpdateTscOffsetting = true;
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r51182 → r51220

@@ -1556 +1556 @@
 
 
+/**
+ * Verifies that our cached values of the VMCS controls are all
+ * consistent with what's actually present in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param pVCpu     Pointer to the VMCPU.
+ */
+static int hmR0VmxCheckVmcsCtls(PVMCPU pVCpu)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+    uint32_t u32Val;
+    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val);
+    AssertRCReturn(rc, rc);
+    AssertMsgReturn(pVCpu->hm.s.vmx.u32EntryCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32EntryCtls, u32Val),
+                    VERR_VMX_ENTRY_CTLS_CACHE_INVALID);
+
+    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val);
+    AssertRCReturn(rc, rc);
+    AssertMsgReturn(pVCpu->hm.s.vmx.u32ExitCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ExitCtls, u32Val),
+                    VERR_VMX_EXIT_CTLS_CACHE_INVALID);
+
+    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);
+    AssertRCReturn(rc, rc);
+    AssertMsgReturn(pVCpu->hm.s.vmx.u32PinCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32PinCtls, u32Val),
+                    VERR_VMX_PIN_EXEC_CTLS_CACHE_INVALID);
+
+    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
+    AssertRCReturn(rc, rc);
+    AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls, u32Val),
+                    VERR_VMX_PROC_EXEC_CTLS_CACHE_INVALID);
+
+    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val);
+    AssertRCReturn(rc, rc);
+    AssertMsgReturn(pVCpu->hm.s.vmx.u32ProcCtls2 == u32Val, ("Cache=%#RX32 VMCS=%#RX32", pVCpu->hm.s.vmx.u32ProcCtls2, u32Val),
+                    VERR_VMX_PROC_EXEC2_CTLS_CACHE_INVALID);
+
+    return VINF_SUCCESS;
+}
+
+
 #ifdef VBOX_STRICT
+/**
+ * Verifies that our cached host EFER value has not changed
+ * since we cached it.
+ *
+ * @param pVCpu         Pointer to the VMCPU.
+ */
+static void hmR0VmxCheckHostEferMsr(PVMCPU pVCpu)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+    if (pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
+    {
+        uint64_t u64Val;
+        int rc = VMXReadVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, &u64Val);
+        AssertRC(rc);
+
+        uint64_t u64HostEferMsr = ASMRdMsr(MSR_K6_EFER);
+        AssertMsgReturnVoid(u64HostEferMsr == u64Val, ("u64HostEferMsr=%#RX64 u64Val=%#RX64\n", u64HostEferMsr, u64Val));
+    }
+}
+
+
 /**
  * Verifies whether the guest/host MSR pairs in the auto-load/store area in the

@@ -2660 +2723 @@
         return rc;
     }
+
+    /* Check if we can use the VMCS controls for swapping the EFER MSR. */
+    Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    if (   HMVMX_IS_64BIT_HOST_MODE()
+        && (pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
+        && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1  & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
+        && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1  & VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR))
+    {
+        pVM->hm.s.vmx.fSupportsVmcsEfer = true;
+    }
+#endif
 
     for (VMCPUID i = 0; i < pVM->cCpus; i++)

@@ -3079 +3154 @@
     AssertRCReturn(rc, rc);
 
-    /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT, IA32_EFER, also see
-     *        hmR0VmxSetupExitCtls() !! */
+    /*
+     * If the CPU supports the newer VMCS controls for managing EFER, use it.
+     */
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    if (   HMVMX_IS_64BIT_HOST_MODE()
+        && pVM->hm.s.vmx.fSupportsVmcsEfer)
+    {
+        rc = VMXWriteVmcs64(VMX_VMCS64_HOST_FIELD_EFER_FULL, pVM->hm.s.vmx.u64HostEfer);
+        AssertRCReturn(rc, rc);
+    }
+#endif
+
+    /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see
+     *        hmR0VmxLoadGuestExitCtls() !! */
+
     return rc;
+}
+
+
+/**
+ * Figures out if we need to swap the EFER MSR which is
+ * particularly expensive.
+ *
+ * We check all relevant bits. For now, that's everything
+ * besides LMA/LME, as these two bits are handled by VM-entry,
+ * see hmR0VmxLoadGuestExitCtls() and
+ * hmR0VMxLoadGuestEntryCtls().
+ *
+ * @returns true if we need to load guest EFER, false otherwise.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ *
+ * @remarks Requires EFER, CR4.
+ * @remarks No-long-jump zone!!!
+ */
+static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    PVM      pVM          = pVCpu->CTX_SUFF(pVM);
+    uint64_t u64HostEfer  = pVM->hm.s.vmx.u64HostEfer;
+    uint64_t u64GuestEfer = pMixedCtx->msrEFER;
+
+    /*
+     * For 64-bit guests, if EFER.SCE bit differs, we need to swap to ensure that the
+     * guest's SYSCALL behaviour isn't screwed. See @bugref{7386}.
+     */
+    if (   pVM->hm.s.fAllow64BitGuests
+        && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
+    {
+        return true;
+    }
+
+    /*
+     * If the guest uses PAE and EFER.NXE bit differs, we need to swap as it affects guest paging.
+     * 64-bit paging implies CR4.PAE as well. See Intel spec. 4.5 "IA32e Paging".
+     */
+    if (   (pMixedCtx->cr4 & X86_CR4_PAE)
+        && (u64GuestEfer & MSR_K6_EFER_NXE) != (u64HostEfer & MSR_K6_EFER_NXE))
+    {
+        return true;
+    }
+
+    /** @todo Check the latest Intel spec. for any other bits,
+     *        like SMEP/SMAP? */
+    return false;
 }

@@ -3096 +3234 @@
  *                      before using them.
  *
+ * @remarks Requires EFER.
  * @remarks No-long-jump zone!!!
  */

@@ -3112 +3251 @@
         /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
         if (CPUMIsGuestInLongModeEx(pMixedCtx))
+        {
             val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
+            Log4(("Load: VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n"));
+        }
         else
             Assert(!(val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
+
+        /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. */
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+        if (   HMVMX_IS_64BIT_HOST_MODE()
+            && pVM->hm.s.vmx.fSupportsVmcsEfer
+            && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
+        {
+            val |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;
+            Log4(("Load: VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n"));
+        }
+#endif
 
         /*

@@ -3123 +3276 @@
 
         /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
-         *        VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR,
-         *        VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR */
+         *        VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR. */
 
         if ((val & zap) != val)

@@ -3155 +3307 @@
  *                      before using them.
  *
- * @remarks requires EFER.
+ * @remarks Requires EFER.
  */
 DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)

@@ -3177 +3329 @@
 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
         if (HMVMX_IS_64BIT_HOST_MODE())
+        {
             val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
+            Log4(("Load: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n"));
+
+            /* If the newer VMCS fields for managing EFER exists, use it. */
+            if (   pVM->hm.s.vmx.fSupportsVmcsEfer
+                && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
+            {
+                val |=   VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR
+                       | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;
+            }
+        }
         else
             Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
 #elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
         if (CPUMIsGuestInLongModeEx(pMixedCtx))
-            val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;    /* The switcher goes to long mode. */
+        {
+            /* The switcher returns to long mode, EFER is managed by the switcher. */
+            val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
+            Log4(("Load: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n"));
+        }
         else
             Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));

@@ -3192 +3359 @@
         /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
          *        VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
-         *        VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR,
-         *        VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR,
-         *        VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR. */
+         *        VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR. */
 
         if (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER)

@@ -4530 +4695 @@
     }
 
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))
+    {
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+        if (   HMVMX_IS_64BIT_HOST_MODE()
+            && pVM->hm.s.vmx.fSupportsVmcsEfer
+            && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))  /* Not really needed here, but avoids a VM-write as a nested guest. */
+        {
+            int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
+            AssertRCReturn(rc,rc);
+            Log4(("Load: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pMixedCtx->msrEFER));
+        }
+#endif
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
+    }
+
     return VINF_SUCCESS;
 }

@@ -4587 +4767 @@
             {
                 /* Currently, all mode changes sends us back to ring-3, so these should be set. See @bugref{6944}. */
-                AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS),
-                          ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
+                AssertMsg(HMCPU_CF_IS_SET(pVCpu,   HM_CHANGED_HOST_CONTEXT
+                                                 | HM_CHANGED_VMX_EXIT_CTLS
+                                                 | HM_CHANGED_VMX_ENTRY_CTLS
+                                                 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
             }
             pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;

@@ -4606 +4788 @@
             {
                 /* Currently, all mode changes sends us back to ring-3, so these should be set. See @bugref{6944}. */
-                AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_EXIT_CTLS | HM_CHANGED_VMX_ENTRY_CTLS),
-                          ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
+                AssertMsg(HMCPU_CF_IS_SET(pVCpu,   HM_CHANGED_HOST_CONTEXT
+                                                 | HM_CHANGED_VMX_EXIT_CTLS
+                                                 | HM_CHANGED_VMX_ENTRY_CTLS
+                                                 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
             }
             pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;

@@ -7892 +8076 @@
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
 
+    /* This needs to be done after hmR0VmxLoadGuestEntryCtls() and hmR0VmxLoadGuestExitCtls() as it may alter controls if we
+       determine we don't have to swap EFER after all. */
     rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadSharedMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

@@ -8003 +8189 @@
               ||  HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE),
               ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));
-
-#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
-    uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
-    if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
-        Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
-#endif
 }
 

@@ -8282 +8462 @@
         }
     }
+
 #ifdef VBOX_STRICT
     hmR0VmxCheckAutoLoadStoreMsrs(pVCpu);
+    hmR0VmxCheckHostEferMsr(pVCpu);
+    AssertRC(hmR0VmxCheckVmcsCtls(pVCpu));
+#endif
+#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
+    uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
+    if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
+        Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
 #endif
 }

@@ -8342 +8530 @@
 #endif
     pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED;          /* Use VMRESUME instead of VMLAUNCH in the next run. */
+#ifdef VBOX_STRICT
+    hmR0VmxCheckHostEferMsr(pVCpu);                                   /* Verify that VMRUN/VMLAUNCH didn't modify host EFER. */
+#endif
     ASMSetFlags(pVmxTransient->uEflags);                              /* Enable interrupts. */
     VMMRZCallRing3Enable(pVCpu);                                      /* It is now safe to do longjmps to ring-3!!! */

@@ -8763 +8954 @@
  * @param   pVCpu   Pointer to the VMCPU.
  * @param   pCtx    Pointer to the guest-CPU state.
+ *
+ * @remarks This function assumes our cache of the VMCS controls
+ *          are valid, i.e. hmR0VmxCheckVmcsCtls() succeeded.
  */
 static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)

@@ -8964 +9158 @@
         if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
         {
+            Assert(pVM->hm.s.vmx.fSupportsVmcsEfer);
             rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
             AssertRCBreak(rc);
             HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
                               VMX_IGS_EFER_MSR_RESERVED);               /* Bits 63:12, bit 9, bits 7:1 MBZ. */
-            HMVMX_CHECK_BREAK((u64Val & MSR_K6_EFER_LMA) == (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
+            HMVMX_CHECK_BREAK(RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
                               VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
             HMVMX_CHECK_BREAK(   fUnrestrictedGuest
-                              || (u64Val & MSR_K6_EFER_LMA) == (u32GuestCR0 & X86_CR0_PG), VMX_IGS_EFER_LMA_PG_MISMATCH);
+                              || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u32GuestCR0 & X86_CR0_PG),
+                              VMX_IGS_EFER_LMA_PG_MISMATCH);
         }
 

@@ -9958 +10154 @@
     AssertRCReturn(rc, rc);
 
+    rc = hmR0VmxCheckVmcsCtls(pVCpu);
+    AssertRCReturn(rc, rc);
+
     uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
     NOREF(uInvalidReason);

@@ -10163 +10362 @@
         else if (pMixedCtx->ecx == MSR_IA32_TSC)        /* Windows 7 does this during bootup. See @bugref{6398}. */
             pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
+        else if (pMixedCtx->ecx == MSR_K6_EFER)
+        {
+            /*
+             * If the guest touches EFER we need to update the VM-Entry and VM-Exit controls as well,
+             * even if it is -not- touching bits that cause paging mode changes (LMA/LME). We care about
+             * the other bits as well, SCE and NXE. See @bugref{7368}.
+             */
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS);
+        }
 
         /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r50918 → r51220

@@ -352 +352 @@
      * Misc initialisation.
      */
-    //pVM->hm.s.vmx.fSupported = false;
-    //pVM->hm.s.svm.fSupported = false;
-    //pVM->hm.s.vmx.fEnabled   = false;
-    //pVM->hm.s.svm.fEnabled   = false;
-    //pVM->hm.s.fNestedPaging  = false;
-
+#if 0
+    pVM->hm.s.vmx.fSupported = false;
+    pVM->hm.s.svm.fSupported = false;
+    pVM->hm.s.vmx.fEnabled   = false;
+    pVM->hm.s.svm.fEnabled   = false;
+    pVM->hm.s.fNestedPaging  = false;
+#endif
 
     /*

@@ -1260 +1261 @@
     }
 
+    LogRel(("HM: Supports VMCS EFER fields       = %RTbool\n", pVM->hm.s.vmx.fSupportsVmcsEfer));
     LogRel(("HM: VMX enabled!\n"));
     pVM->hm.s.vmx.fEnabled = true;
  • trunk/src/VBox/VMM/include/HMInternal.h

    r51083 → r51220

@@ -148 +148 @@
 #define HM_CHANGED_GUEST_SYSENTER_EIP_MSR        RT_BIT(14)
 #define HM_CHANGED_GUEST_SYSENTER_ESP_MSR        RT_BIT(15)
-#define HM_CHANGED_GUEST_LAZY_MSRS               RT_BIT(16)     /* Shared */
+#define HM_CHANGED_GUEST_EFER_MSR                RT_BIT(16)
+#define HM_CHANGED_GUEST_LAZY_MSRS               RT_BIT(17)     /* Shared */
 /* VT-x specific state. */
-#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(17)
-#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(18)
-#define HM_CHANGED_VMX_GUEST_APIC_STATE          RT_BIT(19)
-#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(20)
-#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(21)
+#define HM_CHANGED_VMX_GUEST_AUTO_MSRS           RT_BIT(18)
+#define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE      RT_BIT(19)
+#define HM_CHANGED_VMX_GUEST_APIC_STATE          RT_BIT(20)
+#define HM_CHANGED_VMX_ENTRY_CTLS                RT_BIT(21)
+#define HM_CHANGED_VMX_EXIT_CTLS                 RT_BIT(22)
 /* AMD-V specific state. */
-#define HM_CHANGED_SVM_GUEST_EFER_MSR            RT_BIT(17)
 #define HM_CHANGED_SVM_GUEST_APIC_STATE          RT_BIT(18)
 #define HM_CHANGED_SVM_RESERVED1                 RT_BIT(19)
 #define HM_CHANGED_SVM_RESERVED2                 RT_BIT(20)
 #define HM_CHANGED_SVM_RESERVED3                 RT_BIT(21)
+#define HM_CHANGED_SVM_RESERVED4                 RT_BIT(22)
 
 #define HM_CHANGED_ALL_GUEST                     (  HM_CHANGED_GUEST_CR0                \

@@ -178 +179 @@
                                                   | HM_CHANGED_GUEST_SYSENTER_EIP_MSR   \
                                                   | HM_CHANGED_GUEST_SYSENTER_ESP_MSR   \
+                                                  | HM_CHANGED_GUEST_EFER_MSR           \
                                                   | HM_CHANGED_GUEST_LAZY_MSRS          \
                                                   | HM_CHANGED_VMX_GUEST_AUTO_MSRS      \

@@ -185 +187 @@
                                                   | HM_CHANGED_VMX_EXIT_CTLS)
 
-#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(22)
+#define HM_CHANGED_HOST_CONTEXT                  RT_BIT(23)
 
 /* Bits shared between host and guest. */

@@ -414 +416 @@
         /** Host EFER value (set by ring-0 VMX init) */
         uint64_t                    u64HostEfer;
+        /** Whether the CPU supports VMCS fields for swapping EFER. */
+        bool                        fSupportsVmcsEfer;
+        bool                        afAlignment1[7];
 
         /** VMX MSR values */