VirtualBox Changeset r49752

Timestamp: Dec 3, 2013 12:39:31 PM
Author: vboxsync
Message: VMM/HMVMXR0: Drop 64-bit MSR optimizations on hybrid darwin kernels.
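
Before this change, the lazy-MSR code paths were compiled both for true 64-bit hosts and for builds with VBOX_WITH_HYBRID_32BIT_KERNEL, the configuration for a 32-bit darwin kernel on 64-bit hardware, where the run-time macro HMVMX_IS_64BIT_HOST_MODE() decided whether the host was actually executing in 64-bit mode. This changeset drops the hybrid case from these paths, so every guard collapses to a plain HC_ARCH_BITS == 64 check and the run-time test disappears. The recurring pattern, condensed from the hunks below:

    /* Before: built for 64-bit hosts and for hybrid 32-bit darwin kernels,
     * with a run-time check for actually being in 64-bit host mode. */
    #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        if (   HMVMX_IS_64BIT_HOST_MODE()
            && pVM->hm.s.fAllow64BitGuests)
            hmR0VmxLazySaveHostMsrs(pVCpu);
    #endif

    /* After: built for 64-bit hosts only; being in 64-bit host mode is now a
     * compile-time fact, so only the 64-bit-guest check remains. */
    #if HC_ARCH_BITS == 64
        if (pVM->hm.s.fAllow64BitGuests)
            hmR0VmxLazySaveHostMsrs(pVCpu);
    #endif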

File: 1 edited

Legend: lines prefixed with '-' were removed in r49752, lines prefixed with '+' were added; unprefixed lines are unchanged context.
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r49740)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r49752)
@@ -1400,5 +1400,5 @@


-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 64
 /**
  * Saves a set of host MSRs to allow read/write passthru access to the guest and
@@ -1549,5 +1549,5 @@
     pVCpu->hm.s.vmx.fRestoreHostMsrs &= ~(VMX_RESTORE_HOST_MSR_LOADED_GUEST | VMX_RESTORE_HOST_MSR_SAVED_HOST);
 }
-#endif  /* HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
+#endif  /* HC_ARCH_BITS == 64 */


     
@@ -2329,10 +2329,9 @@
         hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE,        VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);

-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 64
         /*
          * Set passthru permissions for the following MSRs (mandatory for VT-x) required for 64-bit guests.
          */
-        if (   HMVMX_IS_64BIT_HOST_MODE()
-            && pVM->hm.s.fAllow64BitGuests)
+        if (pVM->hm.s.fAllow64BitGuests)
         {
             hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR,          VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
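
hmR0VmxSetMsrPermission edits the VMCS MSR bitmap: clearing an MSR's read and write bits lets the guest execute RDMSR/WRMSR on that MSR without causing a VM-exit. The syscall MSRs handled here (LSTAR, STAR, SFMASK, KERNEL_GS_BASE) are passed through only when 64-bit guests are allowed, because only then does the host take over swapping them lazily. A minimal sketch of what such a passthru permission amounts to, using the 4 KB bitmap layout from the Intel SDM rather than VirtualBox's actual helper:

    #include <stdint.h>

    /* Grant passthru read+write access to one MSR in a VT-x MSR bitmap.
     * Layout per the Intel SDM (a clear bit means no VM-exit):
     *   0x000-0x3FF: read bitmap,  MSRs 0x00000000-0x00001FFF
     *   0x400-0x7FF: read bitmap,  MSRs 0xC0000000-0xC0001FFF
     *   0x800-0xBFF: write bitmap, MSRs 0x00000000-0x00001FFF
     *   0xC00-0xFFF: write bitmap, MSRs 0xC0000000-0xC0001FFF */
    static void msrBitmapSetPassthru(uint8_t *pbBitmap, uint32_t idMsr)
    {
        uint32_t const offBase = idMsr >= UINT32_C(0xC0000000) ? 0x400 : 0x000;
        uint32_t const iBit    = idMsr & 0x1FFF;
        pbBitmap[offBase + (iBit >> 3)]         &= ~(uint8_t)(1 << (iBit & 7)); /* read  */
        pbBitmap[offBase + 0x800 + (iBit >> 3)] &= ~(uint8_t)(1 << (iBit & 7)); /* write */
    }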
     
@@ -3007,10 +3006,7 @@

     int rc = VINF_SUCCESS;
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-    if (   HMVMX_IS_64BIT_HOST_MODE()
-        && pVM->hm.s.fAllow64BitGuests)
-    {
+#if HC_ARCH_BITS == 64
+    if (pVM->hm.s.fAllow64BitGuests)
         hmR0VmxLazySaveHostMsrs(pVCpu);
-    }
 #endif

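
hmR0VmxLazySaveHostMsrs caches the host's values of the lazily switched syscall MSRs so they can be put back when leaving VT-x context; the restore sites appear in the fRestoreHostMsrs hunks further down, and the guest-side load shows up near line 7820. A self-contained sketch of that lifecycle, with hypothetical names and user-mode stand-ins for RDMSR/WRMSR (the real state lives in pVCpu->hm.s.vmx and covers four MSRs, not two):

    #include <stdint.h>
    #include <stdio.h>

    /* User-mode stand-ins for the privileged RDMSR/WRMSR instructions. */
    static uint64_t g_uLStar, g_uKernelGsBase; /* pretend hardware registers */
    static uint64_t rdmsr(uint32_t id)             { return id == 0xC0000082 ? g_uLStar : g_uKernelGsBase; }
    static void     wrmsr(uint32_t id, uint64_t u) { if (id == 0xC0000082) g_uLStar = u; else g_uKernelGsBase = u; }

    /* Lazy-switch state, loosely modelled on the flags visible in the diff. */
    typedef struct LAZYMSRS
    {
        uint64_t uHostLStar, uHostKernelGsBase;   /* cached host values   */
        uint64_t uGuestLStar, uGuestKernelGsBase; /* guest context values */
        int      fSavedHost;                      /* ~ VMX_RESTORE_HOST_MSR_SAVED_HOST   */
        int      fLoadedGuest;                    /* ~ VMX_RESTORE_HOST_MSR_LOADED_GUEST */
    } LAZYMSRS;

    static void lazySaveHostMsrs(LAZYMSRS *p) /* before running a 64-bit guest */
    {
        if (!p->fSavedHost)
        {
            p->uHostLStar        = rdmsr(0xC0000082 /* MSR_K8_LSTAR */);
            p->uHostKernelGsBase = rdmsr(0xC0000102 /* MSR_K8_KERNEL_GS_BASE */);
            p->fSavedHost = 1;
        }
    }

    static void lazyLoadGuestMsrs(LAZYMSRS *p) /* shortly before VM-entry */
    {
        lazySaveHostMsrs(p);
        wrmsr(0xC0000082, p->uGuestLStar);
        wrmsr(0xC0000102, p->uGuestKernelGsBase);
        p->fLoadedGuest = 1;
    }

    static void restoreHostMsrs(LAZYMSRS *p)  /* when leaving VT-x context */
    {
        if (p->fLoadedGuest)
        {
            wrmsr(0xC0000082, p->uHostLStar);
            wrmsr(0xC0000102, p->uHostKernelGsBase);
        }
        p->fSavedHost = p->fLoadedGuest = 0;  /* mirrors the &= ~(...) in the first hunk */
    }

    int main(void)
    {
        LAZYMSRS St = { 0 };
        St.uGuestLStar = 0x1111; St.uGuestKernelGsBase = 0x2222;
        g_uLStar = 0xAAAA; g_uKernelGsBase = 0xBBBB;  /* "host" values */
        lazyLoadGuestMsrs(&St);
        printf("LSTAR while guest state is loaded: %#llx\n", (unsigned long long)rdmsr(0xC0000082));
        restoreHostMsrs(&St);
        printf("LSTAR after leaving VT-x context:  %#llx\n", (unsigned long long)rdmsr(0xC0000082));
        return 0;
    }

The point of "lazy" here is that the host values are read once per stay in VT-x context and written back only if guest values were actually loaded, instead of paying for RDMSR/WRMSR pairs on every world switch.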
     
@@ -4457,14 +4453,11 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
     {
+#if HC_ARCH_BITS == 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
         if (pVM->hm.s.fAllow64BitGuests)
         {
-#if HC_ARCH_BITS == 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-            if (!HMVMX_IS_64BIT_HOST_MODE())
-            {
-                hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR,          pMixedCtx->msrLSTAR,        false /* fUpdateHostMsr */);
-                hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR,           pMixedCtx->msrSTAR,         false /* fUpdateHostMsr */);
-                hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK,        pMixedCtx->msrSFMASK,       false /* fUpdateHostMsr */);
-                hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false /* fUpdateHostMsr */);
-            }
+            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR,          pMixedCtx->msrLSTAR,        false /* fUpdateHostMsr */);
+            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR,           pMixedCtx->msrSTAR,         false /* fUpdateHostMsr */);
+            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK,        pMixedCtx->msrSFMASK,       false /* fUpdateHostMsr */);
+            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false /* fUpdateHostMsr */);
 # ifdef DEBUG
             PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
@@ -4472,6 +4465,6 @@
                 Log4(("Load: MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
 # endif
-#endif
-        }
+        }
+#endif
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
     }
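
On hosts that cannot keep the guest's syscall MSRs loaded themselves (32-bit hosts, and previously hybrid kernels whenever not in 64-bit host mode), the values go through the VMCS auto-load/store area instead: the CPU loads the listed MSRs at VM-entry and stores them at VM-exit, which is what hmR0VmxAddAutoLoadStoreMsr arranges. A sketch of the entry format and an add-or-update helper (hypothetical; the real helper also updates the VMCS entry/exit MSR counts):

    #include <stdint.h>

    /* VM-entry/VM-exit MSR area entry, per the Intel SDM (16 bytes each);
     * compare the u32Msr/u64Value fields logged via PVMXAUTOMSR above. */
    typedef struct AUTOMSR
    {
        uint32_t u32Msr;      /* MSR index             */
        uint32_t u32Reserved; /* must be zero          */
        uint64_t u64Value;    /* value loaded on entry */
    } AUTOMSR;

    static int addAutoLoadStoreMsr(AUTOMSR *paMsrs, uint32_t *pcMsrs, uint32_t cMax,
                                   uint32_t idMsr, uint64_t uGuestValue)
    {
        for (uint32_t i = 0; i < *pcMsrs; i++)  /* already present: update in place */
            if (paMsrs[i].u32Msr == idMsr)
            {
                paMsrs[i].u64Value = uGuestValue;
                return 0;
            }
        if (*pcMsrs >= cMax)                    /* area full (hardware-limited) */
            return -1;
        paMsrs[*pcMsrs].u32Msr      = idMsr;
        paMsrs[*pcMsrs].u32Reserved = 0;
        paMsrs[*pcMsrs].u64Value    = uGuestValue;
        ++*pcMsrs;
        return 0;
    }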
     
@@ -5846,7 +5839,6 @@
 static int hmR0VmxSaveGuestLazyMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-    if (   HMVMX_IS_64BIT_HOST_MODE()
-        && pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+#if HC_ARCH_BITS == 64
+    if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
     {
         /* We should not get preempted to a different CPU at this point while reading the MSRs. */
@@ -5865,9 +5857,6 @@
     }
     else
-    {
-        /* Darwin 32-bit/PAE kernel or 64-bit host running 32-bit guest. */
         pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LAZY_MSRS;
-    }
-#else   /* HC_ARCH_BITS == 32 */
+#else
     NOREF(pMixedCtx);
     pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LAZY_MSRS;
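
The surviving comment in this hunk states the key constraint: these MSRs are per-CPU hardware state, so the guest's values must be read back on the CPU that ran the guest, before the thread can be rescheduled. The real code expects preemption to be disabled already at this point; the sketch below makes the bracket explicit, with hypothetical stubs (in VirtualBox, IPRT's RTThreadPreemptDisable/RTThreadPreemptRestore serve this purpose):

    #include <stdint.h>

    /* Hypothetical stand-ins for a preemption-control API and RDMSR. */
    static void     preemptDisable(void)  { /* pin this thread to the current CPU */ }
    static void     preemptRestore(void)  { /* allow rescheduling again           */ }
    static uint64_t rdmsr(uint32_t idMsr) { (void)idMsr; return 0; }

    /* Read the guest's lazily-switched MSR back into its saved context;
     * preemption stays disabled so the value comes from the right CPU. */
    static void saveGuestLazyMsr(uint64_t *puGuestLStar)
    {
        preemptDisable();
        *puGuestLStar = rdmsr(0xC0000082 /* MSR_K8_LSTAR */);
        preemptRestore();
    }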
     
@@ -6642,8 +6631,7 @@
 #endif

-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 64
     /* Restore the host MSRs as we're leaving VT-x context. */
-    if (   HMVMX_IS_64BIT_HOST_MODE()
-        && pVM->hm.s.fAllow64BitGuests
+    if (   pVM->hm.s.fAllow64BitGuests
         && pVCpu->hm.s.vmx.fRestoreHostMsrs)
     {
@@ -6870,8 +6858,7 @@
 #endif

-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 64
         /* Restore the host MSRs as we're leaving VT-x context. */
-        if (   HMVMX_IS_64BIT_HOST_MODE()
-            && pVM->hm.s.fAllow64BitGuests
+        if (   pVM->hm.s.fAllow64BitGuests
             && pVCpu->hm.s.vmx.fRestoreHostMsrs)
         {
     
@@ -7829,10 +7816,7 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
     {
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-        if (   HMVMX_IS_64BIT_HOST_MODE()
-            && pVM->hm.s.fAllow64BitGuests)
-        {
+#if HC_ARCH_BITS == 64
+        if (pVM->hm.s.fAllow64BitGuests)
             hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
-        }
 #endif
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
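
hmR0VmxLazyLoadGuestMsrs runs only while the HM_CHANGED_GUEST_LAZY_MSRS bit is pending, and the bit is cleared afterwards; the WRMSR exit handler in the last two hunks is what sets it. A minimal sketch of this changed-flag pattern (illustrative bit value; the real flags are per-VCPU):

    #include <stdint.h>

    #define CHANGED_GUEST_LAZY_MSRS UINT32_C(0x0001) /* illustrative bit */

    /* Run the expensive reload only when someone flagged the state dirty,
     * then clear the flag so the next world switch skips the work. */
    static void loadGuestLazyMsrsIfDirty(uint32_t *pfChangedFlags)
    {
        if (*pfChangedFlags & CHANGED_GUEST_LAZY_MSRS)
        {
            /* ...write the guest MSR values into hardware... */
            *pfChangedFlags &= ~CHANGED_GUEST_LAZY_MSRS;
        }
    }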
     
@@ -9935,7 +9919,6 @@
             HMVMX_RETURN_UNEXPECTED_EXIT();
         }
-# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-        if (   HMVMX_IS_64BIT_HOST_MODE()
-            && pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests
+# if HC_ARCH_BITS == 64
+        if (   pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests
             && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
         {
     
@@ -10017,10 +10000,7 @@
                     if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
                         HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-                    else if (   HMVMX_IS_64BIT_HOST_MODE()
-                             && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
-                    {
+#if HC_ARCH_BITS == 64
+                    else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
                         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
-                    }
 #endif
                     break;
@@ -10054,7 +10034,6 @@
                     }

-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-                    if (   HMVMX_IS_64BIT_HOST_MODE()
-                        && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
+#if HC_ARCH_BITS == 64
+                    if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
                     {
                         AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
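
On the WRMSR exit path the handler only marks shadowed state dirty: writes to an auto-load/store MSR flag HM_CHANGED_VMX_GUEST_AUTO_MSRS, while writes to a lazily switched MSR flag HM_CHANGED_GUEST_LAZY_MSRS. The AssertMsgFailed in the final hunk encodes the invariant that an MSR granted passthru permissions can never cause a WRMSR exit in the first place; if it fires, the MSR bitmap is misconfigured. A sketch of the dispatch, with hypothetical predicates standing in for hmR0VmxIsAutoLoadStoreGuestMsr and hmR0VmxIsLazyGuestMsr:

    #include <stdint.h>

    #define CHANGED_VMX_GUEST_AUTO_MSRS UINT32_C(0x0002) /* illustrative bits */
    #define CHANGED_GUEST_LAZY_MSRS     UINT32_C(0x0001)

    /* Hypothetical classification helpers; in reality an MSR is auto-load/store
     * or lazy depending on host bitness, never both at once. */
    static int isAutoLoadStoreMsr(uint32_t idMsr) { return idMsr == 0xC0000081 /* STAR  */; }
    static int isLazyMsr(uint32_t idMsr)          { return idMsr == 0xC0000082 /* LSTAR */; }

    /* After emulating a trapped guest WRMSR, flag the matching shadow state
     * dirty so it is re-synced into hardware before the next VM-entry. */
    static void onWrMsrExit(uint32_t idMsr, uint32_t *pfChangedFlags)
    {
        if (isAutoLoadStoreMsr(idMsr))
            *pfChangedFlags |= CHANGED_VMX_GUEST_AUTO_MSRS;
        else if (isLazyMsr(idMsr))
            *pfChangedFlags |= CHANGED_GUEST_LAZY_MSRS;
    }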