VirtualBox

Changeset 42156 in vbox


Timestamp:
Jul 16, 2012 6:59:45 AM
Author:
vboxsync
Message:

VMM/VMMR0: HWVMXR0: Use MSR auto load/store areas in the VMCS. Added IA32_TSC_AUX for auto load/store. Cleanups.
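
For reference, each entry in a VMX MSR auto load/store area is a 16-byte record: the MSR index, a reserved field that must be zero, and the 64-bit MSR value; the entry count is written to the corresponding VMCS MSR-load/store count field. A minimal C sketch of that layout and of appending an entry (the field names mirror the VMXMSR structure in the diff below; the stand-alone definitions and the AddAutoMsr() helper are illustrative assumptions, not VirtualBox headers):

    #include <stdint.h>

    /* One entry of a VM-entry/VM-exit MSR load/store area (see Intel SDM). */
    typedef struct VMXMSR
    {
        uint32_t u32IndexMSR;   /* MSR index, e.g. IA32_TSC_AUX (0xc0000103). */
        uint32_t u32Reserved;   /* Must be zero. */
        uint64_t u64Value;      /* Value to load, or slot the CPU stores into. */
    } VMXMSR;

    /* Hypothetical helper: append one MSR to an area and return the new count,
       which ultimately goes into a VMCS *_MSR_LOAD/STORE_COUNT field. */
    static unsigned AddAutoMsr(VMXMSR *paMsrs, unsigned idxMsr, uint32_t idMsr, uint64_t u64Value)
    {
        paMsrs[idxMsr].u32IndexMSR = idMsr;
        paMsrs[idxMsr].u32Reserved = 0;
        paMsrs[idxMsr].u64Value    = u64Value;
        return idxMsr + 1;
    }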

Location:
trunk/src/VBox/VMM
Files:
4 edited

Legend: lines prefixed with ' ' are unmodified, '+' added, '-' removed;
'@@ -old +new @@' headers give the starting line numbers of each hunk.
  • trunk/src/VBox/VMM/VMMR0/HWACCMR0Mixed.mac

    r30414 → r42156

    @@ -404 +404 @@
     %endif
     
    -    ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
    -    ;; @todo use the automatic load feature for MSRs
    +%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    +    ; Save the host MSRs and load the guest MSRs
         LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
     %if 0  ; not supported on Intel CPUs
    @@ -412 +412 @@
         LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
         LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    +%endif
    +    ; Kernel GS Base is special, we need to manually load/store it, see @bugref{6208}
         LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
     
    @@ -538 +540 @@
         pop     xSI         ; pCtx (needed in rsi by the macros below)
     
    -    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    -    ;; @todo use the automatic load feature for MSRs
    +    ; Kernel GS Base is special, we need to manually load/store it, see @bugref{6208}.
         LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    +%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    +    ; Save the guest MSRs and load the host MSRs
         LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
         LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
    @@ -547 +550 @@
     %endif
         LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    +%endif
     
     %ifdef VMX_USE_CACHED_VMCS_ACCESSES
    @@ -609 +613 @@
         pop     xSI         ; pCtx (needed in rsi by the macros below)
     
    -    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    -    ;; @todo use the automatic load feature for MSRs
    +    ; Kernel GS base is special, we need to manually load/store it See @bugref{6208}.
         LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    +%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    +    ; Load the host MSRs
         LOADHOSTMSR MSR_K8_SF_MASK
         LOADHOSTMSR MSR_K6_STAR
    @@ -618 +623 @@
     %endif
         LOADHOSTMSR MSR_K8_LSTAR
    +%endif
     
     %ifdef VMX_USE_CACHED_VMCS_ACCESSES
    @@ -656 +662 @@
         pop     xSI         ; pCtx (needed in rsi by the macros below)
     
    -    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    -    ;; @todo use the automatic load feature for MSRs
    +    ; Kernel GS base is special, load it manually. See @bugref{6208}.
         LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    +%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    +    ; Load the host MSRs
         LOADHOSTMSR MSR_K8_SF_MASK
         LOADHOSTMSR MSR_K6_STAR
    @@ -665 +672 @@
     %endif
         LOADHOSTMSR MSR_K8_LSTAR
    +%endif
     
     %ifdef VMX_USE_CACHED_VMCS_ACCESSES
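
    Note that after this change the assembly only hand-loads the syscall MSRs when VBOX_WITH_AUTO_MSR_LOAD_RESTORE is undefined, while KERNEL_GS_BASE is swapped manually in all builds (see @bugref{6208}). In rough C terms the LOADGUESTMSR/LOADHOSTMSREX macros do the following for that MSR; this is an illustrative sketch under those assumptions, not the real macros, and uSavedHostValue stands in for the stack slot the macros use:

        #include <stdint.h>

        #define MSR_K8_KERNEL_GS_BASE 0xc0000102

        /* IPRT wrappers around the rdmsr/wrmsr instructions. */
        extern uint64_t ASMRdMsr(uint32_t uRegister);
        extern void     ASMWrMsr(uint32_t uRegister, uint64_t u64Val);

        static uint64_t uSavedHostValue;   /* stand-in for the macros' stack slot */

        /* Before VM-entry: save the host value, load the guest's. */
        static void LoadGuestKernelGSBase(uint64_t uGuestValue)
        {
            uSavedHostValue = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
            ASMWrMsr(MSR_K8_KERNEL_GS_BASE, uGuestValue);
        }

        /* After VM-exit: capture the guest value (written back to
           pCtx->msrKERNELGSBASE) and restore the host's. */
        static uint64_t RestoreHostKernelGSBase(void)
        {
            uint64_t uGuestValue = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
            ASMWrMsr(MSR_K8_KERNEL_GS_BASE, uSavedHostValue);
            return uGuestValue;
        }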
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r42056 → r42156

    @@ -210 +210 @@
     
     #ifdef LOG_ENABLED
    -    SUPR0Printf("VMXR0InitVM %x\n", pVM);
    +    SUPR0Printf("VMXR0InitVM %p\n", pVM);
     #endif
     
    @@ -626 +626 @@
                 AssertRC(rc);
     
    -            /* Allow the guest to directly modify these MSRs; they are restored and saved automatically. */
    +            /*
    +             * Allow the guest to directly modify these MSRs; they are loaded/stored automatically
    +             * using MSR-load/store areas in the VMCS.
    +             */
                 hmR0VmxSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_CS, true, true);
                 hmR0VmxSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_ESP, true, true);
    @@ -636 +639 @@
                 hmR0VmxSetMSRPermission(pVCpu, MSR_K8_GS_BASE, true, true);
                 hmR0VmxSetMSRPermission(pVCpu, MSR_K8_FS_BASE, true, true);
    +            if (pVCpu->hwaccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    +                hmR0VmxSetMSRPermission(pVCpu, MSR_K8_TSC_AUX, true, true);
             }
     
    @@ -647 +652 @@
             rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
             AssertRC(rc);
    -
             Assert(pVCpu->hwaccm.s.vmx.pHostMSRPhys);
             rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL,  pVCpu->hwaccm.s.vmx.pHostMSRPhys);
    @@ -656 +660 @@
             AssertRC(rc);
             rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, 0);
    +        AssertRC(rc);
    +        rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_LOAD_COUNT, 0);
             AssertRC(rc);
     
    @@ -1319 +1325 @@
              * Check if EFER MSR present.
              */
    -        if (ASMCpuId_EDX(0x80000001) & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
    -        {
    -            if (ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
    +        uint32_t u32HostExtFeatures = ASMCpuId_EDX(0x80000001);
    +        if (u32HostExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
    +        {
    +            if (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)
                 {
                     pMsr->u32IndexMSR = MSR_K6_STAR;
    @@ -1354 +1361 @@
                pMsr->u64Value    = ASMRdMsr(MSR_K8_SF_MASK);           /* syscall flag mask */
                 pMsr++; idxMsr++;
    +
    +            /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208}  */
    +#if 0
                 pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
                 pMsr->u32Reserved = 0;
                 pMsr->u64Value    = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);    /* swapgs exchange value */
                 pMsr++; idxMsr++;
    +#endif
             }
     # endif
    +
    +        if (pVCpu->hwaccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    +        {
    +            Assert(u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
    +            pMsr->u32IndexMSR = MSR_K8_TSC_AUX;
    +            pMsr->u32Reserved = 0;
    +            pMsr->u64Value    = ASMRdMsr(MSR_K8_TSC_AUX);
    +            pMsr++; idxMsr++;
    +        }
    +
    +        /** @todo r=ramshankar: check IA32_VMX_MISC bits 27:25 for valid idxMsr
    +         *        range. */
             rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_LOAD_COUNT, idxMsr);
             AssertRC(rc);
    +
    +        pVCpu->hwaccm.s.vmx.cCachedMSRs = idxMsr;
     #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
     
    @@ -2093 +2118 @@
     
     #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    -    /* Store all guest MSRs in the VM-Entry load area, so they will be loaded during the world switch. */
    +    /*
    +     * Store all guest MSRs in the VM-entry load area, so they will be loaded
    +     * during the world switch.
    +     */
         PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pGuestMSR;
         unsigned idxMsr = 0;
     
    -    uint32_t ulEdx;
    -    uint32_t ulTemp;
    -    CPUMGetGuestCpuId(pVCpu, 0x80000001, &ulTemp, &ulTemp, &ulTemp, &ulEdx);
    -    /* EFER MSR present? */
    -    if (ulEdx & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
    +    uint32_t u32GstExtFeatures;
    +    uint32_t u32Temp;
    +    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Temp, &u32Temp, &u32Temp, &u32GstExtFeatures);
    +
    +    /*
    +     * Check if EFER MSR present.
    +     */
    +    if (u32GstExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
         {
             pMsr->u32IndexMSR = MSR_K6_EFER;
    @@ -2111 +2142 @@
             pMsr++; idxMsr++;
     
    -        if (ulEdx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
    +        if (u32GstExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
             {
                 pMsr->u32IndexMSR = MSR_K8_LSTAR;
    @@ -2125 +2156 @@
                 pMsr->u64Value    = pCtx->msrSFMASK;          /* syscall flag mask */
                 pMsr++; idxMsr++;
    +
    +            /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208}  */
    +#if 0
                 pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
                 pMsr->u32Reserved = 0;
                 pMsr->u64Value    = pCtx->msrKERNELGSBASE;    /* swapgs exchange value */
                 pMsr++; idxMsr++;
    -        }
    -    }
    -    pVCpu->hwaccm.s.vmx.cCachedMSRs = idxMsr;
    +#endif
    +        }
    +    }
    +
    +    if (pVCpu->hwaccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    +    {
    +        Assert(u32GstExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
    +        pMsr->u32IndexMSR = MSR_K8_TSC_AUX;
    +        pMsr->u32Reserved = 0;
    +        rc = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pMsr->u64Value);
    +        AssertRC(rc);
    +        pMsr++; idxMsr++;
    +    }
    +
    +    /*
    +     * The number of host MSRs saved must be identical to the number of guest MSRs loaded.
    +     * It's not a VT-x requirement but how it's practically used here.
    +     */
    +    Assert(pVCpu->hwaccm.s.vmx.cCachedMSRs == idxMsr);
     
         rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT, idxMsr);
    @@ -2327 +2377 @@
     
     #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    -    /* Save the possibly changed MSRs that we automatically restore and save during a world switch. */
    +    /*
    +     * Save the possibly changed MSRs that we automatically restore and save during a world switch.
    +     */
         for (unsigned i = 0; i < pVCpu->hwaccm.s.vmx.cCachedMSRs; i++)
         {
    @@ -2344 +2396 @@
                     pCtx->msrSFMASK = pMsr->u64Value;
                     break;
    +
    +            /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208}  */
    +#if 0
                case MSR_K8_KERNEL_GS_BASE:
                     pCtx->msrKERNELGSBASE = pMsr->u64Value;
                     break;
    +#endif
    +            case MSR_K8_TSC_AUX:
    +                CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value);
    +                break;
    +
                case MSR_K6_EFER:
                     /* EFER can't be changed without causing a VM-exit. */
    @@ -2967 +3027 @@
                 pCtx->msrLSTAR = u8LastTPR;
     
    +            /** @todo r=ramshankar: we should check for MSR-bitmap support here. */
                 if (fPending)
                 {
    @@ -3099 +3160 @@
         TMNotifyStartOfExecution(pVCpu);
     
    +#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
         /*
          * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
    @@ -3112 +3174 @@
             ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTSCAux);
         }
    +#endif
     
     #ifdef VBOX_WITH_KERNEL_USING_XMM
    @@ -3121 +3184 @@
         ASMAtomicIncU32(&pVCpu->hwaccm.s.cWorldSwitchExits);
     
    +#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
         /*
          * Restore host's TSC_AUX.
    @@ -3129 +3193 @@
             ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hwaccm.s.vmx.u64HostTSCAux);
         }
    +#endif
     
         /* Possibly the last TSC value seen by the guest (too high) (only when we're in TSC offset mode). */
    @@ -5172 +5237 @@
                     if (VMX_IS_64BIT_HOST_MODE())
                     {
    -                    Log(("MSR_K6_EFER       = %RX64\n", ASMRdMsr(MSR_K6_EFER)));
    -                    Log(("MSR_K6_STAR       = %RX64\n", ASMRdMsr(MSR_K6_STAR)));
    -                    Log(("MSR_K8_LSTAR      = %RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
    -                    Log(("MSR_K8_CSTAR      = %RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
    -                    Log(("MSR_K8_SF_MASK    = %RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
    +                    Log(("MSR_K6_EFER            = %RX64\n", ASMRdMsr(MSR_K6_EFER)));
    +                    Log(("MSR_K6_STAR            = %RX64\n", ASMRdMsr(MSR_K6_STAR)));
    +                    Log(("MSR_K8_LSTAR           = %RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
    +                    Log(("MSR_K8_CSTAR           = %RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
    +                    Log(("MSR_K8_SF_MASK         = %RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
    +                    Log(("MSR_K8_KERNEL_GS_BASE  = %RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
                     }
     # endif
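
    With the auto load/store areas in use, IA32_TSC_AUX now travels through the VMCS like the other MSRs; the #ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE blocks above keep the older manual swap as a fallback so that a guest RDTSCP in VMX non-root mode still reads the guest's value. A rough sketch of that fallback path (illustrative only; RunGuest() is a hypothetical stand-in for the VMLAUNCH/VMRESUME world switch):

        #include <stdint.h>

        #define MSR_K8_TSC_AUX 0xc0000103

        /* IPRT wrappers around the rdmsr/wrmsr instructions. */
        extern uint64_t ASMRdMsr(uint32_t uRegister);
        extern void     ASMWrMsr(uint32_t uRegister, uint64_t u64Val);
        extern void     RunGuest(void);   /* hypothetical world switch */

        static void SwitchWithManualTscAux(uint64_t u64GuestTSCAux)
        {
            uint64_t u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX); /* save host value */
            ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTSCAux);          /* guest RDTSCP sees this */
            RunGuest();                                        /* enter guest, run, VM-exit */
            ASMWrMsr(MSR_K8_TSC_AUX, u64HostTSCAux);           /* restore host value */
        }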
  • trunk/src/VBox/VMM/include/HWACCMInternal.h

    r42056 → r42156

    @@ -42 +42 @@
     
     
    -#if 0
    -/* Seeing somewhat random behaviour on my Nehalem system with auto-save of guest MSRs;
    - * for some strange reason the CPU doesn't save the MSRs during the VM-exit.
    - * Clearly visible with a dual VCPU configured OpenSolaris 200906 live cd VM.
    +/* The MSR auto load/store does not work for KERNEL_GS_BASE MSR, thus we
    + * handle this MSR manually. See @bugref{6208}. This is clearly visible while
    + * booting Solaris 11 (11.1 b19) VMs with 2 Cpus.
      *
    - * Note: change the assembly files when enabling this! (remove the manual auto load/save)
    + * Note: don't forget to update the assembly files while modifying this!
      */
     #define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    -#endif
     
     RT_C_DECLS_BEGIN
    @@ -626 +624 @@
             /** Virtual address of the MSR load area (1 page). */
             R0PTRTYPE(uint8_t *)        pHostMSR;
    -#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
     
             /* Number of automatically loaded/restored MSRs. */
             uint32_t                    cCachedMSRs;
             uint32_t                    uAlignement;
    +#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
     
             /* Host's IA32_TSC_AUX MSR (for RDTSCP in VMX non-root). */
  • trunk/src/VBox/VMM/include/HWACCMInternal.mac

    r37323 → r42156

    @@ -16 +16 @@
     
     %define VMX_USE_CACHED_VMCS_ACCESSES
    +%define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     
     ;Maximum number of cached entries.