VirtualBox

Changeset 22079 in vbox for trunk/src/VBox/VMM


Timestamp: Aug 7, 2009 4:26:25 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 50826
Message:

Back to manual MSR save and restore.

Location:
trunk/src/VBox/VMM
Files:
4 edited

Legend:

Unprefixed lines are unmodified context; lines prefixed with "+" were added, lines prefixed with "-" were removed, and "[…]" marks lines elided between hunks.
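The change is conceptually simple: instead of letting the CPU load and store these MSRs through VMX's automatic MSR-load/store areas, VirtualBox goes back to switching them by hand around every world switch. As orientation for the assembly hunks below, here is a minimal C sketch of that manual pattern. ASMRdMsr/ASMWrMsr are the IPRT wrappers the C code in this changeset already uses, the MSR indices are the architectural ones, and SYSCALLMSRS is an illustrative stand-in for the relevant CPUMCTX fields:

    #include <iprt/asm.h>                       /* ASMRdMsr, ASMWrMsr */

    #define MSR_K8_LSTAR           0xC0000082   /* architectural MSR indices */
    #define MSR_K8_SF_MASK         0xC0000084
    #define MSR_K8_KERNEL_GS_BASE  0xC0000102

    typedef struct SYSCALLMSRS                  /* illustrative; the real fields live in CPUMCTX */
    {
        uint64_t msrLSTAR, msrSFMASK, msrKERNELGSBASE;
    } SYSCALLMSRS;

    /* Before VM-entry: remember the host values, install the guest's. */
    static void msrLoadGuest(SYSCALLMSRS *pHostSave, const SYSCALLMSRS *pGuest)
    {
        pHostSave->msrLSTAR        = ASMRdMsr(MSR_K8_LSTAR);
        ASMWrMsr(MSR_K8_LSTAR,          pGuest->msrLSTAR);
        pHostSave->msrSFMASK       = ASMRdMsr(MSR_K8_SF_MASK);
        ASMWrMsr(MSR_K8_SF_MASK,        pGuest->msrSFMASK);
        pHostSave->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
        ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pGuest->msrKERNELGSBASE);
    }

    /* After VM-exit: capture what the guest left behind, put the host values back. */
    static void msrSaveGuestRestoreHost(SYSCALLMSRS *pGuest, const SYSCALLMSRS *pHostSave)
    {
        pGuest->msrLSTAR        = ASMRdMsr(MSR_K8_LSTAR);
        ASMWrMsr(MSR_K8_LSTAR,          pHostSave->msrLSTAR);
        pGuest->msrSFMASK       = ASMRdMsr(MSR_K8_SF_MASK);
        ASMWrMsr(MSR_K8_SF_MASK,        pHostSave->msrSFMASK);
        pGuest->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
        ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pHostSave->msrKERNELGSBASE);
    }

With this pattern, correctness no longer depends on the CPU honouring the VM-exit MSR-store area, which is the behaviour the Nehalem comment in the first hunk calls into question.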
  • trunk/src/VBox/VMM/HWACCMInternal.h

    r22064 → r22079
#define HWACCM_VTX_WITH_EPT
#define HWACCM_VTX_WITH_VPID
+
+
+#if 0
+/* Seeing somewhat random behaviour on my Nehalem system with auto-save of guest MSRs;
+ * for some strange reason the CPU doesn't save the MSRs during the VM-exit.
+ * Clearly visible with a dual VCPU configured OpenSolaris 200906 live cd VM.
+ *
+ * Note: change the assembly files when enabling this! (remove the manual auto load/save)
+ */
+#define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
+#endif

RT_C_DECLS_BEGIN

[…]

        R0PTRTYPE(uint8_t *)        pMSRBitmap;

+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        /** Physical address of the guest MSR load area (1 page). */
        RTHCPHYS                    pGuestMSRPhys;

[…]

        /** Virtual address of the MSR load area (1 page). */
        R0PTRTYPE(uint8_t *)        pHostMSR;
+#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

        /* Number of automatically loaded/restored MSRs. */
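When VBOX_WITH_AUTO_MSR_LOAD_RESTORE is active, the two pages declared above hold the VM-entry/VM-exit MSR areas that the CPU walks on its own. Their entry format is fixed by the Intel SDM and is what the VMXMSR type used in HWVMXR0.cpp below corresponds to; a self-contained sketch with the size arithmetic:

    #include <stdint.h>
    #include <assert.h>

    /* One entry of a VMX auto load/store MSR area (Intel SDM layout,
     * matching the VMXMSR type used in HWVMXR0.cpp below). */
    typedef struct VMXMSRENTRY
    {
        uint32_t u32IndexMSR;   /* the ECX value rdmsr/wrmsr would take */
        uint32_t u32Reserved;   /* must be zero */
        uint64_t u64Value;      /* loaded on entry / stored on exit by the CPU */
    } VMXMSRENTRY;

    int main(void)
    {
        /* Each of the 4KB pages allocated for pGuestMSR/pHostMSR fits 256 entries. */
        assert(sizeof(VMXMSRENTRY) == 16);
        assert(4096 / sizeof(VMXMSRENTRY) == 256);
        return 0;
    }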
  • trunk/src/VBox/VMM/VMMGC/HWACCMGCA.asm

    r22040 → r22079
    ; *
    ; */
+
+    ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
+    ;; @todo use the automatic load feature for MSRs
+    LOADGUESTMSR MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
+%if 0  ; not supported on Intel CPUs
+    LOADGUESTMSR MSR_K8_CSTAR,          CPUMCTX.msrCSTAR
+%endif
+    LOADGUESTMSR MSR_K6_STAR,           CPUMCTX.msrSTAR
+    LOADGUESTMSR MSR_K8_SF_MASK,        CPUMCTX.msrSFMASK
+    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
+
%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     qword [rbx + VMCSCACHE.uPos], 5

[…]

    pop     rsi         ; pCtx (needed in rsi by the macros below)
+
+    ;; @todo use the automatic load feature for MSRs
+    SAVEGUESTMSR MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
+%if 0  ; not supported on Intel CPUs
+    SAVEGUESTMSR MSR_K8_CSTAR,          CPUMCTX.msrCSTAR
+%endif
+    SAVEGUESTMSR MSR_K6_STAR,           CPUMCTX.msrSTAR
+    SAVEGUESTMSR MSR_K8_SF_MASK,        CPUMCTX.msrSFMASK
+    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
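Each macro invocation above pairs an MSR index with the offset of a CPUMCTX field (rsi holds pCtx, as the pop comment notes). The macro bodies are defined elsewhere and are not part of this changeset, but the save side necessarily amounts to an rdmsr stored at that offset. A hedged C equivalent, with offsetof standing in for the assembler's CPUMCTX.msrLSTAR offset constants:

    #include <stddef.h>   /* offsetof */
    #include <stdint.h>
    #include <iprt/asm.h> /* ASMRdMsr */

    #define MSR_K8_LSTAR 0xC0000082          /* architectural index */

    typedef struct CTXMSRS                   /* illustrative slice of CPUMCTX */
    {
        uint64_t msrLSTAR, msrSTAR, msrSFMASK, msrKERNELGSBASE;
    } CTXMSRS;

    /* "SAVEGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR" boils down to: read the MSR
     * and store it at the given offset from the context pointer (rsi). */
    static void saveGuestMsr(void *pvCtx, uint32_t idMsr, size_t offField)
    {
        *(uint64_t *)((uint8_t *)pvCtx + offField) = ASMRdMsr(idMsr);
    }

    /* Usage: saveGuestMsr(pCtx, MSR_K8_LSTAR, offsetof(CTXMSRS, msrLSTAR)); */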
  • trunk/src/VBox/VMM/VMMR0/HWACCMR0Mixed.mac

    r22040 → r22079
%endif

+    ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
+    ;; @todo use the automatic load feature for MSRs
+    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
+%if 0  ; not supported on Intel CPUs
+    LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
+%endif
+    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
+    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
+    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
+
    ; Save the pCtx pointer
    push    xSI

[…]

    pop     xSI         ; pCtx (needed in rsi by the macros below)

+    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
+    ;; @todo use the automatic load feature for MSRs
+    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
+    LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
+    LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
+%if 0  ; not supported on Intel CPUs
+    LOADHOSTMSREX MSR_K8_CSTAR, CPUMCTX.msrCSTAR
+%endif
+    LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
+
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop     xDX         ; saved pCache

[…]

    pop     xSI         ; pCtx (needed in rsi by the macros below)

+    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
+    ;; @todo use the automatic load feature for MSRs
+    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
+    LOADHOSTMSR MSR_K8_SF_MASK
+    LOADHOSTMSR MSR_K6_STAR
+%if 0  ; not supported on Intel CPUs
+    LOADHOSTMSR MSR_K8_CSTAR
+%endif
+    LOADHOSTMSR MSR_K8_LSTAR
+
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    add     xSP, xS     ; pCache

[…]

    pop     xSI         ; pCtx (needed in rsi by the macros below)
+
+    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
+    ;; @todo use the automatic load feature for MSRs
+    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
+    LOADHOSTMSR MSR_K8_SF_MASK
+    LOADHOSTMSR MSR_K6_STAR
+%if 0  ; not supported on Intel CPUs
+    LOADHOSTMSR MSR_K8_CSTAR
+%endif
+    LOADHOSTMSR MSR_K8_LSTAR

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
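Two macro flavours appear in this file: LOADHOSTMSREX is handed a CPUMCTX field, plain LOADHOSTMSR is not. Their definitions are outside this changeset, but the usage pattern suggests the EX variant first captures the outgoing guest value into the context and then restores the host value, while the plain variant only restores (used on paths where the guest values need not be kept). A hedged C rendering of that reading, under the assumption that the host values were stashed by LOADGUESTMSR on the way in:

    #include <stdint.h>
    #include <iprt/asm.h>  /* ASMRdMsr, ASMWrMsr */

    /* Host value as stashed at VM-entry time; the assembly keeps one per MSR,
     * so this single slot is a simplification. */
    static uint64_t g_uStashedHostValue;

    /* LOADHOSTMSREX MSR, CPUMCTX.field: capture the guest's value, then restore the host's. */
    static void loadHostMsrEx(uint32_t idMsr, uint64_t *puGuestCtxField)
    {
        *puGuestCtxField = ASMRdMsr(idMsr);
        ASMWrMsr(idMsr, g_uStashedHostValue);
    }

    /* LOADHOSTMSR MSR: restore only; the guest value is not needed on this path. */
    static void loadHostMsr(uint32_t idMsr)
    {
        ASMWrMsr(idMsr, g_uStashedHostValue);
    }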
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r22063 → r22079
        }

+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        /* Allocate one page for the guest MSR load area (for preloading guest MSRs during the world switch). */
        rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, 1 << PAGE_SHIFT, true /* executable R0 mapping */);

[…]

        pVCpu->hwaccm.s.vmx.pHostMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjHostMSR, 0);
        memset(pVCpu->hwaccm.s.vmx.pHostMSR, 0, PAGE_SIZE);
+#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

        /* Current guest paging mode. */
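The elided lines between the two visible statements follow the usual IPRT ring-0 sequence: allocate a physically contiguous page, fetch its ring-0 virtual and physical addresses, and zero it, done once for the guest page and once for the host page. A condensed sketch of that sequence (allocOneMsrPage is a hypothetical helper; error cleanup trimmed):

    #include <iprt/memobj.h>  /* RTR0MemObjAllocCont, RTR0MemObjAddress, ... */
    #include <iprt/param.h>   /* PAGE_SIZE */
    #include <iprt/err.h>     /* RT_FAILURE */
    #include <string.h>

    static int allocOneMsrPage(PRTR0MEMOBJ phMemObj, uint8_t **ppbPage, RTHCPHYS *pHCPhys)
    {
        int rc = RTR0MemObjAllocCont(phMemObj, PAGE_SIZE, true /* executable R0 mapping */);
        if (RT_FAILURE(rc))
            return rc;
        *ppbPage = (uint8_t *)RTR0MemObjAddress(*phMemObj);
        *pHCPhys = RTR0MemObjGetPagePhysAddr(*phMemObj, 0 /* iPage */);
        memset(*ppbPage, 0, PAGE_SIZE);
        return rc;
    }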
     
[…]

            pVCpu->hwaccm.s.vmx.pMSRBitmapPhys   = 0;
        }
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        if (pVCpu->hwaccm.s.vmx.pMemObjHostMSR != NIL_RTR0MEMOBJ)
        {

[…]

            pVCpu->hwaccm.s.vmx.pGuestMSRPhys   = 0;
        }
+#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
    }
    if (pVM->hwaccm.s.vmx.pMemObjAPIC != NIL_RTR0MEMOBJ)

[…]

        }

+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        /* Set the guest & host MSR load/store physical addresses. */
        Assert(pVCpu->hwaccm.s.vmx.pGuestMSRPhys);

[…]

        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL,  pVCpu->hwaccm.s.vmx.pHostMSRPhys);
        AssertRC(rc);
-
+#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
+
+        rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
+        AssertRC(rc);
+
+        rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, 0);
+        AssertRC(rc);
+
        if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
        {
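Note that the two count writes land after the #endif, so they run whether or not the auto-load code is compiled in: a count of zero tells the CPU to process no MSR-area entries on VM-entry or VM-exit, which is exactly what manual switching requires, and the auto path simply overwrites the counts later once the areas are filled. A short recap with the semantics spelled out (vmxZeroAutoMsrCounts is a hypothetical wrapper; the VMCS field names are the ones used in the diff):

    #include <VBox/hwacc_vmx.h>  /* VMXWriteVMCS, VMX_VMCS_CTRL_* (2009-era header) */
    #include <iprt/assert.h>     /* AssertRC */

    static void vmxZeroAutoMsrCounts(void)
    {
        /* 0 entries to auto-load on VM-entry: guest MSRs are switched manually now. */
        int rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
        AssertRC(rc);
        /* 0 entries to auto-store on VM-exit: nothing is captured automatically either. */
        rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, 0);
        AssertRC(rc);
    }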
     
[…]

        AssertRC(rc);

+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        /* Store all host MSRs in the VM-Exit load area, so they will be reloaded after the world switch back to the host. */
        PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pHostMSR;

[…]

            pMsr->u32IndexMSR = MSR_K6_EFER;
            pMsr->u32Reserved = 0;
-#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
            if (CPUMIsGuestInLongMode(pVCpu))
            {

[…]

            }
            else
-#endif
+# endif
                pMsr->u64Value    = ASMRdMsr(MSR_K6_EFER);
            pMsr++; idxMsr++;
        }

-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        if (VMX_IS_64BIT_HOST_MODE())
        {

[…]

            pMsr++; idxMsr++;
        }
-#endif
+# endif
        rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_LOAD_COUNT, idxMsr);
        AssertRC(rc);
+#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

        pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_HOST_CONTEXT;
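The fill code above uses a simple cursor pattern: populate a 16-byte entry, advance the pointer and the running count, and finally publish the count to the VMCS so the CPU knows how many entries to process. A self-contained, runnable sketch of the same arithmetic (rdmsrStub stands in for ASMRdMsr so this can run outside ring-0):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct VMXMSRENTRY { uint32_t u32IndexMSR, u32Reserved; uint64_t u64Value; } VMXMSRENTRY;

    #define MSR_K6_EFER 0xC0000080  /* architectural index, as used in the diff */

    static uint64_t rdmsrStub(uint32_t idMsr) { (void)idMsr; return 0; /* stand-in for ASMRdMsr */ }

    int main(void)
    {
        VMXMSRENTRY aArea[256];                 /* one 4KB page worth of entries */
        VMXMSRENTRY *pMsr = aArea;
        unsigned idxMsr = 0;

        /* Same cursor pattern as the diff: fill an entry, advance pointer and count. */
        pMsr->u32IndexMSR = MSR_K6_EFER;
        pMsr->u32Reserved = 0;
        pMsr->u64Value    = rdmsrStub(MSR_K6_EFER);
        pMsr++; idxMsr++;

        /* ...entries for the 64-bit-host-only MSRs would follow here... */

        printf("entries to auto-load on VM-exit: %u\n", idxMsr);  /* -> VMXWriteVMCS(..._COUNT, idxMsr) */
        return 0;
    }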
     
[…]

    vmxR0UpdateExceptionBitmap(pVM, pVCpu, pCtx);

+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    /* Store all guest MSRs in the VM-Entry load area, so they will be loaded during the world switch. */
    PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pGuestMSR;

[…]

    rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, idxMsr);
    AssertRC(rc);
+#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

    /* Done. */

[…]

    }

+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    /* Save the possibly changed MSRs that we automatically restore and save during a world switch. */
    for (unsigned i = 0; i < pVCpu->hwaccm.s.vmx.cCachedMSRs; i++)

[…]

        }
    }
+#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
    return VINF_SUCCESS;
}
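The loop body is elided in the changeset view, but its job under VBOX_WITH_AUTO_MSR_LOAD_RESTORE follows from the comment: after VM-exit the store area holds whatever the guest left in those MSRs, and the values must flow back into the saved guest context. A hedged sketch of that read-back (pfnSetGuestMsr is a hypothetical setter standing in for the real dispatch on u32IndexMSR):

    #include <stdint.h>

    typedef struct VMXMSRENTRY { uint32_t u32IndexMSR, u32Reserved; uint64_t u64Value; } VMXMSRENTRY;

    static void readBackGuestMsrs(VMXMSRENTRY const *paEntries, unsigned cEntries,
                                  void (*pfnSetGuestMsr)(uint32_t idMsr, uint64_t uValue))
    {
        for (unsigned i = 0; i < cEntries; i++)
            pfnSetGuestMsr(paEntries[i].u32IndexMSR, paEntries[i].u64Value);
    }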