VirtualBox

Changeset 9484 in vbox


Timestamp: Jun 6, 2008 2:02:24 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 31744
Message: Save & restore CSTAR, STAR, SFMASK & KERNEL_GSBASE MSRs (VT-x)

Location: trunk/src/VBox/VMM/VMMR0
Files: 2 edited

Legend: unchanged lines are unmarked; added lines are prefixed with '+', removed lines with '-'; '…' marks skipped lines.
  • trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm

    r9475 → r9484

      %ifdef RT_ARCH_AMD64
    + ; Save a host and load the corresponding guest MSR (trashes rdx & rcx)
    + %macro LOADGUESTMSR 2
    +   mov     rcx, %1
    +   rdmsr
    +   push    rdx
    +   push    rax
    +   xor     rdx, rdx
    +   mov     rax, qword [xSI + %2]
    +   wrmsr
    + %endmacro
    +
    + ; Save a guest and load the corresponding host MSR (trashes rdx & rcx)
    + %macro LOADHOSTMSR 2
    +   mov     rcx, %1
    +   rdmsr
    +   mov     qword [xSI + %2], rax
    +   pop     rax
    +   pop     rdx
    +   wrmsr
    + %endmacro
    +
      %ifdef ASM_CALL64_GCC
       %macro MYPUSHAD 0
    …
      %endif

    + ; trashes, rax, rdx & rcx
      %macro MYPUSHSEGS 2
         mov     %2, es
    …
         ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode, Solaris OTOH doesn't and we must save it.
    -    push    rcx
         mov     ecx, MSR_K8_FS_BASE
         rdmsr
    -    pop     rcx
         push    rdx
         push    rax
    …
         ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit
    -    push    rcx
         mov     ecx, MSR_K8_GS_BASE
         rdmsr
    -    pop     rcx
         push    rdx
         push    rax
    …
      %endmacro

    + ; trashes, rax, rdx & rcx
      %macro MYPOPSEGS 2
         ; Note: do not step through this code with a debugger!
    …
         pop     rax
         pop     rdx
    -    push    rcx
         mov     ecx, MSR_K8_GS_BASE
         wrmsr
    -    pop     rcx

         pop     fs
         pop     rax
         pop     rdx
    -    push    rcx
         mov     ecx, MSR_K8_FS_BASE
         wrmsr
    -    pop     rcx
         ; Now it's safe to step again
    …
         ;/* Save segment registers */
    -    ; Note: MYPUSHSEGS trashes rdx (among others), so we moved it here (msvc amd64 case)
    +    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
         MYPUSHSEGS xAX, ax
    …
         ;/* Save segment registers */
    -    ; Note: MYPUSHSEGS trashes rdx (among others), so we moved it here (msvc amd64 case)
    +    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
         MYPUSHSEGS xAX, ax
    +
    +    ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
    +    ; @todo use the automatic load feature for MSRs
    +    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    +    LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
    +    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    +    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE

         ; Save the pCtx pointer
    …
         add     xSP, xS      ; pCtx

    +    ; Save the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the host MSRs
    +    ; @todo use the automatic load feature for MSRs
    +    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    +    LOADHOSTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    +    LOADHOSTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
    +    LOADHOSTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    +
         ; Restore segment registers
         MYPOPSEGS xAX, ax
    …
         add     xSP, xS     ; pCtx
    +
    +    ; Save the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the host MSRs
    +    ; @todo use the automatic load feature for MSRs
    +    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    +    LOADHOSTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    +    LOADHOSTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
    +    LOADHOSTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR

         ; Restore segment registers
    …
         add     xSP, xS     ; pCtx
    +
    +    ; Save the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the host MSRs
    +    ; @todo use the automatic load feature for MSRs
    +    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    +    LOADHOSTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    +    LOADHOSTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
    +    LOADHOSTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR

         ; Restore segment registers
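
Taken together, the LOADGUESTMSR/LOADHOSTMSR additions above swap the SYSCALL MSRs (LSTAR, CSTAR, SFMASK) and KERNEL_GSBASE between host and guest around VM entry and exit, parking the host values on the stack in between. The following C-level sketch restates that idea only for orientation: the real code stays in assembly, the struct types below are stand-ins invented for the sketch, and it assumes IPRT's ASMRdMsr/ASMWrMsr wrappers are usable in this ring-0 context.

    #include <stdint.h>
    #include <iprt/asm.h>   /* ASMRdMsr / ASMWrMsr (IPRT; assumed available here) */

    /* Architectural MSR numbers; the real code takes the MSR_K8_* constants from VBox's x86 header. */
    #define MSR_K8_LSTAR            UINT32_C(0xC0000082)
    #define MSR_K8_CSTAR            UINT32_C(0xC0000083)
    #define MSR_K8_SF_MASK          UINT32_C(0xC0000084)
    #define MSR_K8_KERNEL_GS_BASE   UINT32_C(0xC0000102)

    /* Stand-in for the CPUMCTX MSR fields referenced by the macros (not the real CPUMCTX). */
    typedef struct GUESTMSRS
    {
        uint64_t msrLSTAR, msrCSTAR, msrSFMASK, msrKERNELGSBASE;
    } GUESTMSRS;

    /* Host copies; the assembly keeps these on the stack via push rdx / push rax instead. */
    typedef struct HOSTMSRS
    {
        uint64_t uLStar, uCStar, uSFMask, uKernelGSBase;
    } HOSTMSRS;

    /* Before VMLAUNCH/VMRESUME -- what the four LOADGUESTMSR invocations amount to:
       stash the host value, then load the guest value. */
    static void sketchLoadGuestMsrs(HOSTMSRS *pHost, const GUESTMSRS *pGuest)
    {
        pHost->uLStar        = ASMRdMsr(MSR_K8_LSTAR);
        ASMWrMsr(MSR_K8_LSTAR,          pGuest->msrLSTAR);
        pHost->uCStar        = ASMRdMsr(MSR_K8_CSTAR);
        ASMWrMsr(MSR_K8_CSTAR,          pGuest->msrCSTAR);
        pHost->uSFMask       = ASMRdMsr(MSR_K8_SF_MASK);
        ASMWrMsr(MSR_K8_SF_MASK,        pGuest->msrSFMASK);
        pHost->uKernelGSBase = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
        ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pGuest->msrKERNELGSBASE);
    }

    /* After the VM exit -- what the four LOADHOSTMSR invocations amount to:
       save the guest value into the context, then restore the host value. */
    static void sketchLoadHostMsrs(const HOSTMSRS *pHost, GUESTMSRS *pGuest)
    {
        pGuest->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
        ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pHost->uKernelGSBase);
        pGuest->msrSFMASK       = ASMRdMsr(MSR_K8_SF_MASK);
        ASMWrMsr(MSR_K8_SF_MASK,        pHost->uSFMask);
        pGuest->msrCSTAR        = ASMRdMsr(MSR_K8_CSTAR);
        ASMWrMsr(MSR_K8_CSTAR,          pHost->uCStar);
        pGuest->msrLSTAR        = ASMRdMsr(MSR_K8_LSTAR);
        ASMWrMsr(MSR_K8_LSTAR,          pHost->uLStar);
    }

The reversed order on the way out mirrors the assembly, which pops the host values in the opposite order to the pushes; the @todo comments in the diff already point at the automatic load feature for MSRs as a cleaner replacement for this manual swapping.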
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r9475 → r9484

          */
         val = (pVM->hwaccm.s.vmx.msr.vmx_entry & 0xFFFFFFFF);
    -
    -    /* Mask away the bits that the CPU doesn't support */
    -    /** @todo make sure they don't conflict with the above requirements. */
    -    val &= (pVM->hwaccm.s.vmx.msr.vmx_entry >> 32ULL);
    -    /* else Must be zero when AMD64 is not available. */
    -    rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, val);
    -    AssertRC(rc);
    -
         /* 64 bits guest mode? */
         if (pCtx->msrEFER & MSR_K6_EFER_LMA)
    -    {
             val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE;
    +    /* else Must be zero when AMD64 is not available. */
    +
    +    /* Mask away the bits that the CPU doesn't support */
    +    val &= (pVM->hwaccm.s.vmx.msr.vmx_entry >> 32ULL);
    +    rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, val);
    +    AssertRC(rc);
    +
    +    /* 64 bits guest mode? */
    +    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
    +    {
      #ifndef VBOX_WITH_64_BITS_GUESTS
             return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
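
The point of this hunk is ordering rather than new logic: before the change, VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE was OR'ed into val only after val had already been masked and written with VMXWriteVMCS; the reordered code sets the bit first, so it is included in the masked value that gets written. The old "@todo make sure they don't conflict" comment is dropped along the way. A small self-contained sketch of the corrected flow follows; the function and parameter names are invented for illustration, and the control-bit value is defined locally only to keep the sketch standalone.

    #include <stdint.h>
    #include <stdbool.h>

    /* Local stand-in for the definition used by HWVMXR0.cpp: the VMX "IA-32e mode guest"
       VM-entry control is bit 9 of the entry-controls field. */
    #define VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE  UINT32_C(0x00000200)

    /* uEntryCtlsCapMsr plays the role of pVM->hwaccm.s.vmx.msr.vmx_entry (the entry-controls
       capability MSR); fGuestLongMode plays the role of (pCtx->msrEFER & MSR_K6_EFER_LMA). */
    static uint32_t computeVmxEntryControls(uint64_t uEntryCtlsCapMsr, bool fGuestLongMode)
    {
        /* Start with the settings the CPU requires (low dword of the capability MSR). */
        uint32_t val = (uint32_t)(uEntryCtlsCapMsr & 0xFFFFFFFF);

        /* 64 bits guest mode? Decide this BEFORE masking and writing; the pre-r9484 code
           OR'ed the bit in only after the VMCS field had already been written. */
        if (fGuestLongMode)
            val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE;
        /* else: must be zero when AMD64 is not available. */

        /* Mask away the bits that the CPU doesn't support (high dword of the capability MSR). */
        val &= (uint32_t)(uEntryCtlsCapMsr >> 32);

        /* The caller then writes the result, as in the diff:
           rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, val); AssertRC(rc); */
        return val;
    }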