VirtualBox

Changeset 49523 in vbox for trunk/src/VBox


Timestamp: Nov 18, 2013 10:59:01 AM (11 years ago)
Author:    vboxsync
Message:   VMM: Retire VBOX_WITH_AUTO_MSR_LOAD_RESTORE define.
Location:  trunk/src/VBox/VMM
Files:     7 edited

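Background for the diffs below: with VBOX_WITH_AUTO_MSR_LOAD_RESTORE retired, VirtualBox now always uses VT-x's auto-load/store MSR areas, programmed into the VMCS, instead of swapping MSRs manually with rdmsr/wrmsr around each world switch. Those areas are tables of 16-byte entries that the CPU walks on VM-entry and VM-exit. A minimal C sketch of the entry format and of appending an entry, following the Intel SDM; the struct and helper names here are illustrative, not VirtualBox's own:

    #include <stdint.h>

    /* One auto-load/store entry (Intel SDM layout: MSR index, reserved, value). */
    typedef struct AUTOMSR
    {
        uint32_t u32Msr;        /* MSR index, e.g. 0xc0000082 for MSR_K8_LSTAR. */
        uint32_t u32Reserved;   /* Must be zero. */
        uint64_t u64Value;      /* Value the CPU loads (or stores) for this MSR. */
    } AUTOMSR;

    /* Hypothetical helper: append an MSR to an auto-load/store page and bump
       the entry count.  The real code keeps one such page per VCPU (pvGuestMsr
       and pvHostMsr in the diffs) and writes the page's physical address and
       entry count into the VMCS (VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL etc.). */
    static void addAutoMsr(AUTOMSR *paMsrs, uint32_t *pcMsrs, uint32_t idMsr, uint64_t uValue)
    {
        paMsrs[*pcMsrs].u32Msr      = idMsr;
        paMsrs[*pcMsrs].u32Reserved = 0;
        paMsrs[*pcMsrs].u64Value    = uValue;
        (*pcMsrs)++;
    }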
  • trunk/src/VBox/VMM/VMMR0/HMR0A.asm

    r48964 → r49523
    @@ -84 +84 @@
     ; @param 1  full width register name
     ; @param 2  16-bit register name for \a 1.
    -
    -%ifdef MAYBE_64_BIT
    -  ; Save a host and load the corresponding guest MSR (trashes rdx & rcx)
    -  %macro LOADGUESTMSR 2
    -    mov     rcx, %1
    -    rdmsr
    -    push    rdx
    -    push    rax
    -    mov     edx, dword [xSI + %2 + 4]
    -    mov     eax, dword [xSI + %2]
    -    wrmsr
    -  %endmacro
    -
    -  ; Save a guest and load the corresponding host MSR (trashes rdx & rcx)
    -  ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
    -  %macro LOADHOSTMSREX 2
    -    mov     rcx, %1
    -    rdmsr
    -    mov     dword [xSI + %2], eax
    -    mov     dword [xSI + %2 + 4], edx
    -    pop     rax
    -    pop     rdx
    -    wrmsr
    -  %endmacro
    -
    -  ; Load the corresponding host MSR (trashes rdx & rcx)
    -  %macro LOADHOSTMSR 1
    -    mov     rcx, %1
    -    pop     rax
    -    pop     rdx
    -    wrmsr
    -  %endmacro
    -%endif
     
     %ifdef ASM_CALL64_GCC
    @@ -156 +123 @@
     
     %ifdef VBOX_SKIP_RESTORE_SEG
    -%macro MYPUSHSEGS64 2
    -%endmacro
    -
    -%macro MYPOPSEGS64 2
    -%endmacro
    -%else ; !VBOX_SKIP_RESTORE_SEG
    -; trashes, rax, rdx & rcx
    -%macro MYPUSHSEGS64 2
    - %ifndef HM_64_BIT_USE_NULL_SEL
    + %macro MYPUSHSEGS64 2
    + %endmacro
    +
    + %macro MYPOPSEGS64 2
    + %endmacro
    +%else       ; !VBOX_SKIP_RESTORE_SEG
    + ; trashes, rax, rdx & rcx
    + %macro MYPUSHSEGS64 2
    +  %ifndef HM_64_BIT_USE_NULL_SEL
       mov     %2, es
       push    %1
       mov     %2, ds
       push    %1
    - %endif
    +  %endif
     
       ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode, Solaris OTOH doesn't and we must save it.
    @@ -176 +143 @@
       push    rdx
       push    rax
    - %ifndef HM_64_BIT_USE_NULL_SEL
    +  %ifndef HM_64_BIT_USE_NULL_SEL
       push    fs
    - %endif
    +  %endif
     
       ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit
    @@ -185 +152 @@
       push    rdx
       push    rax
    - %ifndef HM_64_BIT_USE_NULL_SEL
    +  %ifndef HM_64_BIT_USE_NULL_SEL
       push    gs
    - %endif
    -%endmacro
    -
    -; trashes, rax, rdx & rcx
    -%macro MYPOPSEGS64 2
    +  %endif
    + %endmacro
    +
    + ; trashes, rax, rdx & rcx
    + %macro MYPOPSEGS64 2
       ; Note: do not step through this code with a debugger!
    - %ifndef HM_64_BIT_USE_NULL_SEL
    +  %ifndef HM_64_BIT_USE_NULL_SEL
       xor     eax, eax
       mov     ds, ax
    @@ -199 +166 @@
       mov     fs, ax
       mov     gs, ax
    - %endif
    -
    - %ifndef HM_64_BIT_USE_NULL_SEL
    +  %endif
    +
    +  %ifndef HM_64_BIT_USE_NULL_SEL
       pop     gs
    - %endif
    +  %endif
       pop     rax
       pop     rdx
    @@ -209 +176 @@
       wrmsr
     
    - %ifndef HM_64_BIT_USE_NULL_SEL
    +  %ifndef HM_64_BIT_USE_NULL_SEL
       pop     fs
    - %endif
    +  %endif
       pop     rax
       pop     rdx
    @@ -218 +185 @@
       ; Now it's safe to step again
     
    - %ifndef HM_64_BIT_USE_NULL_SEL
    +  %ifndef HM_64_BIT_USE_NULL_SEL
       pop     %1
       mov     ds, %2
       pop     %1
       mov     es, %2
    - %endif
    -%endmacro
    +  %endif
    + %endmacro
     %endif ; VBOX_SKIP_RESTORE_SEG
     
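The retired LOADGUESTMSR/LOADHOSTMSREX/LOADHOSTMSR macros above implemented the manual MSR world switch that the auto-load/store area now replaces. Their semantics, translated into a C sketch (ReadMsr/WriteMsr stand in for rdmsr/wrmsr, and the explicit stack for the push/pop pairs; none of these names are VirtualBox's):

    #include <stdint.h>

    extern uint64_t ReadMsr(uint32_t idMsr);              /* rdmsr */
    extern void     WriteMsr(uint32_t idMsr, uint64_t u); /* wrmsr */

    /* LOADGUESTMSR: stash the host value, then enter the guest value
       taken from the saved guest context. */
    static void loadGuestMsr(uint64_t *pauHostStack, unsigned *piTop,
                             uint32_t idMsr, uint64_t uGuestValue)
    {
        pauHostStack[(*piTop)++] = ReadMsr(idMsr);  /* push host value  */
        WriteMsr(idMsr, uGuestValue);               /* load guest value */
    }

    /* LOADHOSTMSREX: save the guest value back into the context (it may have
       changed behind our back, e.g. KERNEL_GS_BASE via swapgs), then restore
       the host value pushed by loadGuestMsr. */
    static void loadHostMsrEx(uint64_t *pauHostStack, unsigned *piTop,
                              uint32_t idMsr, uint64_t *puGuestValue)
    {
        *puGuestValue = ReadMsr(idMsr);             /* store guest value */
        WriteMsr(idMsr, pauHostStack[--(*piTop)]);  /* pop host value    */
    }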
  • trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac

    r49018 → r49523
    @@ -356 +356 @@
        pop     xSI         ; pCtx (needed in rsi by the macros below)
     
    - %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    -    ; Save the guest MSRs and load the host MSRs.
    -    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    -    LOADHOSTMSREX MSR_K8_SF_MASK,        CPUMCTX.msrSFMASK
    -    LOADHOSTMSREX MSR_K6_STAR,           CPUMCTX.msrSTAR
    -    LOADHOSTMSREX MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
    - %endif
    -
     %ifdef VMX_USE_CACHED_VMCS_ACCESSES
        pop     xDX         ; Saved pCache
    @@ -454 +446 @@
        ; Save the pCache pointer.
        push    xBX
    -%endif
    -
    -%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    -    ; Save the host MSRs and load the guest MSRs.
    -    LOADGUESTMSR MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
    -    LOADGUESTMSR MSR_K6_STAR,           CPUMCTX.msrSTAR
    -    LOADGUESTMSR MSR_K8_SF_MASK,        CPUMCTX.msrSFMASK
    -    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
     %endif
     
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r49521 → r49523
    @@ -865 +865 @@
             AssertPtr(pVCpu);
     
    -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
             hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
             hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
    -#endif
     
             if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    @@ -917 +915 @@
            VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
            VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
    -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
            VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
            VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
    -#endif
        }
     #undef VMXLOCAL_INIT_VMCPU_MEMOBJ
    @@ -983 +979 @@
            }
     
    -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
            /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
            rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
    @@ -993 +988 @@
            if (RT_FAILURE(rc))
                goto cleanup;
    -#endif
        }
     
    @@ -1201 +1195 @@
     
     
    -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     /**
      * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
    @@ -1449 +1442 @@
     }
     # endif /* VBOX_STRICT */
    -#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
     
     
    @@ -2307 +2299 @@
     #endif
     
    -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    -    /* Setup MSR autoloading/storing. */
    +    /* Setup MSR auto-load/store area. */
        Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
        Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf));    /* Lower 4 bits MBZ. */
    @@ -2320 +2311 @@
        rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,  pVCpu->hm.s.vmx.HCPhysHostMsr);
        AssertRCReturn(rc, rc);
    -#endif
     
        /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
    @@ -2840 +2830 @@
     
        int rc = VINF_SUCCESS;
    -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     #if 0
        PVMXAUTOMSR  pHostMsr       = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
    @@ -2939 +2928 @@
        if (pVCpu->hm.s.vmx.cMsrs > 0)
            hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu);
    -#endif  /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
     
        /*
    @@ -4374 +4362 @@
        if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
        {
    -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
            /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
            PVM pVM = pVCpu->CTX_SUFF(pVM);
    @@ -4391 +4378 @@
                Log4(("MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
     # endif
    -#endif  /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
    -
            VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
        }
    @@ -5769 +5754 @@
            return VINF_SUCCESS;
     
    -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
        Log4(("hmR0VmxSaveGuestAutoLoadStoreMsrs: cMsrs=%u\n", pVCpu->hm.s.vmx.cMsrs));
    @@ -5788 +5772 @@
            }
        }
    -#endif
     
        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS;
    @@ -7965 +7948 @@
                                                                       to start executing. */
     
    -    /** @todo Get rid of VBOX_WITH_AUTO_MSR_LOAD_RESTORE define. */
    -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        /*
         * Load the TSC_AUX MSR when we are not intercepting RDTSCP.
    @@ -7984 +7965 @@
     #ifdef VBOX_STRICT
        hmR0VmxCheckAutoLoadStoreMsrs(pVCpu);
    -#endif
    -#else
    -    /*
    -     * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
    -     * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}.
    -     */
    -    if (   (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    -        && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
    -    {
    -        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
    -        int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64HostTscAuxMsr);
    -        AssertRC(rc2);
    -        ASMWrMsr(MSR_K8_TSC_AUX, u64HostTscAuxMsr);
    -    }
     #endif
     }
    @@ -8035 +8002 @@
            if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
            {
    -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
                /* VT-x restored the host TSC_AUX MSR for us, update the guest value from the VMCS area
                   if it could have changed without causing a VM-exit. */
    @@ -8043 +8009 @@
                    AssertRC(rc2);
                }
    -#else
    -            /* Update guest's TSC_AUX if it could have changed. */
    -            if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    -            {
    -                uint64_t u64GuestTscAuxMsr = ASMRdMsr(MSR_K8_TSC_AUX);
    -                CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, u64GuestTscAuxMsr);
    -            }
    -            /* Restore host's TSC_AUX. */
    -            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
    -#endif
            }
     
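Note how the TSC_AUX handling simplifies above: the removed #else branches swapped MSR_K8_TSC_AUX manually with ASMRdMsr/ASMWrMsr, while the surviving path hands it to the same auto-load/store area so VT-x restores the host value itself. A sketch of the surviving decision, reusing the illustrative AUTOMSR type and addAutoMsr helper from the first sketch; the function and the two control-bit macros here are not VirtualBox's, the bit positions are the architectural ones from the Intel SDM:

    /* VMX execution-control bits, per the Intel SDM (illustrative macro names). */
    #define PROC_CTLS_RDTSC_EXIT  UINT32_C(0x00001000)  /* Primary ctls bit 12: RDTSC exiting.  */
    #define PROC_CTLS2_RDTSCP     UINT32_C(0x00000008)  /* Secondary ctls bit 3: enable RDTSCP. */

    /* Guest TSC_AUX only needs to be live while in guest mode when the guest
       can execute RDTSCP without trapping; otherwise the exit handler supplies
       the value and no auto-load/store entry is needed. */
    static void setupTscAuxMsr(uint32_t fProcCtls, uint32_t fProcCtls2,
                               AUTOMSR *paGuestMsrs, uint32_t *pcMsrs,
                               uint64_t uGuestTscAux)
    {
        if (   (fProcCtls2 & PROC_CTLS2_RDTSCP)     /* RDTSCP exposed to the guest... */
            && !(fProcCtls & PROC_CTLS_RDTSC_EXIT)) /* ...and not intercepted.        */
            addAutoMsr(paGuestMsrs, pcMsrs, 0xc0000103 /* MSR_K8_TSC_AUX */, uGuestTscAux);
    }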
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r49520 → r49523
    @@ -2924 +2924 @@
                        LogRel(("HM: CPU[%u] ExitCtls         %#RX32\n", i, pVCpu->hm.s.vmx.u32ExitCtls));
                        LogRel(("HM: CPU[%u] HCPhysMsrBitmap  %#RHp\n",  i, pVCpu->hm.s.vmx.HCPhysMsrBitmap));
    -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
                        LogRel(("HM: CPU[%u] HCPhysGuestMsr   %#RHp\n",  i, pVCpu->hm.s.vmx.HCPhysGuestMsr));
                        LogRel(("HM: CPU[%u] HCPhysHostMsr    %#RHp\n",  i, pVCpu->hm.s.vmx.HCPhysHostMsr));
                        LogRel(("HM: CPU[%u] cMsrs            %u\n",     i, pVCpu->hm.s.vmx.cMsrs));
    -#endif
                    }
                    /** @todo Log VM-entry event injection control fields
  • trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac

    r48249 → r49523
    @@ -734 +734 @@
     ;
     
    -
    -
    -; Load the corresponding guest MSR (trashes rdx & rcx)
    -%macro LOADGUESTMSR 2
    -    mov     rcx, %1
    -    mov     edx, dword [rsi + %2 + 4]
    -    mov     eax, dword [rsi + %2]
    -    wrmsr
    -%endmacro
    -
    -; Save a guest MSR (trashes rdx & rcx)
    -; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
    -%macro SAVEGUESTMSR 2
    -    mov     rcx, %1
    -    rdmsr
    -    mov     dword [rsi + %2], eax
    -    mov     dword [rsi + %2 + 4], edx
    -%endmacro
    -
     ;; @def MYPUSHSEGS
     ; Macro saving all segment registers on the stack.
    @@ -932 +913 @@
        ;  - DR7 (reset to 0x400)
        ;  - EFLAGS (reset to RT_BIT(1); not relevant)
    -
    -%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    -    ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs.
    -    LOADGUESTMSR MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
    -    LOADGUESTMSR MSR_K6_STAR,           CPUMCTX.msrSTAR
    -    LOADGUESTMSR MSR_K8_SF_MASK,        CPUMCTX.msrSFMASK
    -    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    -%endif
     
     %ifdef VBOX_WITH_CRASHDUMP_MAGIC
    @@ -1018 +991 @@
     
        pop     rsi         ; pCtx (needed in rsi by the macros below)
    -
    -%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    -    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    -    SAVEGUESTMSR MSR_K8_SF_MASK,        CPUMCTX.msrSFMASK
    -    SAVEGUESTMSR MSR_K6_STAR,           CPUMCTX.msrSTAR
    -    SAVEGUESTMSR MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
    -%endif
     
     %ifdef VMX_USE_CACHED_VMCS_ACCESSES
  • trunk/src/VBox/VMM/include/HMInternal.h

    r49520 → r49523
    @@ -47 +47 @@
     # define HM_PROFILE_EXIT_DISPATCH
     #endif
    -
    -/* The MSR auto load/store used to not work for KERNEL_GS_BASE MSR, thus we
    - * used to handle this MSR manually. See @bugref{6208}. This was clearly visible while
    - * booting Solaris 11 (11.1 b19) VMs with 2 Cpus. This is no longer the case and we
    - * always auto load/store the KERNEL_GS_BASE MSR.
    - *
    - * Note: don't forget to update the assembly files while modifying this!
    - */
    -/** @todo This define should always be in effect and the define itself removed
    -  after 'sufficient' testing. */
    -# define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     
     RT_C_DECLS_BEGIN
    @@ -587 +576 @@
            R0PTRTYPE(void *)           pvMsrBitmap;
     
    -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
            /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
             *  for guest MSRs). */
    @@ -610 +598 @@
            bool                        fUpdatedHostMsrs;
            uint8_t                     u8Align[7];
    -#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
     
            /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
  • trunk/src/VBox/VMM/include/HMInternal.mac

    r47652 → r49523
    @@ -20 +20 @@
      %endif
     %endif
    -
    -%define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     
     ;Maximum number of cached entries.