VirtualBox

Timestamp:
Apr 30, 2013 4:38:34 PM
Author:
vboxsync
Message:

VMM/VMMR0: Load kernel-gs base MSR as part of the auto-load/store MSR feature.
VMM/HMVMXR0: Fixed the code to also deal with the case when auto-load/store is not defined.
VMM/VMMR0: Fixed LegacyandAmd64.mac to restore the MSRs from the stack in the right order; it is unclear how it
could have worked previously.
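
For context, with VBOX_WITH_AUTO_MSR_LOAD_RESTORE the CPU loads and stores the listed MSRs itself on VM-entry/VM-exit from a table of 16-byte entries, and this changeset adds MSR_K8_KERNEL_GS_BASE to that table. Below is a minimal stand-alone sketch of such an entry, modeled on the VMXMSR usage visible in the diff; the real structure and constants live in VirtualBox's headers, and the helper function here is hypothetical:

    #include <stdint.h>

    /* One entry in the VMX auto-load/store MSR area.
     * Field names follow the VMXMSR usage in the diff below. */
    typedef struct VMXMSRENTRY
    {
        uint32_t u32IndexMSR;   /* MSR index, i.e. the ECX value used with RDMSR/WRMSR */
        uint32_t u32Reserved;   /* must be zero */
        uint64_t u64Value;      /* value loaded on entry/exit, or slot the CPU stores into */
    } VMXMSRENTRY;

    /* IA32_KERNEL_GS_BASE: the value exchanged by SWAPGS. */
    #define MSR_K8_KERNEL_GS_BASE   UINT32_C(0xc0000102)

    /* Hypothetical helper: append KERNEL_GS_BASE to an MSR area, as r45845 now
     * does for both the host-load and guest-load/store areas. The returned count
     * must also be written to the matching VMCS MSR-count field. */
    static uint32_t addKernelGsBase(VMXMSRENTRY *paMsrs, uint32_t cMsrs, uint64_t u64KernelGsBase)
    {
        paMsrs[cMsrs].u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
        paMsrs[cMsrs].u32Reserved = 0;
        paMsrs[cMsrs].u64Value    = u64KernelGsBase;    /* swapgs exchange value */
        return cMsrs + 1;
    }

When the feature is not compiled in, the affected MSRs have to be switched by hand instead, which is what the new #ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE blocks in the diff below do for TSC_AUX.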

File:
1 edited

Legend:

  unchanged context
+ added in r45845
- removed from r45823
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

--- r45823
+++ r45845

@@ -753 +753 @@
         AssertPtr(pVCpu);

+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
         hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
         hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
+#endif

         if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)

@@ -803 +805 @@
         VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
         VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
         VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
         VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
+#endif
     }
 #undef VMXLOCAL_INIT_VMCPU_MEMOBJ

@@ -863 +867 @@
         }

+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
         /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
         rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);

@@ -872 +877 @@
         if (RT_FAILURE(rc))
             goto cleanup;
+#endif
     }

@@ -1826 +1832 @@
     rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);

+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     /* Setup MSR autoloading/autostoring. */
     Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);

@@ -1838 +1845 @@
     rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,  pVCpu->hm.s.vmx.HCPhysHostMsr);
     rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
+#else
+    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
+    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
+    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  0);
+#endif

     /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */

@@ -2235 +2247 @@
     AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);

+    int rc = VINF_SUCCESS;
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     PVMXMSR  pHostMsr           = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMsr;
     uint32_t cHostMsrs          = 0;

@@ -2243 +2257 @@
         pHostMsr->u32IndexMSR = MSR_K6_EFER;
         pHostMsr->u32Reserved = 0;
-#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
         if (CPUMIsGuestInLongMode(pVCpu))
         {

@@ -2250 +2264 @@
         }
         else
-#endif
+# endif
             pHostMsr->u64Value = ASMRdMsr(MSR_K6_EFER);
         pHostMsr++; cHostMsrs++;
     }

@@ -2257 +2271 @@
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     if (HMVMX_IS_64BIT_HOST_MODE())
     {

@@ -2264 +2278 @@
         pHostMsr->u32IndexMSR  = MSR_K8_LSTAR;
         pHostMsr->u32Reserved  = 0;
-        pHostMsr->u64Value     = ASMRdMsr(MSR_K8_LSTAR);             /* 64 bits mode syscall rip */
+        pHostMsr->u64Value     = ASMRdMsr(MSR_K8_LSTAR);             /* 64-bit mode syscall rip */
         pHostMsr++; cHostMsrs++;
         pHostMsr->u32IndexMSR  = MSR_K8_SF_MASK;

@@ -2270 +2284 @@
         pHostMsr->u64Value     = ASMRdMsr(MSR_K8_SF_MASK);           /* syscall flag mask */
         pHostMsr++; cHostMsrs++;
-        /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208}  */
-#if 0
-        pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
-        pMsr->u32Reserved = 0;
-        pMsr->u64Value    = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);         /* swapgs exchange value */
+        pHostMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
+        pHostMsr->u32Reserved = 0;
+        pHostMsr->u64Value    = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);     /* swapgs exchange value */
         pHostMsr++; cHostMsrs++;
-#endif
-    }
-#endif
+    }
+# endif

     /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */

@@ -2287 +2298 @@
     }

-    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
+    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
+#endif  /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

     /*

@@ -2293 +2305 @@
      */
     rc |= VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS,    ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
-#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     if (HMVMX_IS_64BIT_HOST_MODE())
     {

@@ -2304 +2316 @@
         rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP,   ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
     }
-#elif HC_ARCH_BITS == 32
+# elif HC_ARCH_BITS == 32
     rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP,       ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
     rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP,       ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
-#else
+# else
     rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP,       ASMRdMsr(MSR_IA32_SYSENTER_ESP));
     rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP,       ASMRdMsr(MSR_IA32_SYSENTER_EIP));
-#endif
+# endif
     AssertRCReturn(rc, rc);

@@ -3580 +3592 @@
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
     {
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
         PVM pVM             = pVCpu->CTX_SUFF(pVM);
         PVMXMSR  pGuestMsr  = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;

@@ -3613 +3626 @@
                 pGuestMsr->u64Value    = pMixedCtx->msrSFMASK;          /* syscall flag mask */
                 pGuestMsr++; cGuestMsrs++;
-                /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208}  */
-#if 0
                 pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
                 pGuestMsr->u32Reserved = 0;
                 pGuestMsr->u64Value    = pMixedCtx->msrKERNELGSBASE;    /* swapgs exchange value */
                 pGuestMsr++; cGuestMsrs++;
-#endif
             }
         }

@@ -3649 +3659 @@
         rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs);
         AssertRCReturn(rc, rc);
+#endif  /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_AUTO_MSRS;

@@ -5015 +5026 @@
         return VINF_SUCCESS;

+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
     {

@@ -5022 +5034 @@
         {
             case MSR_K8_LSTAR:          pMixedCtx->msrLSTAR  = pMsr->u64Value;                   break;
-            case MSR_K6_STAR:           pMixedCtx->msrSTAR   = pMsr->u64Value;                    break;
+            case MSR_K6_STAR:           pMixedCtx->msrSTAR   = pMsr->u64Value;                   break;
             case MSR_K8_SF_MASK:        pMixedCtx->msrSFMASK = pMsr->u64Value;                   break;
             case MSR_K8_TSC_AUX:        CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value);  break;
-#if 0
-            /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208}  */
             case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value;             break;
-#endif
             case MSR_K6_EFER:          /* EFER can't be changed without causing a VM-exit. */    break;
             default:

@@ -5037 +5046 @@
         }
     }
+#endif
+
     pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS;
     return VINF_SUCCESS;

@@ -6570 +6581 @@
     }

+#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
+    /*
+     * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
+     * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}.
+     */
+    if (    (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
+        && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
+    {
+        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
+        uint64_t u64HostTscAux = 0;
+        int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64HostTscAux);
+        AssertRC(rc2);
+        ASMWrMsr(MSR_K8_TSC_AUX, u64HostTscAux);
+    }
+#endif
+
     STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
     TMNotifyStartOfExecution(pVCpu);                            /* Finally, notify TM to resume its clocks as we're about

@@ -6613 +6640 @@
     Assert(!(ASMGetFlags() & X86_EFL_IF));
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
+
+#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
+    /*
+     * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
+     * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}.
+     */
+    if (    (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
+        && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
+    {
+        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
+        uint64_t u64HostTscAux = 0;
+        int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64HostTscAux);
+        AssertRC(rc2);
+        ASMWrMsr(MSR_K8_TSC_AUX, u64HostTscAux);
+    }
+#endif

     /* Restore the effects of TPR patching if any. */

@@ -7654 +7697 @@
                 case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
                 case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
-                case MSR_K8_KERNEL_GS_BASE: /* If we auto-load it, update HM_CHANGED_VMX_GUEST_AUTO_MSRS. */   break;
                 case MSR_K8_FS_BASE:        /* no break */
                 case MSR_K8_GS_BASE:        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS;     break;
+                /* MSR_K8_KERNEL_GS_BASE: Nothing to do as it's not part of the VMCS. Manually loaded each time on VM-entry. */
             }
         }

@@ -7670 +7713 @@
                 case MSR_K8_FS_BASE:
                 case MSR_K8_GS_BASE:
+                {
                     AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
                     return VERR_VMX_UNEXPECTED_EXIT_CODE;
+                }

                 case MSR_K8_LSTAR:

@@ -7677 +7722 @@
                 case MSR_K8_SF_MASK:
                 case MSR_K8_TSC_AUX:
+                case MSR_K8_KERNEL_GS_BASE:
                 {
                     AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
-                                      pMixedCtx->ecx));
-                    return VERR_VMX_UNEXPECTED_EXIT_CODE;
-                }
-
-                case MSR_K8_KERNEL_GS_BASE:
-                {
-                    AssertMsgFailed(("Unexpected WRMSR for an MSR that is manually loaded/stored on every VM-exit. ecx=%#RX32\n",
                                      pMixedCtx->ecx));
                    return VERR_VMX_UNEXPECTED_EXIT_CODE;