VirtualBox

Changeset 45845 in vbox for trunk/src/VBox/VMM


Timestamp: Apr 30, 2013 4:38:34 PM (12 years ago)
Author: vboxsync
Message:

VMM/VMMR0: Load kernel-gs base MSR as part of the auto-load/store MSR feature.
VMM/HMVMXR0: Fixed the code to also deal with the case when auto-load/store is not defined.
VMM/VMMR0: Fixed LegacyandAMD64.mac to restore the MSRs from the stack in the right order; it is not clear how it could have worked previously.
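
For context, VT-x MSR auto-load/store works by pointing the VMCS at small tables of {MSR index, value} entries that the CPU loads on VM-entry and stores on VM-exit; this changeset moves KERNEL_GS_BASE into those tables instead of swapping it by hand around each world switch. A minimal standalone sketch of how such an entry is appended (simplified stand-in struct and helper names, not the actual VBox VMXMSR/HMVMXR0 code):

    #include <stdint.h>

    /* Simplified stand-in for a VMX auto-load/store MSR entry:
       32-bit MSR index, reserved field, 64-bit value. */
    typedef struct
    {
        uint32_t u32IndexMSR;
        uint32_t u32Reserved;
        uint64_t u64Value;
    } MSRENTRY;

    #define MSR_K8_KERNEL_GS_BASE UINT32_C(0xc0000102) /* architectural IA32_KERNEL_GS_BASE index */

    /* Append KERNEL_GS_BASE to an MSR area and return the new entry count;
       that count is what ends up in e.g. VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT. */
    static uint32_t AddKernelGsBaseEntry(MSRENTRY *paMsrs, uint32_t cMsrs, uint64_t uValue)
    {
        paMsrs[cMsrs].u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
        paMsrs[cMsrs].u32Reserved = 0;
        paMsrs[cMsrs].u64Value    = uValue; /* swapgs exchange value */
        return cMsrs + 1;
    }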

Location: trunk/src/VBox/VMM
Files: 5 edited

Legend: lines prefixed with "+" were added, lines prefixed with "-" were removed, unprefixed lines are unmodified context.
  • trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac

    r45341 r45845  
     LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
     LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
-%endif
-    ; Kernel GS Base is special, we need to manually load/store it, see @bugref{6208}
     LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
+%endif
 
     ; Save the pCtx pointer
     
     pop     xSI         ; pCtx (needed in rsi by the macros below)
 
-    ; Kernel GS Base is special, we need to manually load/store it, see @bugref{6208}.
-    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     ; Save the guest MSRs and load the host MSRs
+    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
     LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
     LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
     
     pop     xSI         ; pCtx (needed in rsi by the macros below)
 
-    ; Kernel GS base is special, we need to manually load/store it See @bugref{6208}.
-    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     ; Load the host MSRs
+    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
     LOADHOSTMSR MSR_K8_SF_MASK
     LOADHOSTMSR MSR_K6_STAR
     
     pop     xSI         ; pCtx (needed in rsi by the macros below)
 
-    ; Kernel GS base is special, load it manually. See @bugref{6208}.
-    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     ; Load the host MSRs
+    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
     LOADHOSTMSR MSR_K8_SF_MASK
     LOADHOSTMSR MSR_K6_STAR
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r45823 r45845  
         AssertPtr(pVCpu);
 
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
         hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
         hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
+#endif
 
         if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     
         VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
         VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
         VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
         VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
+#endif
     }
 #undef VMXLOCAL_INIT_VMCPU_MEMOBJ
     
         }
 
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
         /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
         rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
     
         if (RT_FAILURE(rc))
             goto cleanup;
+#endif
     }
 
     
     rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);
 
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     /* Setup MSR autoloading/autostoring. */
     Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
     
     rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,  pVCpu->hm.s.vmx.HCPhysHostMsr);
     rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
+#else
+    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
+    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
+    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  0);
+#endif
 
     /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
     
     AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
 
+    int rc = VINF_SUCCESS;
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     PVMXMSR  pHostMsr           = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMsr;
     uint32_t cHostMsrs          = 0;
     
         pHostMsr->u32IndexMSR = MSR_K6_EFER;
         pHostMsr->u32Reserved = 0;
-#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
         if (CPUMIsGuestInLongMode(pVCpu))
         {
     
         }
         else
-#endif
+# endif
             pHostMsr->u64Value = ASMRdMsr(MSR_K6_EFER);
         pHostMsr++; cHostMsrs++;
     }
 
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     if (HMVMX_IS_64BIT_HOST_MODE())
     {
     
         pHostMsr->u32IndexMSR  = MSR_K8_LSTAR;
         pHostMsr->u32Reserved  = 0;
-        pHostMsr->u64Value     = ASMRdMsr(MSR_K8_LSTAR);             /* 64 bits mode syscall rip */
+        pHostMsr->u64Value     = ASMRdMsr(MSR_K8_LSTAR);             /* 64-bit mode syscall rip */
         pHostMsr++; cHostMsrs++;
         pHostMsr->u32IndexMSR  = MSR_K8_SF_MASK;
     
         pHostMsr->u64Value     = ASMRdMsr(MSR_K8_SF_MASK);           /* syscall flag mask */
         pHostMsr++; cHostMsrs++;
-        /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208}  */
-#if 0
-        pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
-        pMsr->u32Reserved = 0;
-        pMsr->u64Value    = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);         /* swapgs exchange value */
+        pHostMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
+        pHostMsr->u32Reserved = 0;
+        pHostMsr->u64Value    = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);     /* swapgs exchange value */
         pHostMsr++; cHostMsrs++;
-#endif
-    }
-#endif
+    }
+# endif
 
     /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
     
     }
 
-    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
+    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
+#endif  /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
 
     /*
     
      */
     rc |= VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS,    ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
-#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     if (HMVMX_IS_64BIT_HOST_MODE())
     {
     
         rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP,   ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
     }
-#elif HC_ARCH_BITS == 32
+# elif HC_ARCH_BITS == 32
     rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP,       ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
     rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP,       ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
-#else
+# else
     rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP,       ASMRdMsr(MSR_IA32_SYSENTER_ESP));
     rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP,       ASMRdMsr(MSR_IA32_SYSENTER_EIP));
-#endif
+# endif
     AssertRCReturn(rc, rc);
 
     
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
     {
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
         PVM pVM             = pVCpu->CTX_SUFF(pVM);
         PVMXMSR  pGuestMsr  = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
     
                 pGuestMsr->u64Value    = pMixedCtx->msrSFMASK;          /* syscall flag mask */
                 pGuestMsr++; cGuestMsrs++;
-                /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208}  */
-#if 0
                 pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
                 pGuestMsr->u32Reserved = 0;
                 pGuestMsr->u64Value    = pMixedCtx->msrKERNELGSBASE;    /* swapgs exchange value */
                 pGuestMsr++; cGuestMsrs++;
-#endif
             }
         }
     
         rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs);
         AssertRCReturn(rc, rc);
+#endif  /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
 
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_AUTO_MSRS;
     
         return VINF_SUCCESS;
 
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
     {
     
         {
             case MSR_K8_LSTAR:          pMixedCtx->msrLSTAR  = pMsr->u64Value;                   break;
-            case MSR_K6_STAR:           pMixedCtx->msrSTAR   = pMsr->u64Value;                    break;
+            case MSR_K6_STAR:           pMixedCtx->msrSTAR   = pMsr->u64Value;                   break;
             case MSR_K8_SF_MASK:        pMixedCtx->msrSFMASK = pMsr->u64Value;                   break;
             case MSR_K8_TSC_AUX:        CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value);  break;
-#if 0
-            /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208}  */
             case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value;             break;
-#endif
             case MSR_K6_EFER:          /* EFER can't be changed without causing a VM-exit. */    break;
             default:
     
         }
     }
+#endif
+
     pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS;
     return VINF_SUCCESS;
     
     }
 
+#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
+    /*
+     * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
+     * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}.
+     */
+    if (    (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
+        && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
+    {
+        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
+        uint64_t u64HostTscAux = 0;
+        int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64HostTscAux);
+        AssertRC(rc2);
+        ASMWrMsr(MSR_K8_TSC_AUX, u64HostTscAux);
+    }
+#endif
+
     STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
     TMNotifyStartOfExecution(pVCpu);                            /* Finally, notify TM to resume its clocks as we're about
     
     Assert(!(ASMGetFlags() & X86_EFL_IF));
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
+
+#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
+    /*
+     * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
+     * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}.
+     */
+    if (    (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
+        && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
+    {
+        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
+        uint64_t u64HostTscAux = 0;
+        int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64HostTscAux);
+        AssertRC(rc2);
+        ASMWrMsr(MSR_K8_TSC_AUX, u64HostTscAux);
+    }
+#endif
 
     /* Restore the effects of TPR patching if any. */
     
                 case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
                 case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
-                case MSR_K8_KERNEL_GS_BASE: /* If we auto-load it, update HM_CHANGED_VMX_GUEST_AUTO_MSRS. */   break;
                 case MSR_K8_FS_BASE:        /* no break */
                 case MSR_K8_GS_BASE:        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS;     break;
+                /* MSR_K8_KERNEL_GS_BASE: Nothing to do as it's not part of the VMCS. Manually loaded each time on VM-entry. */
             }
         }
     
                 case MSR_K8_FS_BASE:
                 case MSR_K8_GS_BASE:
+                {
                     AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
                     return VERR_VMX_UNEXPECTED_EXIT_CODE;
+                }
 
                 case MSR_K8_LSTAR:
     
                 case MSR_K8_SF_MASK:
                 case MSR_K8_TSC_AUX:
+                case MSR_K8_KERNEL_GS_BASE:
                 {
                     AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
-                                     pMixedCtx->ecx));
-                    return VERR_VMX_UNEXPECTED_EXIT_CODE;
-                }
-
-                case MSR_K8_KERNEL_GS_BASE:
-                {
-                    AssertMsgFailed(("Unexpected WRMSR for an MSR that is manually loaded/stored on every VM-exit. ecx=%#RX32\n",
                                      pMixedCtx->ecx));
                     return VERR_VMX_UNEXPECTED_EXIT_CODE;
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r45786 r45845  
             pMsr++; idxMsr++;
 
-            /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208}  */
-#if 0
+            /* The KERNEL_GS_BASE MSR was previously not working reliably with auto load/store. See @bugref{6208}  */
             pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
             pMsr->u32Reserved = 0;
             pMsr->u64Value    = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);    /* swapgs exchange value */
             pMsr++; idxMsr++;
-#endif
         }
 # endif
     
             pMsr++; idxMsr++;
 
-            /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208}  */
-#if 0
+            /* The KERNEL_GS_BASE MSR was previously not working reliably with auto load/store. See @bugref{6208}  */
             pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
             pMsr->u32Reserved = 0;
             pMsr->u64Value    = pCtx->msrKERNELGSBASE;    /* swapgs exchange value */
             pMsr++; idxMsr++;
-#endif
         }
     }
     
                 pCtx->msrSFMASK = pMsr->u64Value;
                 break;
-            /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208}  */
-#if 0
+            /* The KERNEL_GS_BASE MSR was previously not working reliably with auto load/store. See @bugref{6208}  */
             case MSR_K8_KERNEL_GS_BASE:
                 pCtx->msrKERNELGSBASE = pMsr->u64Value;
                 break;
-#endif
             case MSR_K8_TSC_AUX:
                 CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value);
     
         && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
     {
-        pVCpu->hm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX);
-        uint64_t u64GuestTSCAux = 0;
-        rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTSCAux);
+        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
+        uint64_t u64HostTscAux = 0;
+        rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64HostTscAux);
         AssertRC(rc2);
-        ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTSCAux);
+        ASMWrMsr(MSR_K8_TSC_AUX, u64HostTscAux);
     }
 #endif
     
         /* Restore host's TSC_AUX. */
         if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
-            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTSCAux);
+            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
 #endif
 
  • trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac

    r45786 r45845  
     LOADGUESTMSR MSR_K6_STAR,           CPUMCTX.msrSTAR
     LOADGUESTMSR MSR_K8_SF_MASK,        CPUMCTX.msrSFMASK
-%endif
-    ; Kernel GS Base is special, we need to manually load/store it, see @bugref{6208}.
     LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
+%endif
 
 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
     
 
 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
+    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
+    SAVEGUESTMSR MSR_K8_SF_MASK,        CPUMCTX.msrSFMASK
+    SAVEGUESTMSR MSR_K6_STAR,           CPUMCTX.msrSTAR
     SAVEGUESTMSR MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
-    SAVEGUESTMSR MSR_K6_STAR,           CPUMCTX.msrSTAR
-    SAVEGUESTMSR MSR_K8_SF_MASK,        CPUMCTX.msrSFMASK
-%endif
-    ; Kernel GS Base is special, we need to manually load/store it, see @bugref{6208}.
-    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
+%endif
 
 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
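
    The reordering in the hunk above is a stack-discipline fix: assuming LOADGUESTMSR pushes the host value of each MSR before loading the guest one, the corresponding restores must pop in the reverse order, so the last MSR loaded (KERNEL_GS_BASE) is the first one put back. A small self-contained C analogue of that LIFO pairing (hypothetical helper names, not the VBox macros):

        #include <assert.h>
        #include <stdint.h>

        /* Hypothetical LIFO of host MSR values parked around guest execution. */
        static uint64_t g_auSaved[8];
        static unsigned g_cSaved;

        static void     LoadGuestMsr(uint64_t uHostValue) { g_auSaved[g_cSaved++] = uHostValue; } /* push host value */
        static uint64_t RestoreHostMsr(void)              { return g_auSaved[--g_cSaved]; }       /* pop host value */

        int main(void)
        {
            /* Guest-load order: STAR, SF_MASK, KERNEL_GS_BASE. */
            LoadGuestMsr(0x1111); /* host STAR */
            LoadGuestMsr(0x2222); /* host SF_MASK */
            LoadGuestMsr(0x3333); /* host KERNEL_GS_BASE */

            /* Restore order must be the mirror image: KERNEL_GS_BASE, SF_MASK, STAR. */
            assert(RestoreHostMsr() == 0x3333); /* KERNEL_GS_BASE */
            assert(RestoreHostMsr() == 0x2222); /* SF_MASK */
            assert(RestoreHostMsr() == 0x1111); /* STAR */
            return 0;
        }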
  • trunk/src/VBox/VMM/include/HMInternal.h

    r45804 r45845  
 #endif
 
-/* The MSR auto load/store does not work for KERNEL_GS_BASE MSR, thus we
- * handle this MSR manually. See @bugref{6208}. This is clearly visible while
- * booting Solaris 11 (11.1 b19) VMs with 2 Cpus.
+/* The MSR auto load/store used to not work for KERNEL_GS_BASE MSR, thus we
+ * used to handle this MSR manually. See @bugref{6208}. This was clearly visible while
+ * booting Solaris 11 (11.1 b19) VMs with 2 Cpus. This is no longer the case and we
+ * always auto load/store the KERNEL_GS_BASE MSR.
  *
  * Note: don't forget to update the assembly files while modifying this!
  */
+/** @todo This define should always be in effect and the define itself removed
+  after 'sufficient' testing. */
 # define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
 
