Changeset 45845 in vbox for trunk/src/VBox/VMM

- Timestamp: Apr 30, 2013 4:38:34 PM
- Location: trunk/src/VBox/VMM
- Files: 5 edited
Legend: in the per-file diffs below, lines prefixed with '-' were removed in r45845, lines prefixed with '+' were added, and unprefixed lines are unchanged context.
trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac
(r45341 → r45845)

      LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
      LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
 -    %endif
 -    ; Kernel GS Base is special, we need to manually load/store it, see @bugref{6208}
      LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 +    %endif

      ; Save the pCtx pointer
  …
      pop xSI         ; pCtx (needed in rsi by the macros below)

 -    ; Kernel GS Base is special, we need to manually load/store it, see @bugref{6208}.
 -    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
      %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
      ; Save the guest MSRs and load the host MSRs
 +    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
      LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
      LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
  …
      pop xSI         ; pCtx (needed in rsi by the macros below)

 -    ; Kernel GS base is special, we need to manually load/store it See @bugref{6208}.
 -    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
      %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
      ; Load the host MSRs
 +    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
      LOADHOSTMSR MSR_K8_SF_MASK
      LOADHOSTMSR MSR_K6_STAR
  …
      pop xSI         ; pCtx (needed in rsi by the macros below)

 -    ; Kernel GS base is special, load it manually. See @bugref{6208}.
 -    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
      %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
      ; Load the host MSRs
 +    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
      LOADHOSTMSR MSR_K8_SF_MASK
      LOADHOSTMSR MSR_K6_STAR
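For readers following the %ifndef block above: when VBOX_WITH_AUTO_MSR_LOAD_RESTORE is not defined, the LOADGUESTMSR macros hand-load the guest syscall MSRs right before VM-entry, and this change simply moves MSR_K8_KERNEL_GS_BASE inside that same guard instead of loading it unconditionally. A minimal C-style sketch of that manual load path, assuming the NASM macros are thin wrmsr wrappers over the CPUMCTX fields (the function name is hypothetical, and the real assembly also preserves the host values for the return path):

    #include <iprt/asm-amd64-x86.h>   /* ASMWrMsr */
    #include <iprt/x86.h>             /* MSR_K8_LSTAR, MSR_K6_STAR, ... */
    #include <VBox/vmm/cpum.h>        /* CPUMCTX */

    /* Hypothetical C rendering of the guarded LOADGUESTMSR sequence above. */
    static void sketchLoadGuestSyscallMsrs(PCCPUMCTX pCtx)
    {
        ASMWrMsr(MSR_K8_LSTAR,          pCtx->msrLSTAR);         /* elided context in the hunk */
        ASMWrMsr(MSR_K6_STAR,           pCtx->msrSTAR);
        ASMWrMsr(MSR_K8_SF_MASK,        pCtx->msrSFMASK);
        ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pCtx->msrKERNELGSBASE);  /* now inside the %ifndef too */
    }

When the define is in effect, none of this runs and the CPU itself loads these MSRs from the VMCS auto-load area on VM-entry.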
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
(r45823 → r45845)

      AssertPtr(pVCpu);

 +    #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
      hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
      hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
 +    #endif

      if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
  …
      VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
      VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
 +    #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
      VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
      VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
 +    #endif
      }
      #undef VMXLOCAL_INIT_VMCPU_MEMOBJ
  …
      }

 +    #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
      /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
      rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
  …
      if (RT_FAILURE(rc))
          goto cleanup;
 +    #endif
      }
  …
      rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);

 +    #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
      /* Setup MSR autoloading/autostoring. */
      Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
  …
      rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
      rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
 +    #else
 +    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
 +    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
 +    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
 +    #endif

      /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
  …
      AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);

 +    int rc = VINF_SUCCESS;
 +    #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
      PVMXMSR  pHostMsr  = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMsr;
      uint32_t cHostMsrs = 0;
  …
      pHostMsr->u32IndexMSR = MSR_K6_EFER;
      pHostMsr->u32Reserved = 0;
 -    #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
 +    # if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
      if (CPUMIsGuestInLongMode(pVCpu))
      {
  …
      }
      else
 -    #endif
 +    # endif
      pHostMsr->u64Value = ASMRdMsr(MSR_K6_EFER);
      pHostMsr++; cHostMsrs++;

 -    #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
 +    # if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
      if (HMVMX_IS_64BIT_HOST_MODE())
      {
  …
      pHostMsr->u32IndexMSR = MSR_K8_LSTAR;
      pHostMsr->u32Reserved = 0;
 -    pHostMsr->u64Value    = ASMRdMsr(MSR_K8_LSTAR);          /* 64 bits mode syscall rip */
 +    pHostMsr->u64Value    = ASMRdMsr(MSR_K8_LSTAR);          /* 64-bit mode syscall rip */
      pHostMsr++; cHostMsrs++;
      pHostMsr->u32IndexMSR = MSR_K8_SF_MASK;
  …
      pHostMsr->u64Value    = ASMRdMsr(MSR_K8_SF_MASK);        /* syscall flag mask */
      pHostMsr++; cHostMsrs++;
 -    /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
 -    #if 0
 -    pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
 -    pMsr->u32Reserved = 0;
 -    pMsr->u64Value    = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);     /* swapgs exchange value */
 +    pHostMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
 +    pHostMsr->u32Reserved = 0;
 +    pHostMsr->u64Value    = ASMRdMsr(MSR_K8_KERNEL_GS_BASE); /* swapgs exchange value */
      pHostMsr++; cHostMsrs++;
 -    #endif
 -    }
 -    #endif
 +    }
 +    # endif

      /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
  …
      }

 -    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
 +    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
 +    #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

      /*
  …
       */
      rc |= VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
 -    #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
 +    # ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
      if (HMVMX_IS_64BIT_HOST_MODE())
      {
  …
      rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
      }
 -    #elif HC_ARCH_BITS == 32
 +    # elif HC_ARCH_BITS == 32
      rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
      rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
 -    #else
 +    # else
      rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
      rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
 -    #endif
 +    # endif
      AssertRCReturn(rc, rc);
  …
      if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
      {
 +    #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
      PVM pVM = pVCpu->CTX_SUFF(pVM);
      PVMXMSR pGuestMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
  …
      pGuestMsr->u64Value    = pMixedCtx->msrSFMASK;           /* syscall flag mask */
      pGuestMsr++; cGuestMsrs++;
 -    /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
 -    #if 0
      pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
      pGuestMsr->u32Reserved = 0;
      pGuestMsr->u64Value    = pMixedCtx->msrKERNELGSBASE;     /* swapgs exchange value */
      pGuestMsr++; cGuestMsrs++;
 -    #endif
      }
      }
  …
      rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs);
      AssertRCReturn(rc, rc);
 +    #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

      pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_AUTO_MSRS;
  …
      return VINF_SUCCESS;

 +    #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
      for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
      {
  …
      {
      case MSR_K8_LSTAR:    pMixedCtx->msrLSTAR  = pMsr->u64Value;  break;
      case MSR_K6_STAR:     pMixedCtx->msrSTAR   = pMsr->u64Value;  break;
      case MSR_K8_SF_MASK:  pMixedCtx->msrSFMASK = pMsr->u64Value;  break;
      case MSR_K8_TSC_AUX:  CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value); break;
 -    #if 0
 -    /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
      case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
 -    #endif
      case MSR_K6_EFER:     /* EFER can't be changed without causing a VM-exit. */ break;
      default:
  …
      }
      }
 +    #endif
 +
      pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS;
      return VINF_SUCCESS;
  …
      }

 +    #ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
 +    /*
 +     * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
 +     * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}.
 +     */
 +    if (   (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
 +        && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
 +    {
 +        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
 +        uint64_t u64HostTscAux = 0;
 +        int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64HostTscAux);
 +        AssertRC(rc2);
 +        ASMWrMsr(MSR_K8_TSC_AUX, u64HostTscAux);
 +    }
 +    #endif
 +
      STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
      TMNotifyStartOfExecution(pVCpu);    /* Finally, notify TM to resume its clocks as we're about …
  …
      Assert(!(ASMGetFlags() & X86_EFL_IF));
      VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
 +
 +    #ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
 +    /*
 +     * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
 +     * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}.
 +     */
 +    if (   (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
 +        && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
 +    {
 +        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
 +        uint64_t u64HostTscAux = 0;
 +        int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64HostTscAux);
 +        AssertRC(rc2);
 +        ASMWrMsr(MSR_K8_TSC_AUX, u64HostTscAux);
 +    }
 +    #endif

      /* Restore the effects of TPR patching if any. */
  …
      case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
      case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
 -    case MSR_K8_KERNEL_GS_BASE: /* If we auto-load it, update HM_CHANGED_VMX_GUEST_AUTO_MSRS. */ break;
      case MSR_K8_FS_BASE:        /* no break */
      case MSR_K8_GS_BASE:        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS; break;
 +    /* MSR_K8_KERNEL_GS_BASE: Nothing to do as it's not part of the VMCS. Manually loaded each time on VM-entry. */
      }
  …
      case MSR_K8_FS_BASE:
      case MSR_K8_GS_BASE:
 +    {
      AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
      return VERR_VMX_UNEXPECTED_EXIT_CODE;
 +    }

      case MSR_K8_LSTAR:
  …
      case MSR_K8_SF_MASK:
      case MSR_K8_TSC_AUX:
 +    case MSR_K8_KERNEL_GS_BASE:
      {
      AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
 -    pMixedCtx->ecx));
 -    return VERR_VMX_UNEXPECTED_EXIT_CODE;
 -    }
 -
 -    case MSR_K8_KERNEL_GS_BASE:
 -    {
 -    AssertMsgFailed(("Unexpected WRMSR for an MSR that is manually loaded/stored on every VM-exit. ecx=%#RX32\n",
      pMixedCtx->ecx));
      return VERR_VMX_UNEXPECTED_EXIT_CODE;
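The host-MSR hunk above is the heart of the change: each MSR gets a VMXMSR record ({u32IndexMSR, u32Reserved, u64Value}) appended to the auto-load area and the entry count is written to VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, with KERNEL_GS_BASE now taking this path instead of the removed #if 0 block. A small sketch of that pattern, assuming the VMXMSR layout used in this file (the helper name is made up):

    #include <iprt/asm-amd64-x86.h>   /* ASMRdMsr */

    /* Append one entry to an MSR auto-load/store area; sketch only. */
    static uint32_t sketchAddAutoMsr(PVMXMSR *ppMsr, uint32_t cMsrs, uint32_t idMsr, uint64_t u64Value)
    {
        PVMXMSR pMsr = *ppMsr;
        pMsr->u32IndexMSR = idMsr;       /* which MSR this slot describes */
        pMsr->u32Reserved = 0;           /* must be zero */
        pMsr->u64Value    = u64Value;    /* value the CPU loads from / stores into this slot */
        *ppMsr = pMsr + 1;
        return cMsrs + 1;
    }

    /* Usage mirroring the hunk: KERNEL_GS_BASE joins LSTAR/STAR/SF_MASK.
     *   cHostMsrs = sketchAddAutoMsr(&pHostMsr, cHostMsrs, MSR_K8_KERNEL_GS_BASE,
     *                                ASMRdMsr(MSR_K8_KERNEL_GS_BASE));
     *   rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
     */

The guest area is filled the same way from pMixedCtx, and the counts written to VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT and VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT tell the CPU how many entries to process on VM-entry and VM-exit.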
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
(r45786 → r45845)

      pMsr++; idxMsr++;

 -    /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
 -    #if 0
 +    /* The KERNEL_GS_BASE MSR was previously not working reliably with auto load/store. See @bugref{6208} */
      pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
      pMsr->u32Reserved = 0;
      pMsr->u64Value    = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);     /* swapgs exchange value */
      pMsr++; idxMsr++;
 -    #endif

      # endif
  …
      pMsr++; idxMsr++;

 -    /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
 -    #if 0
 +    /* The KERNEL_GS_BASE MSR was previously not working reliably with auto load/store. See @bugref{6208} */
      pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
      pMsr->u32Reserved = 0;
      pMsr->u64Value    = pCtx->msrKERNELGSBASE;               /* swapgs exchange value */
      pMsr++; idxMsr++;
 -    #endif
      }
      }
  …
      pCtx->msrSFMASK = pMsr->u64Value;
      break;
 -    /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
 -    #if 0
 +    /* The KERNEL_GS_BASE MSR was previously not working reliably with auto load/store. See @bugref{6208} */
      case MSR_K8_KERNEL_GS_BASE:
          pCtx->msrKERNELGSBASE = pMsr->u64Value;
          break;
 -    #endif
      case MSR_K8_TSC_AUX:
          CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value);
  …
      && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
      {
 -        pVCpu->hm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX);
 -        uint64_t u64GuestTSCAux = 0;
 -        rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTSCAux);
 +        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
 +        uint64_t u64HostTscAux = 0;
 +        rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64HostTscAux);
          AssertRC(rc2);
 -        ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTSCAux);
 +        ASMWrMsr(MSR_K8_TSC_AUX, u64HostTscAux);
      }
  …
      /* Restore host's TSC_AUX. */
      if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
 -        ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTSCAux);
 +        ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
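Besides re-enabling KERNEL_GS_BASE in the auto-load/store area, this hunk renames u64HostTSCAux/u64GuestTSCAux to the u64HostTscAux spelling. The surrounding TSC_AUX logic is sketched below in a hypothetical wrapper (the calls and the hm.s.u64HostTscAux field come from the hunk; the local is named u64GuestTscAux here for readability, whereas the committed code reuses the name u64HostTscAux for the guest value):

    #include <iprt/asm-amd64-x86.h>   /* ASMRdMsr, ASMWrMsr */

    /* Swap TSC_AUX around guest execution so a non-exiting RDTSCP reads the
     * guest value; a sketch only, not the actual function in this file. */
    static void sketchSwapTscAux(PVMCPU pVCpu, bool fBeforeEntry)
    {
        if (fBeforeEntry)
        {
            pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);   /* remember the host value */
            uint64_t u64GuestTscAux = 0;
            int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);
            AssertRC(rc2);
            ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);               /* expose the guest value */
        }
        else
            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);    /* restore after the VM-exit */
    }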
trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac
(r45786 → r45845)

      LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
      LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
 -    %endif
 -    ; Kernel GS Base is special, we need to manually load/store it, see @bugref{6208}.
      LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 +    %endif

      %ifdef VBOX_WITH_CRASHDUMP_MAGIC
  …

      %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
 +    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 +    SAVEGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
 +    SAVEGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
      SAVEGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
 -    SAVEGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
 -    SAVEGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
 -    %endif
 -    ; Kernel GS Base is special, we need to manually load/store it, see @bugref{6208}.
 -    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 +    %endif

      %ifdef VMX_USE_CACHED_VMCS_ACCESSES
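The added SAVEGUESTMSR lines only run when VBOX_WITH_AUTO_MSR_LOAD_RESTORE is not defined: immediately after the VM-exit the MSRs still hold the guest values, so the switcher reads them back into the guest context (note the save order is the reverse of the load order at the top of the hunk). A C-style sketch of that read-back, assuming SAVEGUESTMSR is an rdmsr-plus-store macro (the function name is hypothetical):

    #include <iprt/asm-amd64-x86.h>   /* ASMRdMsr */
    #include <VBox/vmm/cpum.h>        /* CPUMCTX */

    /* Capture the guest syscall MSRs right after the VM-exit; sketch only. */
    static void sketchSaveGuestSyscallMsrs(PCPUMCTX pCtx)
    {
        pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
        pCtx->msrSFMASK       = ASMRdMsr(MSR_K8_SF_MASK);
        pCtx->msrSTAR         = ASMRdMsr(MSR_K6_STAR);
        pCtx->msrLSTAR        = ASMRdMsr(MSR_K8_LSTAR);
    }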
trunk/src/VBox/VMM/include/HMInternal.h
(r45804 → r45845)

      #endif

 -    /* The MSR auto load/store does not work for KERNEL_GS_BASE MSR, thus we
 -     * handle this MSR manually. See @bugref{6208}. This is clearly visible while
 -     * booting Solaris 11 (11.1 b19) VMs with 2 Cpus.
 +    /* The MSR auto load/store used to not work for KERNEL_GS_BASE MSR, thus we
 +     * used to handle this MSR manually. See @bugref{6208}. This was clearly visible while
 +     * booting Solaris 11 (11.1 b19) VMs with 2 Cpus. This is no longer the case and we
 +     * always auto load/store the KERNEL_GS_BASE MSR.
       *
       * Note: don't forget to update the assembly files while modifying this!
       */
 +    /** @todo This define should always be in effect and the define itself removed
 +        after 'sufficient' testing. */
      # define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
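With the define now always in effect, both the C code and the world-switch assembly select the VMCS auto-load/store path; the header comment's reminder to keep the assembly files in sync exists because the .mac sources test the same symbol with %ifndef. A minimal illustration of how the switch steers the VMCS programming (the wrapper function is hypothetical; the VMXWriteVmcs32 calls and count fields are the ones from the HMVMXR0.cpp hunk above):

    /* Program the MSR area counts depending on the build switch; sketch only. */
    static int sketchProgramMsrAreaCounts(uint32_t cGuestMsrs, uint32_t cHostMsrs)
    {
        int rc = VINF_SUCCESS;
    #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        /* MSRs, now including KERNEL_GS_BASE, travel through the auto areas. */
        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cGuestMsrs);
        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs);
        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  cHostMsrs);
    #else
        /* Manual path: the switcher assembly swaps the MSRs, counts stay zero. */
        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  0);
    #endif
        return rc;
    }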