Changeset 45845 in vbox for trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
Timestamp: Apr 30, 2013 4:38:34 PM
File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r45823)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r45845)
@@ -753 +753 @@
     AssertPtr(pVCpu);
 
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
     hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
+#endif
 
     if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
@@ -803 +805 @@
         VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
         VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
         VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
         VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
+#endif
     }
 #undef VMXLOCAL_INIT_VMCPU_MEMOBJ
@@ -863 +867 @@
     }
 
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
     rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
@@ -872 +877 @@
         if (RT_FAILURE(rc))
             goto cleanup;
+#endif
     }
 
@@ -1826 +1832 @@
     rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);
 
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     /* Setup MSR autoloading/autostoring. */
     Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
@@ -1838 +1845 @@
     rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
     rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
+#else
+    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
+    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
+    rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
+#endif
 
     /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
@@ -2235 +2247 @@
     AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
 
+    int rc = VINF_SUCCESS;
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     PVMXMSR  pHostMsr  = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMsr;
     uint32_t cHostMsrs = 0;
@@ -2243 +2257 @@
         pHostMsr->u32IndexMSR = MSR_K6_EFER;
         pHostMsr->u32Reserved = 0;
-#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
         if (CPUMIsGuestInLongMode(pVCpu))
         {
@@ -2250 +2264 @@
         }
         else
-#endif
+# endif
             pHostMsr->u64Value = ASMRdMsr(MSR_K6_EFER);
         pHostMsr++; cHostMsrs++;
     }
 
-#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     if (HMVMX_IS_64BIT_HOST_MODE())
     {
@@ -2264 +2278 @@
         pHostMsr->u32IndexMSR = MSR_K8_LSTAR;
         pHostMsr->u32Reserved = 0;
-        pHostMsr->u64Value    = ASMRdMsr(MSR_K8_LSTAR);           /* 64 bits mode syscall rip */
+        pHostMsr->u64Value    = ASMRdMsr(MSR_K8_LSTAR);           /* 64-bit mode syscall rip */
        pHostMsr++; cHostMsrs++;
        pHostMsr->u32IndexMSR = MSR_K8_SF_MASK;
@@ -2270 +2284 @@
         pHostMsr->u64Value    = ASMRdMsr(MSR_K8_SF_MASK);         /* syscall flag mask */
         pHostMsr++; cHostMsrs++;
-        /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
-#if 0
-        pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
-        pMsr->u32Reserved = 0;
-        pMsr->u64Value    = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);      /* swapgs exchange value */
+        pHostMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
+        pHostMsr->u32Reserved = 0;
+        pHostMsr->u64Value    = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);  /* swapgs exchange value */
         pHostMsr++; cHostMsrs++;
-#endif
     }
-#endif
+# endif
 
     /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
@@ -2287 +2298 @@
     }
 
-    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
+    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
+#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
 
     /*
@@ -2293 +2305 @@
     rc |= VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
@@ -2295 +2307 @@
-#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     if (HMVMX_IS_64BIT_HOST_MODE())
     {
@@ -2304 +2316 @@
         rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
     }
-#elif HC_ARCH_BITS == 32
+# elif HC_ARCH_BITS == 32
     rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
     rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
-#else
+# else
     rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
     rc |= VMXWriteVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
-#endif
+# endif
     AssertRCReturn(rc, rc);
 
@@ -3580 +3592 @@
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
     {
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
         PVM pVM = pVCpu->CTX_SUFF(pVM);
         PVMXMSR pGuestMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
@@ -3613 +3626 @@
             pGuestMsr->u64Value = pMixedCtx->msrSFMASK;           /* syscall flag mask */
             pGuestMsr++; cGuestMsrs++;
-            /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
-#if 0
             pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
             pGuestMsr->u32Reserved = 0;
             pGuestMsr->u64Value = pMixedCtx->msrKERNELGSBASE;     /* swapgs exchange value */
             pGuestMsr++; cGuestMsrs++;
-#endif
         }
     }
@@ -3649 +3659 @@
     rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs);
     AssertRCReturn(rc, rc);
+#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
 
     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_AUTO_MSRS;
@@ -5015 +5026 @@
         return VINF_SUCCESS;
 
+#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
     {
@@ -5022 +5034 @@
         {
             case MSR_K8_LSTAR:   pMixedCtx->msrLSTAR  = pMsr->u64Value;  break;
-            case MSR_K6_STAR:    pMixedCtx->msrSTAR   = pMsr->u64Value;
+            case MSR_K6_STAR:    pMixedCtx->msrSTAR   = pMsr->u64Value;  break;
             case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value;  break;
             case MSR_K8_TSC_AUX: CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value); break;
-#if 0
-            /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
             case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
-#endif
             case MSR_K6_EFER: /* EFER can't be changed without causing a VM-exit. */ break;
             default:
@@ -5037 +5046 @@
         }
     }
+#endif
+
     pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS;
     return VINF_SUCCESS;
@@ -6570 +6581 @@
     }
 
+#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
+    /*
+     * Save the current host TSC_AUX and write the guest TSC_AUX to the host, so that
+     * RDTSCP (which doesn't cause a VM-exit) reads the guest MSR. See @bugref{3324}.
+     */
+    if (   (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
+        && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
+    {
+        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
+        uint64_t u64GuestTscAux = 0;
+        int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);
+        AssertRC(rc2);
+        ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
+    }
+#endif
+
     STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
     TMNotifyStartOfExecution(pVCpu);                              /* Finally, notify TM to resume its clocks as we're about
@@ -6613 +6640 @@
     Assert(!(ASMGetFlags() & X86_EFL_IF));
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
+
+#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
+    /*
+     * Restore the host MSR_K8_TSC_AUX value which we saved before VM-entry.
+     * See @bugref{3324}.
+     */
+    if (   (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
+        && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
+    {
+        ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
+    }
+#endif
 
     /* Restore the effects of TPR patching if any. */
@@ -7654 +7697 @@
             case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
             case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
-            case MSR_K8_KERNEL_GS_BASE: /* If we auto-load it, update HM_CHANGED_VMX_GUEST_AUTO_MSRS. */   break;
             case MSR_K8_FS_BASE:        /* no break */
             case MSR_K8_GS_BASE:        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS;     break;
+            /* MSR_K8_KERNEL_GS_BASE: Nothing to do as it's not part of the VMCS. Manually loaded each time on VM-entry. */
         }
@@ -7670 +7713 @@
             case MSR_K8_FS_BASE:
             case MSR_K8_GS_BASE:
+            {
                 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
                 return VERR_VMX_UNEXPECTED_EXIT_CODE;
+            }
 
             case MSR_K8_LSTAR:
@@ -7677 +7722 @@
             case MSR_K8_SF_MASK:
             case MSR_K8_TSC_AUX:
+            case MSR_K8_KERNEL_GS_BASE:
             {
                 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
-                                 pMixedCtx->ecx));
-                return VERR_VMX_UNEXPECTED_EXIT_CODE;
-            }
-
-            case MSR_K8_KERNEL_GS_BASE:
-            {
-                AssertMsgFailed(("Unexpected WRMSR for an MSR that is manually loaded/stored on every VM-exit. ecx=%#RX32\n",
                                  pMixedCtx->ecx));
                 return VERR_VMX_UNEXPECTED_EXIT_CODE;
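
For reference, the VM-entry MSR-load and VM-exit MSR-store areas that the VBOX_WITH_AUTO_MSR_LOAD_RESTORE code programs are arrays of 16-byte entries: a 32-bit MSR index, 32 reserved bits that must be zero, and a 64-bit value (Intel SDM, VM-exit/VM-entry MSR areas). That is exactly the VMXMSR layout the hunks above fill in. Below is a minimal, self-contained user-mode sketch of the fill pattern; the AUTOMSR struct, the addAutoMsr() helper, and the dummy values are illustrative stand-ins, not VirtualBox code.

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_K6_STAR            0xc0000081
    #define MSR_K8_LSTAR           0xc0000082
    #define MSR_K8_SF_MASK         0xc0000084
    #define MSR_K8_KERNEL_GS_BASE  0xc0000102

    /* One auto-load/store area entry: 16 bytes, the same layout as the
       VMXMSR fields (u32IndexMSR/u32Reserved/u64Value) used in the change. */
    typedef struct AUTOMSR
    {
        uint32_t u32IndexMSR;   /* Bits  31:0  - the MSR index. */
        uint32_t u32Reserved;   /* Bits  63:32 - reserved, must be zero. */
        uint64_t u64Value;      /* Bits 127:64 - value the CPU loads/stores. */
    } AUTOMSR;

    /* Append one entry, mirroring the "pHostMsr->... = ...; pHostMsr++;
       cHostMsrs++;" pattern in the host-MSR hunks above. */
    static uint32_t addAutoMsr(AUTOMSR *paMsrs, uint32_t cMsrs, uint32_t idMsr, uint64_t uValue)
    {
        paMsrs[cMsrs].u32IndexMSR = idMsr;
        paMsrs[cMsrs].u32Reserved = 0;
        paMsrs[cMsrs].u64Value    = uValue;
        return cMsrs + 1;
    }

    int main(void)
    {
        AUTOMSR  aHostMsrs[8];      /* Well within the recommended 512 noted above. */
        uint32_t cHostMsrs = 0;

        /* In ring-0 the values come from ASMRdMsr(); dummy values here. */
        cHostMsrs = addAutoMsr(aHostMsrs, cHostMsrs, MSR_K8_LSTAR,          0x1000);
        cHostMsrs = addAutoMsr(aHostMsrs, cHostMsrs, MSR_K6_STAR,           0x2000);
        cHostMsrs = addAutoMsr(aHostMsrs, cHostMsrs, MSR_K8_SF_MASK,        0x3700);
        cHostMsrs = addAutoMsr(aHostMsrs, cHostMsrs, MSR_K8_KERNEL_GS_BASE, 0x4000);

        /* cHostMsrs is the count written to VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT. */
        for (uint32_t i = 0; i < cHostMsrs; i++)
            printf("entry %u: MSR %#010x = %#018llx\n",
                   i, aHostMsrs[i].u32IndexMSR, (unsigned long long)aHostMsrs[i].u64Value);
        return 0;
    }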
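
The new #ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE path swaps MSR_K8_TSC_AUX by hand instead of through the auto-load/store area: the host value is saved and the guest value written before VM-entry (so a non-exiting RDTSCP reads the guest MSR, see @bugref{3324}), and the host value is written back after the VM-exit, since the host OS relies on TSC_AUX for its own RDTSCP users. A compilable user-mode sketch of that save/write/restore sequence follows; rdmsr()/wrmsr() are stubs standing in for ASMRdMsr()/ASMWrMsr(), which are privileged ring-0 operations in the real code.

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_K8_TSC_AUX 0xc0000103

    static uint64_t g_uTscAuxShadow;    /* Stand-in for the physical MSR. */
    static uint64_t rdmsr(uint32_t idMsr)                  { (void)idMsr; return g_uTscAuxShadow; }
    static void     wrmsr(uint32_t idMsr, uint64_t uValue) { (void)idMsr; g_uTscAuxShadow = uValue; }

    typedef struct VCPUSTATE
    {
        uint64_t u64HostTscAux;    /* Saved host TSC_AUX, like pVCpu->hm.s.u64HostTscAux. */
        uint64_t u64GuestTscAux;   /* Guest TSC_AUX as tracked by CPUM. */
    } VCPUSTATE;

    static void preRunGuest(VCPUSTATE *pVCpu)
    {
        /* Save the host value, then expose the guest's TSC_AUX so a
           non-exiting RDTSCP in the guest reads the right thing. */
        pVCpu->u64HostTscAux = rdmsr(MSR_K8_TSC_AUX);
        wrmsr(MSR_K8_TSC_AUX, pVCpu->u64GuestTscAux);
    }

    static void postRunGuest(VCPUSTATE *pVCpu)
    {
        /* Restore the host value saved before VM-entry. */
        wrmsr(MSR_K8_TSC_AUX, pVCpu->u64HostTscAux);
    }

    int main(void)
    {
        VCPUSTATE VCpu = { 0, /* guest TSC_AUX */ 42 };
        g_uTscAuxShadow = 7;            /* Host's current TSC_AUX. */

        preRunGuest(&VCpu);
        printf("in guest:   TSC_AUX=%llu\n", (unsigned long long)rdmsr(MSR_K8_TSC_AUX)); /* 42 */
        postRunGuest(&VCpu);
        printf("after exit: TSC_AUX=%llu\n", (unsigned long long)rdmsr(MSR_K8_TSC_AUX)); /* 7 */
        return 0;
    }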