Changeset 68406 in vbox
- Timestamp: Aug 14, 2017 10:22:55 AM
- File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp    (revision 68364)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp    (revision 68406)

@@ -912 +912 @@
     }

-#ifdef VBOX_WITH_NESTED_HWVIRT
-    /*
-     * Only if the nested hypervisor says it does not need to flush anything in the TLB,
-     * can we possibly apply it on the host. Otherwise, the nested-guest TLB flush setting
-     * should be used and then the host settings be added on top.
-     */
-    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
-    {
-        PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-        if (pVmcbNstGstCache->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
-            pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
-        else
-            pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = pVmcbNstGstCache->TLBCtrl.n.u8TLBFlush;
-    }
-#else
-    RT_NOREF(pCtx);
-    pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
-#endif
-
+    /*
+     * If the AMD CPU erratum 170, We need to flush the entire TLB for each world switch. Sad.
+     * This Host CPU requirement takes precedence.
+     */
     if (pVM->hm.s.svm.fAlwaysFlushTLB)
     {
-        /*
-         * This is the AMD erratum 170. We need to flush the entire TLB for each world switch. Sad.
-         */
         pCpu->uCurrentAsid = 1;
         pVCpu->hm.s.uCurrentAsid = 1;

@@ -948 +930 @@
         pVCpu->hm.s.idLastCpu = pCpu->idCpu;
     }
-    else if (pVCpu->hm.s.fForceTLBFlush)
-    {
+    else
+    {
+#ifdef VBOX_WITH_NESTED_HWVIRT
+        /*
+         * Only if the nested hypervisor says it does not need to flush anything in the TLB,
+         * can we possibly apply it on the host. Otherwise, the nested-guest TLB flush setting
+         * should be used and then the host settings be added on top.
+         */
+        if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+        {
+            PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+            if (pVmcbNstGstCache->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
+                pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
+            else
+                pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = pVmcbNstGstCache->TLBCtrl.n.u8TLBFlush;
+        }
+#else
+        RT_NOREF(pCtx);
+        pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
+#endif
+        if (pVCpu->hm.s.fForceTLBFlush)
+        {
             /* Clear the VMCB Clean Bit for NP while flushing the TLB. See @bugref{7152}. */
             pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP;

             if (fNewAsid)
             {
                 ++pCpu->uCurrentAsid;
                 bool fHitASIDLimit = false;
                 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
                 {
                     pCpu->uCurrentAsid = 1;    /* Wraparound at 1; host uses 0 */
                     pCpu->cTlbFlushes++;       /* All VCPUs that run on this host CPU must use a new ASID. */
                     fHitASIDLimit      = true;

                     if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
                     {
                         pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
                         pCpu->fFlushAsidBeforeUse = true;
                     }
                     else
                     {
                         pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
                         pCpu->fFlushAsidBeforeUse = false;
                     }
                 }

                 if (   !fHitASIDLimit
                     && pCpu->fFlushAsidBeforeUse)
                 {
                     if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
                         pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
                     else
                     {
                         pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
                         pCpu->fFlushAsidBeforeUse = false;
                     }
                 }

                 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
                 pVCpu->hm.s.idLastCpu    = pCpu->idCpu;
                 pVCpu->hm.s.cTlbFlushes  = pCpu->cTlbFlushes;
             }
             else
             {
                 if (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
                     pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
                 else
                     pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
             }

             pVCpu->hm.s.fForceTLBFlush = false;
+        }
     }

@@ -1010 +1013 @@

 #ifdef VBOX_WITH_NESTED_HWVIRT
-    Assert(pVmcb->ctrl.TLBCtrl.n.u8TLBFlush != SVM_TLB_FLUSH_NOTHING);
+    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx) || pVmcb->ctrl.TLBCtrl.n.u8TLBFlush != SVM_TLB_FLUSH_NOTHING);
 #endif

@@ -1342 +1345 @@
 static void hmR0SvmLoadGuestControlRegsNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PCPUMCTX pCtx)
 {
+    /*
+     * Guest CR0.
+     */
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))
+    {
+        pVmcbNstGst->guest.u64CR0 = pCtx->cr0;
+        pVmcbNstGst->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER;
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0);
+    }
+
     /*
      * Guest CR2.

@@ -2006 +2019 @@
               ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));

-    Log4(("Load: CS:RIP=%04x:%RX64 EFL=%#x SS:RSP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->ss.Sel, pCtx->rsp));
+    Log4(("hmR0SvmLoadGuestState: CS:RIP=%04x:%RX64 EFL=%#x CR0=%#RX32 CR3=%#RX32 CR4=%#RX32\n", pCtx->cs.Sel, pCtx->rip,
+          pCtx->eflags.u, pCtx->cr0, pCtx->cr3, pCtx->cr4));
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
     return rc;

@@ -2057 +2071 @@
     hmR0SvmVmRunCacheVmcb(pVCpu, pCtx);

+    PSVMVMCB     pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+    PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
+
     /*
      * The IOPM of the nested-guest can be ignored because the the guest always
      * into the nested-guest one and swap it back on the #VMEXIT.
      */
-    PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-    PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
-    pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;
+    pVmcbNstGstCtrl->u64IOPMPhysAddr = g_HCPhysIOBitmap;

     /*

@@ -2138 +2153 @@
               ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));

-    Log4(("Load: CS:RIP=%04x:%RX64 EFL=%#x SS:RSP=%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->ss.Sel, pCtx->rsp));
+    Log4(("hmR0SvmLoadGuestStateNested: CS:RIP=%04x:%RX64 EFL=%#x CR0=%#RX32 CR3=%#RX32 CR4=%#RX32\n", pCtx->cs.Sel, pCtx->rip,
+          pCtx->eflags.u, pCtx->cr0, pCtx->cr3, pCtx->cr4));
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
     return rc;

@@ -2166 +2182 @@
         if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
             hmR0SvmLoadSharedCR0(pVCpu, pVmcb, pCtx);
+        else
+            Assert(pVmcb->guest.u64CR0 == pCtx->cr0);
 #else
         hmR0SvmLoadSharedCR0(pVCpu, pVmcb, pCtx);

@@ -2227 +2245 @@
     if (CPUMIsGuestInNestedHwVirtMode(pMixedCtx))
     {
-        pMixedCtx->cr3
-        pMixedCtx->cr4
-        pMixedCtx->cr0
+        pMixedCtx->cr3 = pVmcb->guest.u64CR3;
+        pMixedCtx->cr4 = pVmcb->guest.u64CR4;
+        pMixedCtx->cr0 = pVmcb->guest.u64CR0;
     }
 #endif

@@ -4027 +4045 @@
 #ifdef VBOX_WITH_NESTED_HWVIRT
 /**
+ * Wrapper for running the nested-guest code in AMD-V.
+ *
+ * @returns VBox strict status code.
+ * @param   pVM         The cross context VM structure.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+DECLINLINE(int) hmR0SvmRunGuestNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    /*
+     * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
+     * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved and thus the need for this XMM wrapper.
+     * Refer MSDN docs. "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
+     */
+#ifdef VBOX_WITH_KERNEL_USING_XMM
+    return hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pCtx->hwvirt.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
+                             pVCpu->hm.s.svm.pfnVMRun);
+#else
+    return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pCtx->hwvirt.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
+#endif
+}
+
+
+/**
  * Performs some essential restoration of state after running nested-guest code in
  * AMD-V.

@@ -4393 +4437 @@
          */
         hmR0SvmPreRunGuestCommittedNested(pVM, pVCpu, pCtx, &SvmTransient);
-        rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
+
+        rc = hmR0SvmRunGuestNested(pVM, pVCpu, pCtx);

         /* Restore any residual host-state and save any bits shared between host
         hmR0SvmPostRunGuestNested(pVM, pVCpu, pCtx, &SvmTransient, rc);

+        /** @todo This needs some work... we probably should cause a \#VMEXIT on
+         *        SVM_EXIT_INVALID and handle rc != VINF_SUCCESS differently. */
         if (RT_UNLIKELY(   rc != VINF_SUCCESS                               /* Check for VMRUN errors. */
                         || SvmTransient.u64ExitCode == SVM_EXIT_INVALID))   /* Check for invalid guest-state errors. */

@@ -4699 +4746 @@
         case SVM_EXIT_INTR:
         {
-            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_INTR)
-                return hmR0SvmExecVmexit(pVCpu, pCtx);
+            /* We shouldn't direct physical interrupts to the nested-guest. */
            return hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient);
         }

@@ -5754 +5800 @@
 {
     /*
-     * Disable the global interrupt flag to not cause any interrupts or NMIs
-     * in the guest.
-     */
-    pCtx->hwvirt.svm.fGif = 0;
-
-    /*
-     * Restore the guest's "host" state.
-     */
-    CPUMSvmVmExitRestoreHostState(pCtx);
-
-    /*
-     * Restore the guest's force-flags.
-     */
-    if (pCtx->hwvirt.fLocalForcedActions)
-    {
-        VMCPU_FF_SET(pVCpu, pCtx->hwvirt.fLocalForcedActions);
-        pCtx->hwvirt.fLocalForcedActions = 0;
-    }
-
-    /*
      * Restore the modifications we did to the nested-guest VMCB in order
      * to execute the nested-guest in SVM R0.

@@ -5784 +5810 @@

     /*
-     * Write the nested-guest VMCB back to nested-guest memory.
+     * Write the nested-guest VMCB back to guest memory.
      */
     RTGCPHYS const GCPhysVmcb = pCtx->hwvirt.svm.GCPhysVmcb;

@@ -5795 +5821 @@
     memset(pVmcbNstGstCtrl, 0, sizeof(*pVmcbNstGstCtrl));
     Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
+
+    /*
+     * Disable the global interrupt flag to not cause any interrupts or NMIs
+     * in the guest.
+     */
+    pCtx->hwvirt.svm.fGif = 0;
+
+    /*
+     * Restore the guest's "host" state.
+     */
+    CPUMSvmVmExitRestoreHostState(pCtx);
+
+    /*
+     * Restore the guest's force-flags.
+     */
+    if (pCtx->hwvirt.fLocalForcedActions)
+    {
+        VMCPU_FF_SET(pVCpu, pCtx->hwvirt.fLocalForcedActions);
+        pCtx->hwvirt.fLocalForcedActions = 0;
+    }

     /*

@@ -5914 +5960 @@
      * IO permission bitmap (IOPM).
      */
-    RTHCPHYS HCPhysNstGstMsrpm;
-    rc = PGMPhysGCPhys2HCPhys(pVM, pVmcbNstGstCtrl->u64MSRPMPhysAddr, &HCPhysNstGstMsrpm);
+    RTGCPHYS const GCPhysIOBitmap = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
+    rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap), GCPhysIOBitmap,
+                                 SVM_IOPM_PAGES * X86_PAGE_4K_SIZE);
     if (RT_FAILURE(rc))
     {
-        Log(("hmR0SvmExecVmrun: Failed reading the MSR permission bitmap at %#RGp. rc=%Rrc\n", GCPhysMsrBitmap, rc));
+        Log(("hmR0SvmExecVmrun: Failed reading the IO permission bitmap at %#RGp. rc=%Rrc\n", GCPhysIOBitmap, rc));
         pVmcbNstGstCtrl->u64ExitCode = SVM_EXIT_INVALID;
         return hmR0SvmExecVmexit(pVCpu, pCtx);

@@ -6034 +6081 @@
     pCtx->hwvirt.svm.fGif = 1;

+    Log4(("hmR0SvmExecVmrun: CR0=%#RX32 CR3=%#RX64 CR4=%#RX32\n", pCtx->cr0, pCtx->cr3, pCtx->cr4));
     return hmR0SvmNstGstWorldSwitch(pVCpu, pCtx);
 }
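The bulk of the changeset is the restructured TLB-flush decision in the tagged-TLB flush path: the host-side requirement (the AMD erratum 170 workaround, or a per-VCPU forced flush) now takes precedence, and the nested hypervisor's requested flush type is only merged in on the non-forced path. The following is a condensed, stand-alone sketch of that decision order; the struct, the decide_tlb_flush() helper and the flush constants are simplified stand-ins for illustration and are not the actual VirtualBox code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the SVM_TLB_FLUSH_* values written to the VMCB TLB-control field. */
enum { TLB_FLUSH_NOTHING, TLB_FLUSH_ENTIRE, TLB_FLUSH_SINGLE_CONTEXT };

/* Hypothetical, simplified inputs to the decision. */
typedef struct
{
    bool    fAlwaysFlushTlb;      /* host workaround for AMD erratum 170 */
    bool    fForceTlbFlush;       /* per-VCPU forced flush request */
    bool    fInNestedGuestMode;   /* a nested hardware-virtualization guest is active */
    uint8_t u8NestedGuestFlush;   /* flush type requested by the nested hypervisor */
    bool    fFlushByAsid;         /* host CPU supports flush-by-ASID */
} TLBFLUSHIN;

/* Mirrors the ordering introduced by r68406: the host-CPU requirement (erratum 170)
 * wins outright; otherwise start from the nested hypervisor's request and only
 * escalate when the host itself needs a flush. */
static uint8_t decide_tlb_flush(const TLBFLUSHIN *pIn)
{
    if (pIn->fAlwaysFlushTlb)                       /* erratum 170: flush everything, every world switch */
        return TLB_FLUSH_ENTIRE;

    uint8_t u8Flush = TLB_FLUSH_NOTHING;
    if (pIn->fInNestedGuestMode)                    /* honour the nested-guest TLB flush setting */
        u8Flush = pIn->u8NestedGuestFlush;

    if (pIn->fForceTlbFlush)                        /* host flush requirement layered on top */
        u8Flush = pIn->fFlushByAsid ? TLB_FLUSH_SINGLE_CONTEXT : TLB_FLUSH_ENTIRE;
    return u8Flush;
}

int main(void)
{
    TLBFLUSHIN In = { false, true, true, TLB_FLUSH_NOTHING, true };
    printf("flush type = %u\n", (unsigned)decide_tlb_flush(&In));   /* prints 2 (single context) */
    return 0;
}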