Changeset 70056 in vbox for trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
Timestamp: Dec 11, 2017 2:40:02 PM
svn:sync-xref-src-repo-rev: 119598
File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- r70006
+++ r70056

@@ -2156,4 +2156,5 @@
     pNstGstVmcbCache->u64IOPMPhysAddr  = pVmcbNstGstCtrl->u64IOPMPhysAddr;
     pNstGstVmcbCache->u64MSRPMPhysAddr = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
+    pNstGstVmcbCache->u64TSCOffset     = pVmcbNstGstCtrl->u64TSCOffset;
     pNstGstVmcbCache->u64VmcbCleanBits = pVmcbNstGstCtrl->u64VmcbCleanBits;
     pNstGstVmcbCache->fVIntrMasking    = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;

@@ -2707,4 +2708,64 @@
     return rcExit;
 }
+
+
+#ifdef VBOX_WITH_NESTED_HWVIRT
+/**
+ * Updates the use of TSC offsetting mode for the CPU and adjusts the necessary
+ * intercepts for the nested-guest.
+ *
+ * @param   pVM             The cross context VM structure.
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   pCtx            Pointer to the nested guest-CPU context.
+ * @param   pVmcbNstGst     Pointer to the nested-guest VM control block.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static void hmR0SvmUpdateTscOffsettingNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcbNstGst)
+{
+    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
+
+    bool     fParavirtTsc;
+    uint64_t uTscOffset;
+    bool     fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
+
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    PSVMVMCBCTRL         pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
+
+    /*
+     * Only avoid intercepting if we determined the host TSC (++) is stable enough
+     * to not intercept -and- the nested-hypervisor itself does not want to intercept it.
+     */
+    if (   fCanUseRealTsc
+        && !(pVmcbNstGstCache->u64InterceptCtrl & (SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP)))
+    {
+        pVmcbNstGstCtrl->u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_RDTSC;
+        pVmcbNstGstCtrl->u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_RDTSCP;
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
+    }
+    else
+    {
+        pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_RDTSC;
+        pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_RDTSCP;
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
+    }
+
+    /* Apply the nested-guest VMCB's TSC offset over the guest one. */
+    uTscOffset = CPUMApplyNestedGuestTscOffset(pVCpu, uTscOffset);
+
+    /* Update the nested-guest VMCB with the combined TSC offset (of guest and nested-guest). */
+    pVmcbNstGstCtrl->u64TSCOffset = uTscOffset;
+
+    /* Finally update the VMCB clean bits since we touched the intercepts as well as the TSC offset. */
+    pVmcbNstGstCtrl->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+
+    if (fParavirtTsc)
+    {
+        /* Currently neither Hyper-V nor KVM need to update their paravirt. TSC
+           information before every VM-entry, hence disable it for performance sake. */
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
+    }
+}
+#endif
 
 

@@ -3960,5 +4021,5 @@
         || idCurrentCpu != pVCpu->hm.s.idLastCpu)
     {
-        hmR0SvmUpdateTscOffsetting(pVM, pVCpu, pVmcbNstGst);
+        hmR0SvmUpdateTscOffsettingNested(pVM, pVCpu, pCtx, pVmcbNstGst);
         pSvmTransient->fUpdateTscOffsetting = false;
     }

@@ -4213,8 +4274,16 @@
 
     /* TSC read must be done early for maximum accuracy. */
-    PSVMVMCB pVmcbNstGst = pMixedCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-    PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
+    PSVMVMCB             pVmcbNstGst      = pMixedCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+    PSVMVMCBCTRL         pVmcbNstGstCtrl  = &pVmcbNstGst->ctrl;
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     if (!(pVmcbNstGstCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
-        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcbNstGstCtrl->u64TSCOffset);
+    {
+        /*
+         * Undo what we did in hmR0SvmUpdateTscOffsettingNested() but don't restore the
+         * nested-guest VMCB TSC offset here. It shall eventually be restored on #VMEXIT
+         * later by HMSvmNstGstVmExitNotify().
+         */
+        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcbNstGstCtrl->u64TSCOffset - pVmcbNstGstCache->u64TSCOffset);
+    }
 
     if (pSvmTransient->fRestoreTscAuxMsr)
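The core of the change is arithmetic on TSC offsets: the nested-guest VMCB ends up carrying the combined offset (the outer guest's offset plus the offset the nested hypervisor programmed), and after the VM run the cached nested-guest offset is subtracted again so that TMCpuTickSetLastSeen() records the TSC at the outer guest's level. Below is a minimal standalone sketch of that arithmetic, not VirtualBox code; all variable names are made up for illustration and only stand in for the quantities handled by the changeset.

    /*
     * Minimal standalone model of the TSC-offset arithmetic in this changeset.
     * Not VirtualBox code; all names below are illustrative only.
     */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t rawTsc            = 1000000000ULL; /* stand-in for ASMReadTSC() */
        const uint64_t guestOffset       = 5000ULL;       /* outer guest's TSC offset  */
        const uint64_t nestedGuestOffset = 300ULL;        /* offset the nested hypervisor
                                                             put in its VMCB           */

        /* Cached before the VMCB is modified (cf. u64TSCOffset in the VMCB cache). */
        const uint64_t cachedNestedGuestOffset = nestedGuestOffset;

        /* The VMCB that actually runs gets the combined offset, so RDTSC inside the
           nested guest observes rawTsc + guestOffset + nestedGuestOffset. */
        const uint64_t combinedOffset       = guestOffset + nestedGuestOffset;
        const uint64_t tscSeenByNestedGuest = rawTsc + combinedOffset;

        /* After the run, undo only the nested-guest part so the "last seen" value is
           reported at the outer guest's level. */
        const uint64_t lastSeenGuestTsc = rawTsc + combinedOffset - cachedNestedGuestOffset;
        assert(lastSeenGuestTsc == rawTsc + guestOffset);

        printf("nested guest sees : %llu\n", (unsigned long long)tscSeenByNestedGuest);
        printf("outer guest level : %llu\n", (unsigned long long)lastSeenGuestTsc);
        return 0;
    }

Checking the identity rawTsc + (guestOffset + nestedGuestOffset) - nestedGuestOffset == rawTsc + guestOffset is all the sketch does; the real code additionally manages the RDTSC/RDTSCP intercepts, the VMCB clean bits, and restores the nested-guest VMCB offset on #VMEXIT.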