Changeset 70056 in vbox
- Timestamp: Dec 11, 2017 2:40:02 PM
- Location:  trunk
- Files:     7 edited
trunk/include/VBox/vmm/cpum.h (r70000 → r70056)

@@ -1208 +1208 @@
 VMM_INT_DECL(void) CPUMSvmVmExitRestoreHostState(PVMCPU pVCpu, PCPUMCTX pCtx);
 VMM_INT_DECL(void) CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr);
+VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PVMCPU pVCpu, uint64_t uTicks);
 /** @} */
trunk/include/VBox/vmm/hm_svm.h (r70000 → r70056)

@@ -991 +991 @@
     /** Cache of MSRPM nested-guest physical address. */
     uint64_t u64MSRPMPhysAddr;
+    /** Cache of the TSC offset. */
+    uint64_t u64TSCOffset;
     /** Cache of the VMCB clean bits. */
     uint64_t u64VmcbCleanBits;
trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp (r69216 → r70056)

@@ -184 +184 @@
     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
     *puValue = TMCpuTickGet(pVCpu);
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    *puValue = CPUMApplyNestedGuestTscOffset(pVCpu, *puValue);
+#endif
     return VINF_SUCCESS;
 }

@@ -342 +345 @@
      *   what we want? */
     *puValue = TMCpuTickGet(pVCpu);
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    *puValue = CPUMApplyNestedGuestTscOffset(pVCpu, *puValue);
+#endif
     return VINF_SUCCESS;
 }

@@ -362 +368 @@
      *   what we want? */
     *puValue = TMCpuTickGet(pVCpu);
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    *puValue = CPUMApplyNestedGuestTscOffset(pVCpu, *puValue);
+#endif
     return VINF_SUCCESS;
 }
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (r70000 → r70056)

@@ -2695 +2695 @@
 }
 
+
+/**
+ * Applies the TSC offset of a nested-guest if any and returns the new TSC
+ * value for the guest.
+ *
+ * @returns The TSC offset after applying any nested-guest TSC offset.
+ * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
+ * @param   uTicks  The guest TSC.
+ */
+VMM_INT_DECL(uint64_t) CPUMApplyNestedGuestTscOffset(PVMCPU pVCpu, uint64_t uTicks)
+{
+#ifndef IN_RC
+    PCCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
+    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+    {
+        PCSVMVMCB pVmcb = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+        return uTicks + pVmcb->ctrl.u64TSCOffset;
+    }
+
+    /** @todo Intel. */
+#else
+    RT_NOREF(pVCpu);
+#endif
+    return uTicks;
+}
+
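The new CPUM helper is purely additive: when a nested-guest is active, the value the guest observes is the guest-level TSC plus the TSC offset programmed into the currently mapped nested-guest VMCB; otherwise the value passes through unchanged. Below is a minimal standalone model of that arithmetic, using plain uint64_t values in place of TMCpuTickGet() and the VMCB field (a sketch, not the VMM API itself):

#include <assert.h>
#include <stdint.h>

/* Stand-in for CPUMApplyNestedGuestTscOffset(): add the nested-guest VMCB's
 * TSC offset on top of the guest TSC when a nested-guest is executing,
 * otherwise return the value unchanged.  Unsigned wrap-around is intended,
 * since a nested hypervisor may program a "negative" (two's complement) offset. */
static uint64_t applyNestedTscOffset(uint64_t uGuestTsc, int fInNestedGuest, uint64_t uVmcbTscOffset)
{
    return fInNestedGuest ? uGuestTsc + uVmcbTscOffset : uGuestTsc;
}

int main(void)
{
    uint64_t const uGuestTsc = UINT64_C(0x1000);
    uint64_t const uOffset   = (uint64_t)-INT64_C(0x200);  /* negative offset */

    assert(applyNestedTscOffset(uGuestTsc, 0 /*fInNestedGuest*/, uOffset) == UINT64_C(0x1000));
    assert(applyNestedTscOffset(uGuestTsc, 1 /*fInNestedGuest*/, uOffset) == UINT64_C(0xe00));
    return 0;
}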
trunk/src/VBox/VMM/VMMAll/EMAll.cpp (r69111 → r70056)

@@ -1315 +1315 @@
 
     uint64_t uTicks = TMCpuTickGet(pVCpu);
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
+#endif
 
     /* Same behaviour in 32 & 64 bits mode */

@@ -1351 +1354 @@
 
     uint64_t uTicks = TMCpuTickGet(pVCpu);
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    uTicks = CPUMApplyNestedGuestTscOffset(pVCpu, uTicks);
+#endif
 
     /* Same behaviour in 32 & 64 bits mode */
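The two EMAll.cpp hunks sit in the interpreter's TSC read paths: after uTicks is adjusted, the value gets split across EDX:EAX as RDTSC defines, which is what the retained "Same behaviour in 32 & 64 bits mode" comment refers to. A hedged standalone model of that split follows (an illustration of the register semantics, not the emulation code itself):

#include <assert.h>
#include <stdint.h>

/* RDTSC returns the low 32 bits of the time-stamp counter in EAX and the
 * high 32 bits in EDX; in 64-bit mode the upper halves of RAX/RDX are zeroed,
 * so the split itself is identical in both modes. */
static void splitTscForRdtsc(uint64_t uTicks, uint32_t *pEax, uint32_t *pEdx)
{
    *pEax = (uint32_t)uTicks;
    *pEdx = (uint32_t)(uTicks >> 32);
}

int main(void)
{
    uint32_t eax, edx;
    splitTscForRdtsc(UINT64_C(0x1122334455667788), &eax, &edx);
    assert(eax == UINT32_C(0x55667788));
    assert(edx == UINT32_C(0x11223344));
    return 0;
}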
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp (r70020 → r70056)

@@ -366 +366 @@
     pVmcbNstGstCtrl->u64IOPMPhysAddr = pNstGstVmcbCache->u64IOPMPhysAddr;
     pVmcbNstGstCtrl->u64MSRPMPhysAddr = pNstGstVmcbCache->u64MSRPMPhysAddr;
+    pVmcbNstGstCtrl->u64TSCOffset = pNstGstVmcbCache->u64TSCOffset;
     pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking = pNstGstVmcbCache->fVIntrMasking;
     pVmcbNstGstCtrl->TLBCtrl = pNstGstVmcbCache->TLBCtrl;
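This restore pairs with the caching added in HMSVMR0.cpp below: before the nested-guest runs, HM remembers the TSC offset the nested hypervisor programmed, overwrites the VMCB field with a combined offset, and hands the original value back on #VMEXIT. A rough standalone sketch of that save/modify/restore pattern, with simplified stand-in structs rather than the real SVMVMCBCTRL/SVMNESTEDVMCBCACHE layouts:

#include <assert.h>
#include <stdint.h>

/* Simplified stand-ins; the real control area and HM cache carry many more fields. */
typedef struct { uint64_t u64TSCOffset; } VMCBCTRL;
typedef struct { uint64_t u64TSCOffset; } VMCBCACHE;

/* Before running the nested guest: remember what the nested hypervisor put in
 * the VMCB, then let HM overwrite it with the combined offset. */
static void cacheAndCombine(VMCBCTRL *pCtrl, VMCBCACHE *pCache, uint64_t uGuestOffset)
{
    pCache->u64TSCOffset = pCtrl->u64TSCOffset;                /* save   */
    pCtrl->u64TSCOffset  = uGuestOffset + pCtrl->u64TSCOffset; /* modify */
}

/* On #VMEXIT: hand the nested hypervisor back its own value. */
static void restoreOnVmExit(VMCBCTRL *pCtrl, const VMCBCACHE *pCache)
{
    pCtrl->u64TSCOffset = pCache->u64TSCOffset;                /* restore */
}

int main(void)
{
    VMCBCTRL  Ctrl  = { UINT64_C(0x100) };  /* programmed by the nested hypervisor */
    VMCBCACHE Cache = { 0 };

    cacheAndCombine(&Ctrl, &Cache, UINT64_C(0x1000));
    assert(Ctrl.u64TSCOffset == UINT64_C(0x1100));
    restoreOnVmExit(&Ctrl, &Cache);
    assert(Ctrl.u64TSCOffset == UINT64_C(0x100));
    return 0;
}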
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r70006 → r70056)

@@ -2156 +2156 @@
     pNstGstVmcbCache->u64IOPMPhysAddr = pVmcbNstGstCtrl->u64IOPMPhysAddr;
     pNstGstVmcbCache->u64MSRPMPhysAddr = pVmcbNstGstCtrl->u64MSRPMPhysAddr;
+    pNstGstVmcbCache->u64TSCOffset = pVmcbNstGstCtrl->u64TSCOffset;
     pNstGstVmcbCache->u64VmcbCleanBits = pVmcbNstGstCtrl->u64VmcbCleanBits;
     pNstGstVmcbCache->fVIntrMasking = pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking;

@@ -2707 +2708 @@
     return rcExit;
 }
+
+
+#ifdef VBOX_WITH_NESTED_HWVIRT
+/**
+ * Updates the use of TSC offsetting mode for the CPU and adjusts the necessary
+ * intercepts for the nested-guest.
+ *
+ * @param   pVM             The cross context VM structure.
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   pCtx            Pointer to the nested guest-CPU context.
+ * @param   pVmcbNstGst     Pointer to the nested-guest VM control block.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+static void hmR0SvmUpdateTscOffsettingNested(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcbNstGst)
+{
+    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
+
+    bool fParavirtTsc;
+    uint64_t uTscOffset;
+    bool fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
+
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+    PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
+
+    /*
+     * Only avoid intercepting if we determined the host TSC (++) is stable enough
+     * to not intercept -and- the nested-hypervisor itself does not want to intercept it.
+     */
+    if (   fCanUseRealTsc
+        && !(pVmcbNstGstCache->u64InterceptCtrl & (SVM_CTRL_INTERCEPT_RDTSC | SVM_CTRL_INTERCEPT_RDTSCP)))
+    {
+        pVmcbNstGstCtrl->u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_RDTSC;
+        pVmcbNstGstCtrl->u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_RDTSCP;
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
+    }
+    else
+    {
+        pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_RDTSC;
+        pVmcbNstGstCtrl->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_RDTSCP;
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
+    }
+
+    /* Apply the nested-guest VMCB's TSC offset over the guest one. */
+    uTscOffset = CPUMApplyNestedGuestTscOffset(pVCpu, uTscOffset);
+
+    /* Update the nested-guest VMCB with the combined TSC offset (of guest and nested-guest). */
+    pVmcbNstGstCtrl->u64TSCOffset = uTscOffset;
+
+    /* Finally update the VMCB clean bits since we touched the intercepts as well as the TSC offset. */
+    pVmcbNstGstCtrl->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+
+    if (fParavirtTsc)
+    {
+        /* Currently neither Hyper-V nor KVM need to update their paravirt. TSC
+           information before every VM-entry, hence disable it for performance sake. */
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
+    }
+}
+#endif

@@ -3960 +4021 @@
         || idCurrentCpu != pVCpu->hm.s.idLastCpu)
     {
-        hmR0SvmUpdateTscOffsetting(pVM, pVCpu, pVmcbNstGst);
+        hmR0SvmUpdateTscOffsettingNested(pVM, pVCpu, pCtx, pVmcbNstGst);
         pSvmTransient->fUpdateTscOffsetting = false;
     }

@@ -4213 +4274 @@
     /* TSC read must be done early for maximum accuracy. */
     PSVMVMCB pVmcbNstGst = pMixedCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
+    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
     if (!(pVmcbNstGstCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
-        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcbNstGstCtrl->u64TSCOffset);
+    {
+        /*
+         * Undo what we did in hmR0SvmUpdateTscOffsettingNested() but don't restore the
+         * nested-guest VMCB TSC offset here. It shall eventually be restored on #VMEXIT
+         * later by HMSvmNstGstVmExitNotify().
+         */
+        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcbNstGstCtrl->u64TSCOffset - pVmcbNstGstCache->u64TSCOffset);
+    }
 
     if (pSvmTransient->fRestoreTscAuxMsr)
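The adjusted TMCpuTickSetLastSeen() call is the other half of the combined-offset bookkeeping: the VMCB now carries guest offset plus nested-guest offset, but TM tracks the guest-level TSC, so the cached nested-guest part is subtracted back out of the raw host TSC reading. A small standalone model of that arithmetic, with plain integers standing in for ASMReadTSC() and the VMCB/cache fields (an illustrative sketch only):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t const uHostTsc      = UINT64_C(0x10000); /* ASMReadTSC() stand-in            */
    uint64_t const uGuestOffset  = UINT64_C(0x1000);  /* offset TM chose for the guest    */
    uint64_t const uNstGstOffset = UINT64_C(0x200);   /* cached nested-guest VMCB offset  */

    /* hmR0SvmUpdateTscOffsettingNested() stored the combined offset in the VMCB. */
    uint64_t const uVmcbOffset = uGuestOffset + uNstGstOffset;

    /* Post-run: feed TM the guest-level TSC by removing the nested-guest part again. */
    uint64_t const uLastSeen = uHostTsc + uVmcbOffset - uNstGstOffset;
    assert(uLastSeen == uHostTsc + uGuestOffset);
    return 0;
}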