Changeset 54065 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp:
- Feb 3, 2015 10:45:39 AM (10 years ago)
- svn:sync-xref-src-repo-rev:
- 98006
- Location:
- trunk/src/VBox/VMM/VMMR0
- Files:
- 2 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r53631 r54065 5 5 6 6 /* 7 * Copyright (C) 2013-201 4Oracle Corporation7 * Copyright (C) 2013-2015 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 2257 2257 static void hmR0SvmUpdateTscOffsetting(PVMCPU pVCpu) 2258 2258 { 2259 bool fParavirtTsc = false; 2259 bool fParavirtTsc; 2260 bool fCanUseRealTsc; 2260 2261 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb; 2261 if (TMCpuTickCanUseRealTSC(pVCpu, &pVmcb->ctrl.u64TSCOffset, &fParavirtTsc)) 2262 { 2263 uint64_t u64CurTSC = ASMReadTSC(); 2264 uint64_t u64LastTick = TMCpuTickGetLastSeen(pVCpu); 2265 2266 if (u64CurTSC + pVmcb->ctrl.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu)) 2267 { 2268 pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC; 2269 pVmcb->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP; 2270 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset); 2271 } 2272 else 2273 { 2274 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC; 2275 pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP; 2276 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow); 2277 } 2262 fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVmcb->ctrl.u64TSCOffset, &fParavirtTsc); 2263 if (fCanUseRealTsc) 2264 { 2265 pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC; 2266 pVmcb->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP; 2267 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset); 2278 2268 } 2279 2269 else 2280 2270 { 2281 Assert(!fParavirtTsc);2282 2271 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC; 2283 2272 pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP; 2284 2273 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept); 2285 2274 } 2286 2275 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS; 2276 2277 /** @todo later optimize this to be done elsewhere and not before every 2278 * VM-entry. 
*/ 2287 2279 if (fParavirtTsc) 2288 2280 { … … 2291 2283 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt); 2292 2284 } 2293 2294 pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;2295 2285 } 2296 2286 … … 3200 3190 } 3201 3191 3202 /** @todo Last-seen-tick shouldn't be necessary when TM supports invariant3203 * mode. */3204 3192 if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC)) 3205 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset); 3193 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset); /** @todo use SUPReadTSC() eventually. */ 3206 3194 3207 3195 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x); … … 4357 4345 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); 4358 4346 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt); 4347 if (rc != VINF_SUCCESS) 4348 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHltToR3); 4359 4349 return rc; 4360 4350 } -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r54058 r54065 5 5 6 6 /* 7 * Copyright (C) 2012-201 4Oracle Corporation7 * Copyright (C) 2012-2015 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 5606 5606 if (pVM->hm.s.vmx.fUsePreemptTimer) 5607 5607 { 5608 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, & fOffsettedTsc, &fParavirtTsc,5609 & pVCpu->hm.s.vmx.u64TSCOffset);5608 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fOffsettedTsc, 5609 &fParavirtTsc); 5610 5610 5611 5611 /* Make sure the returned values have sane upper and lower boundaries. */ … … 5616 5616 5617 5617 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16); 5618 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); 5618 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc); 5619 5619 } 5620 5620 else 5621 5621 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc); 5622 5622 5623 /** @todo later optimize this to be done elsewhere and not before every 5624 * VM-entry. */ 5623 5625 if (fParavirtTsc) 5624 5626 { … … 5630 5632 if (fOffsettedTsc) 5631 5633 { 5632 uint64_t u64CurTSC = ASMReadTSC(); 5633 if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu)) 5634 { 5635 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */ 5636 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc); 5637 5638 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT; 5639 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc); 5640 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset); 5641 } 5642 else 5643 { 5644 /* VM-exit on RDTSC(P) as we would otherwise pass decreasing TSC values to the guest. 
*/ 5645 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT; 5646 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc); 5647 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow); 5648 } 5634 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */ 5635 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc); 5636 5637 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT; 5638 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc); 5639 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset); 5649 5640 } 5650 5641 else … … 5652 5643 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */ 5653 5644 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT; 5654 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); 5645 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc); 5655 5646 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept); 5656 5647 } … … 8713 8704 pVmxTransient->fVectoringDoublePF = false; /* Vectoring double page-fault needs to be determined later. */ 8714 8705 8715 /** @todo Last-seen-tick shouldn't be necessary when TM supports invariant8716 * mode. */8717 8706 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT)) 8718 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset); 8707 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset); /** @todo use SUPReadTSC() eventually. */ 8719 8708 8720 8709 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x); … … 8839 8828 } 8840 8829 8841 /* Profil ingthe VM-exit. */8830 /* Profile the VM-exit. 
*/ 8842 8831 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason)); 8843 8832 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); … … 8922 8911 return VBOXSTRICTRC_TODO(rcStrict); 8923 8912 } 8924 /* Profiling the VM-exit. */ 8913 8914 /* Profile the VM-exit. */ 8925 8915 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason)); 8926 8916 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitAll); … … 10461 10451 10462 10452 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt); 10453 if (rc != VINF_SUCCESS) 10454 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHltToR3); 10463 10455 return rc; 10464 10456 }
Note: See TracChangeset for help on using the changeset viewer.