Changeset 46804 in vbox for trunk/src/VBox/VMM
Timestamp: Jun 26, 2013, 1:55:06 PM (11 years ago)
Files: 1 edited
Legend: Unmodified | Added | Removed
--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r46798)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r46804)
@@ -193,4 +193,6 @@
      *  contributary exception or a page-fault. */
     bool            fVectoringPF;
+    /** Whether the TSC offset mode needs to be updated. */
+    bool            fUpdateTscOffsetting;
 } SVMTRANSIENT, *PSVMTRANSIENT;
 /** @} */
@@ -1894,10 +1896,11 @@

 /**
- * Sets up the usage of TSC offsetting for the VCPU.
+ * Updates the use of TSC offsetting mode for the CPU and adjusts the necessary
+ * intercepts.
  *
  * @param   pVCpu       Pointer to the VMCPU.
  *
  * @remarks No-long-jump zone!!!
  */
-static void hmR0SvmSetupTscOffsetting(PVMCPU pVCpu)
+static void hmR0SvmUpdateTscOffsetting(PVMCPU pVCpu)
 {
     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
@@ -2584,4 +2587,12 @@
     }

+    /* Setup TSC offsetting. */
+    if (   pSvmTransient->fUpdateTscOffsetting
+        || HMR0GetCurrentCpu()->idCpu != pVCpu->hm.s.idLastCpu)
+    {
+        pSvmTransient->fUpdateTscOffsetting = false;
+        hmR0SvmUpdateTscOffsetting(pVCpu);
+    }
+
     /* Flush the appropriate tagged-TLB entries. */
     ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB-shootdowns, set this across the world switch. */
@@ -2662,13 +2673,12 @@
     pVmcb->ctrl.u64VmcbCleanBits = HMSVM_VMCB_CLEAN_ALL;        /* Mark the VMCB-state cache as unmodified by VMM. */

-    /* Restore host's TSC_AUX if required. */
     if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC))
     {
+        /* Restore host's TSC_AUX if required. */
         if (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
             ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);

         /** @todo Find a way to fix hardcoding a guestimate. */
-        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() +
-                             pVmcb->ctrl.u64TSCOffset - 0x400);
+        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset - 0x400);
     }
@@ -2728,5 +2738,6 @@

     SVMTRANSIENT SvmTransient;
+    SvmTransient.fUpdateTscOffsetting = true;
     uint32_t cLoops = 0;
     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
@@ -3490,6 +3501,9 @@
     int rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
     if (RT_LIKELY(rc == VINF_SUCCESS))
+    {
         pCtx->rip += 2;    /* Hardcoded opcode, AMD-V doesn't give us this information. */
+        pSvmTransient->fUpdateTscOffsetting = true;
+    }
     else
     {
@@ -3509,6 +3523,9 @@
     int rc = EMInterpretRdtscp(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
     if (RT_LIKELY(rc == VINF_SUCCESS))
+    {
         pCtx->rip += 3;    /* Hardcoded opcode, AMD-V doesn't give us this information. */
+        pSvmTransient->fUpdateTscOffsetting = true;
+    }
     else
     {
@@ -3744,4 +3761,6 @@
         if (pCtx->ecx == MSR_K6_EFER)
             pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_EFER_MSR;
+        else if (pCtx->ecx == MSR_IA32_TSC)
+            pSvmTransient->fUpdateTscOffsetting = true;
     else
Note: See TracChangeset for help on using the changeset viewer.