Changeset 32419 in vbox for trunk/src/VBox/VMM
- Timestamp: Sep 10, 2010 3:41:00 PM
- svn:sync-xref-src-repo-rev: 65781
- Location: trunk/src/VBox/VMM
- Files: 3 edited
trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp
--- trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp (r29250)
+++ trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp (r32419)
@@ -39,6 +39,10 @@
 DECLINLINE(uint64_t) tmCpuTickGetRawVirtual(PVM pVM, bool fCheckTimers)
 {
-    uint64_t u64 = TMVirtualSyncGetEx(pVM, fCheckTimers);
-    if (u64 != TMCLOCK_FREQ_VIRTUAL)
+    uint64_t u64;
+    if (fCheckTimers)
+        u64 = TMVirtualSyncGet(pVM);
+    else
+        u64 = TMVirtualSyncGetNoCheck(pVM);
+    if (u64 != TMCLOCK_FREQ_VIRTUAL) /* what's the use of this test, document! */
         u64 = ASMMultU64ByU32DivByU32(u64, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL);
     return u64;
@@ -94,3 +98,39 @@
     AssertFailed();
     return VERR_INTERNAL_ERROR;
+}
+
+
+/**
+ * Record why we refused to use offsetted TSC.
+ *
+ * Used by TMCpuTickCanUseRealTSC and TMCpuTickGetDeadlineAndTscOffset.
+ *
+ * @param   pVM     The VM handle.
+ * @param   pVCpu   The current CPU.
+ */
+DECLINLINE(void) tmCpuTickRecordOffsettedTscRefusal(PVM pVM, PVMCPU pVCpu)
+{
+
+    /* Sample the reason for refusing. */
+    if (!pVM->tm.s.fMaybeUseOffsettedHostTSC)
+        STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotFixed);
+    else if (!pVCpu->tm.s.fTSCTicking)
+        STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotTicking);
+    else if (!pVM->tm.s.fTSCUseRealTSC)
+    {
+        if (pVM->tm.s.fVirtualSyncCatchUp)
+        {
+            if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 10)
+                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE010);
+            else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 25)
+                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE025);
+            else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 100)
+                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE100);
+            else
+                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupOther);
+        }
+        else if (!pVM->tm.s.fVirtualSyncTicking)
+            STAM_COUNTER_INC(&pVM->tm.s.StatTSCSyncNotTicking);
+        else if (pVM->tm.s.fVirtualWarpDrive)
+            STAM_COUNTER_INC(&pVM->tm.s.StatTSCWarp);
+    }
 }
@@ -157,27 +197,101 @@
 #ifdef VBOX_WITH_STATISTICS
-    /* Sample the reason for refusing. */
-    if (!pVM->tm.s.fMaybeUseOffsettedHostTSC)
-        STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotFixed);
-    else if (!pVCpu->tm.s.fTSCTicking)
-        STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotTicking);
-    else if (!pVM->tm.s.fTSCUseRealTSC)
-    {
-        if (pVM->tm.s.fVirtualSyncCatchUp)
-        {
-            if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 10)
-                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE010);
-            else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 25)
-                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE025);
-            else if (pVM->tm.s.u32VirtualSyncCatchUpPercentage <= 100)
-                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupLE100);
-            else
-                STAM_COUNTER_INC(&pVM->tm.s.StatTSCCatchupOther);
-        }
-        else if (!pVM->tm.s.fVirtualSyncTicking)
-            STAM_COUNTER_INC(&pVM->tm.s.StatTSCSyncNotTicking);
-        else if (pVM->tm.s.fVirtualWarpDrive)
-            STAM_COUNTER_INC(&pVM->tm.s.StatTSCWarp);
-    }
+    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
 #endif
     return false;
+}
+
+
+/**
+ * Calculates the number of host CPU ticks till the next virtual sync deadline.
+ *
+ * @note    To save work, this function will not bother calculating the accurate
+ *          tick count for deadlines that are more than a second ahead.
+ *
+ * @returns The number of host CPU ticks to the next deadline.  Max one second.
+ * @param   cNsToDeadline   The number of nanoseconds to the next virtual
+ *                          sync deadline.
+ */
+DECLINLINE(uint64_t) tmCpuCalcTicksToDeadline(uint64_t cNsToDeadline)
+{
+    AssertCompile(TMCLOCK_FREQ_VIRTUAL <= _4G);
+    if (RT_UNLIKELY(cNsToDeadline >= TMCLOCK_FREQ_VIRTUAL))
+        return SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
+    uint64_t cTicks = ASMMultU64ByU32DivByU32(SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage),
+                                              cNsToDeadline,
+                                              TMCLOCK_FREQ_VIRTUAL);
+    if (cTicks > 4000)
+        cTicks -= 4000; /* fudge to account for overhead */
+    else
+        cTicks >>= 1;
+    return cTicks;
+}
+
+
+/**
+ * Gets the next deadline in host CPU clock ticks and the TSC offset if we can
+ * use the raw TSC.
+ *
+ * @returns The number of host CPU clock ticks to the next timer deadline.
+ * @param   pVCpu           The current CPU.
+ * @param   pfOffsettedTsc  Where to return whether TSC offsetting can be used.
+ * @param   poffRealTSC     The offset against the TSC of the current CPU.
+ * @thread  EMT(pVCpu).
+ * @remarks Superset of TMCpuTickCanUseRealTSC.
+ */
+VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, uint64_t *poffRealTSC)
+{
+    PVM      pVM = pVCpu->CTX_SUFF(pVM);
+    uint64_t cTicksToDeadline;
+
+    /*
+     * We require:
+     *     1. A fixed TSC, this is checked at init time.
+     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
+     *     3. Either that we're using the real TSC as time source or
+     *        a) we don't have any lag to catch up, and
+     *        b) the virtual sync clock hasn't been halted by an expired timer, and
+     *        c) we're not using warp drive (accelerated virtual guest time).
+     */
+    if (    pVM->tm.s.fMaybeUseOffsettedHostTSC
+        &&  RT_LIKELY(pVCpu->tm.s.fTSCTicking)
+        &&  (   pVM->tm.s.fTSCUseRealTSC
+             || (   !pVM->tm.s.fVirtualSyncCatchUp
+                 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
+                 && !pVM->tm.s.fVirtualWarpDrive))
+       )
+    {
+        *pfOffsettedTsc = true;
+        if (!pVM->tm.s.fTSCUseRealTSC)
+        {
+            /* The source is the timer synchronous virtual clock. */
+            Assert(pVM->tm.s.fTSCVirtualized);
+
+            uint64_t cNsToDeadline;
+            uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
+            uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */
+                            ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL)
+                            : u64NowVirtSync;
+            u64Now -= pVCpu->tm.s.offTSCRawSrc;
+            *poffRealTSC = u64Now - ASMReadTSC();
+            cTicksToDeadline = tmCpuCalcTicksToDeadline(cNsToDeadline);
+        }
+        else
+        {
+            /* The source is the real TSC. */
+            if (pVM->tm.s.fTSCVirtualized)
+                *poffRealTSC = pVCpu->tm.s.offTSCRawSrc;
+            else
+                *poffRealTSC = 0;
+            cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
+        }
+    }
+    else
+    {
+#ifdef VBOX_WITH_STATISTICS
+        tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
+#endif
+        *pfOffsettedTsc  = false;
+        *poffRealTSC     = 0;
+        cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
+    }
+    return cTicksToDeadline;
 }
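The heart of the new TM code is the nanoseconds-to-host-ticks conversion in tmCpuCalcTicksToDeadline(): scale the host CPU frequency by the fraction of a second remaining, then shave off a fudge factor so the timer fires slightly early rather than late. The standalone sketch below mirrors that arithmetic under stated assumptions: a hypothetical fixed 2.8 GHz host frequency stands in for SUPGetCpuHzFromGIP(), and a plain 64-bit multiply replaces ASMMultU64ByU32DivByU32 (safe here because the product stays below 2^64 for host frequencies under ~18 GHz). It is an illustration, not VirtualBox code.

/* Minimal sketch of the deadline-to-ticks conversion; assumed values. */
#include <stdint.h>
#include <stdio.h>

#define NS_PER_SEC UINT64_C(1000000000) /* TMCLOCK_VIRTUAL runs at 1 GHz */

static uint64_t CalcTicksToDeadline(uint64_t cNsToDeadline, uint64_t uHostHz)
{
    /* Deadlines a second or more away aren't worth computing precisely. */
    if (cNsToDeadline >= NS_PER_SEC)
        return uHostHz;
    /* The real code uses a 64x32/32 helper; plain 64-bit math suffices here. */
    uint64_t cTicks = uHostHz * cNsToDeadline / NS_PER_SEC;
    if (cTicks > 4000)
        cTicks -= 4000;   /* fire a bit early to absorb overhead */
    else
        cTicks >>= 1;     /* very short deadline: halve it instead */
    return cTicks;
}

int main(void)
{
    uint64_t uHostHz = UINT64_C(2800000000); /* assumed 2.8 GHz host TSC */
    /* 250 us out: 2.8e9 * 250000 / 1e9 = 700000, minus fudge = 696000. */
    printf("%llu\n", (unsigned long long)CalcTicksToDeadline(250000, uHostHz));
    return 0;
}

A value like the 696000 ticks above is exactly the kind of number the preemption timer code in HWVMXR0.cpp later divides down before arming the VMCS field.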
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
--- trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp (r29250)
+++ trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp (r32419)
@@ -370,4 +370,4 @@
 VMM_INT_DECL(uint64_t) TMVirtualGet(PVM pVM)
 {
-    return tmVirtualGet(pVM, true /* check timers*/);
+    return tmVirtualGet(pVM, true /*fCheckTimers*/);
 }
@@ -388,3 +388,18 @@
 {
     return tmVirtualGet(pVM, false /*fCheckTimers*/);
+}
+
+
+/**
+ * Converts the deadline interval from TMCLOCK_VIRTUAL to host nanoseconds.
+ *
+ * @returns Host nanosecond count.
+ * @param   pVM                     The VM handle.
+ * @param   cVirtTicksToDeadline    The TMCLOCK_VIRTUAL interval.
+ */
+DECLINLINE(uint64_t) tmVirtualVirtToNsDeadline(PVM pVM, uint64_t cVirtTicksToDeadline)
+{
+    if (RT_UNLIKELY(pVM->tm.s.fVirtualWarpDrive))
+        return ASMMultU64ByU32DivByU32(cVirtTicksToDeadline, 100, pVM->tm.s.u32VirtualWarpDrivePercentage);
+    return cVirtTicksToDeadline;
 }
@@ -398,6 +413,9 @@
  * @param   u64     raw virtual time.
  * @param   off     offVirtualSync.
- */
-DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVM pVM, uint64_t u64, uint64_t off)
+ * @param   pcNsToDeadline  Where to return the number of nanoseconds to
+ *                          the next virtual sync timer deadline.  Can be
+ *                          NULL.
+ */
+DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVM pVM, uint64_t u64, uint64_t off, uint64_t *pcNsToDeadline)
 {
     STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
@@ -451,3 +469,11 @@
         if (fUpdatePrev)
             ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
+        if (pcNsToDeadline)
+        {
+            uint64_t cNsToDeadline = u64Expire - u64;
+            if (pVM->tm.s.fVirtualSyncCatchUp)
+                cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
+                                                        pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
+            *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
+        }
         tmVirtualSyncUnlock(pVM);
@@ -466,4 +492,6 @@
         tmVirtualSyncUnlock(pVM);
 
+        if (pcNsToDeadline)
+            *pcNsToDeadline = 0;
 #ifdef IN_RING3
         REMR3NotifyTimerPending(pVM, pVCpuDst);
@@ -486,6 +514,9 @@
  * @param   pVM     The VM handle.
  * @param   u64     The virtual clock timestamp.
- */
-DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVM pVM, uint64_t u64)
+ * @param   pcNsToDeadline  Where to return the number of nanoseconds to
+ *                          the next virtual sync timer deadline.  Can be
+ *                          NULL.
+ */
+DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVM pVM, uint64_t u64, uint64_t *pcNsToDeadline)
 {
     /*
@@ -496,4 +527,6 @@
         u64 = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
         tmVirtualSyncUnlock(pVM);
+        if (pcNsToDeadline)
+            *pcNsToDeadline = 0;
         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
         Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
@@ -506,4 +539,4 @@
     uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
     if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
-        return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off);
+        return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off, pcNsToDeadline);
 
@@ -516,5 +549,9 @@
     uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
     if (u64 < u64Expire)
+    {
         tmVirtualSyncUnlock(pVM);
+        if (pcNsToDeadline)
+            *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - u64);
+    }
     else
     {
@@ -534,4 +571,6 @@
         VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
 #endif
+        if (pcNsToDeadline)
+            *pcNsToDeadline = 0;
         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
@@ -548,11 +587,18 @@
  * @returns The timestamp.
- * @param   pVM             VM handle.
- * @param   fCheckTimers    Check timers or not
+ * @param   pVM             VM handle.
+ * @param   fCheckTimers    Check timers or not
+ * @param   pcNsToDeadline  Where to return the number of nanoseconds to
+ *                          the next virtual sync timer deadline.  Can be
+ *                          NULL.
  * @thread  EMT.
  */
-DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
+DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVM pVM, bool fCheckTimers, uint64_t *pcNsToDeadline)
 {
     STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);
 
     if (!pVM->tm.s.fVirtualSyncTicking)
+    {
+        if (pcNsToDeadline)
+            *pcNsToDeadline = 0;
         return pVM->tm.s.u64VirtualSync;
+    }
@@ -594,6 +640,9 @@
     {
         off = u64 - off;
-        if (off < ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire))
+        uint64_t const u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
+        if (off < u64Expire)
         {
+            if (pcNsToDeadline)
+                *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - off);
             STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
             Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless]\n", off));
@@ -608,4 +657,6 @@
         if (RT_LIKELY(!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)))
         {
+            if (pcNsToDeadline)
+                *pcNsToDeadline = 0;
             STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
             Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
@@ -639,5 +690,5 @@
         int rcLock = tmVirtualSyncTryLock(pVM);
         if (RT_SUCCESS_NP(rcLock))
-            return tmVirtualSyncGetLocked(pVM, u64);
+            return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline);
 
         /* Re-check the ticking flag. */
@@ -648,4 +699,6 @@
             && cOuterTries > 0)
             continue;
+        if (pcNsToDeadline)
+            *pcNsToDeadline = 0;
         Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
         return off;
@@ -727,4 +780,14 @@
         else
             Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
+        if (pcNsToDeadline)
+            *pcNsToDeadline = 0;
         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
+    }
+    else if (pcNsToDeadline)
+    {
+        uint64_t cNsToDeadline = u64Expire - u64;
+        if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
+            cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
+                                                    ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage) + 100);
+        *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
     }
@@ -745,4 +808,4 @@
 VMM_INT_DECL(uint64_t) TMVirtualSyncGet(PVM pVM)
 {
-    return tmVirtualSyncGetEx(pVM, true /* check timers*/);
+    return tmVirtualSyncGetEx(pVM, true /*fCheckTimers*/, NULL /*pcNsToDeadline*/);
 }
@@ -760,4 +823,4 @@
 VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheck(PVM pVM)
 {
-    return tmVirtualSyncGetEx(pVM, false /* check timers*/);
+    return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/);
 }
@@ -775,4 +838,40 @@
 VMM_INT_DECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
 {
-    return tmVirtualSyncGetEx(pVM, fCheckTimers);
+    return tmVirtualSyncGetEx(pVM, fCheckTimers, NULL /*pcNsToDeadline*/);
+}
+
+
+/**
+ * Gets the current TMCLOCK_VIRTUAL_SYNC time and ticks to the next deadline
+ * without checking timers running on TMCLOCK_VIRTUAL.
+ *
+ * @returns The timestamp.
+ * @param   pVM             VM handle.
+ * @param   pcNsToDeadline  Where to return the number of nanoseconds to
+ *                          the next virtual sync timer deadline.
+ * @thread  EMT.
+ * @remarks May set the timer and virtual sync FFs.
+ */
+VMM_INT_DECL(uint64_t) TMVirtualSyncGetWithDeadlineNoCheck(PVM pVM, uint64_t *pcNsToDeadline)
+{
+    uint64_t cNsToDeadlineTmp; /* try to convince the compiler to skip the if tests. */
+    uint64_t u64Now = tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadlineTmp);
+    *pcNsToDeadline = cNsToDeadlineTmp;
+    return u64Now;
+}
+
+
+/**
+ * Gets the number of nanoseconds to the next virtual sync deadline.
+ *
+ * @returns The number of TMCLOCK_VIRTUAL ticks.
+ * @param   pVM     VM handle.
+ * @thread  EMT.
+ * @remarks May set the timer and virtual sync FFs.
+ */
+VMM_INT_DECL(uint64_t) TMVirtualSyncGetNsToDeadline(PVM pVM)
+{
+    uint64_t cNsToDeadline;
+    tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadline);
+    return cNsToDeadline;
 }
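Two scalings are applied to the raw virtual-sync interval before it is reported through the new pcNsToDeadline out parameter. While catching up, the virtual sync clock runs at (100 + catch-up%)% of real time, so a deadline that is N virtual nanoseconds away arrives in fewer host nanoseconds; under warp drive, guest time runs at warp% of real time, so tmVirtualVirtToNsDeadline() rescales the interval by 100/warp%. The sketch below is just that arithmetic lifted out of context; the helper names and plain 64-bit math are illustrative, not the VirtualBox implementation.

/* Sketch of the two deadline scalings; assumed helper names, not VBox code. */
#include <stdint.h>
#include <stdio.h>

/* Catching up: the clock runs (100 + pct)% of real time, so the deadline
 * arrives in proportionally fewer host nanoseconds. */
static uint64_t ScaleForCatchUp(uint64_t cNsToDeadline, uint32_t uCatchUpPct)
{
    return cNsToDeadline * 100 / (uCatchUpPct + 100);
}

/* Warp drive: guest time runs at uWarpPct% of real time, so scale the
 * virtual interval by 100/uWarpPct to get host nanoseconds. */
static uint64_t ScaleForWarp(uint64_t cNsToDeadline, uint32_t uWarpPct)
{
    return cNsToDeadline * 100 / uWarpPct;
}

int main(void)
{
    /* 1 ms to the deadline at 25% catch-up: ~800 us of host time. */
    printf("%llu\n", (unsigned long long)ScaleForCatchUp(1000000, 25));
    /* 1 ms at 200% warp (guest time twice as fast): ~500 us of host time. */
    printf("%llu\n", (unsigned long long)ScaleForWarp(1000000, 200));
    return 0;
}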
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r31786)
+++ trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r32419)
@@ -395,4 +395,7 @@
     val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
     /* External and non-maskable interrupts cause VM-exits. */
-    val = val | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT;
+    val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT;
+    /* enable the preemption timer. */
+    if (pVM->hwaccm.s.vmx.fUsePreemptTimer)
+        val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER;
     val &= pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
@@ -1888,5 +1891,16 @@
 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
 
-    if (TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hwaccm.s.vmx.u64TSCOffset))
+    bool fOffsettedTsc;
+    if (pVM->hwaccm.s.vmx.fUsePreemptTimer)
+    {
+        uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hwaccm.s.vmx.u64TSCOffset);
+        cTicksToDeadline >>= pVM->hwaccm.s.vmx.cPreemptTimerShift;
+        uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
+        rc = VMXWriteVMCS(VMX_VMCS32_GUEST_PREEMPTION_TIMER_VALUE, cPreemptionTickCount);
+        AssertRC(rc);
+    }
+    else
+        fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hwaccm.s.vmx.u64TSCOffset);
+    if (fOffsettedTsc)
     {
         uint64_t u64CurTSC = ASMReadTSC();
@@ -3957,5 +3971,8 @@
 
     case VMX_EXIT_PREEMPTION_TIMER:     /* 52 VMX-preemption timer expired. The preemption timer counted down to zero. */
-        goto ResumeExecution;
+        if (!TMTimerPollBool(pVM, pVCpu))
+            goto ResumeExecution;
+        rc = VINF_EM_RAW_TIMER_PENDING;
+        break;
 
     default:
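The VMX preemption timer does not count raw TSC ticks: it decrements once every 2^X TSC cycles, where X (cPreemptTimerShift here) comes from the IA32_VMX_MISC MSR. Hence the right shift before the value is clamped into the 32-bit VMCS field, with a little headroom below UINT32_MAX. The sketch below shows that conversion with an assumed shift of 5; the function name is made up for illustration.

/* Sketch of the preemption-timer arming math; assumed values, not VBox code. */
#include <stdint.h>
#include <stdio.h>

static uint32_t PreemptTimerValue(uint64_t cTicksToDeadline, unsigned cShift)
{
    /* The timer decrements once every 2^cShift TSC ticks. */
    cTicksToDeadline >>= cShift;
    /* Clamp into 32 bits with a little headroom, as the diff does. */
    const uint64_t cMax = UINT32_MAX - 16;
    return (uint32_t)(cTicksToDeadline < cMax ? cTicksToDeadline : cMax);
}

int main(void)
{
    /* 696000 TSC ticks to the deadline, shift 5 -> 21750 timer units. */
    printf("%u\n", PreemptTimerValue(696000, 5));
    return 0;
}

When the timer fires, exit 52 (VMX_EXIT_PREEMPTION_TIMER) now polls the timer queues via TMTimerPollBool(); only if nothing is pending does the guest resume, otherwise the exit is propagated as VINF_EM_RAW_TIMER_PENDING.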