Changeset 2581 in vbox
- Timestamp: May 10, 2007 3:13:29 PM
- Location: trunk
- Files: 5 edited
Legend:
  ' ' Unmodified
  '+' Added
  '-' Removed
trunk/include/VBox/tm.h
--- trunk/include/VBox/tm.h (r2550)
+++ trunk/include/VBox/tm.h (r2581)
@@ -64 +64 @@


-
 /** @name Real Clock Methods
  * @{
@@ -292 +291 @@
  * @returns TSC offset
  * @param pVM The VM to operate on.
+ * @todo Remove this when the code has been switched to TMCpuTickCanUseRealTSC.
  */
 TMDECL(uint64_t) TMCpuTickGetOffset(PVM pVM);
+
+/**
+ * Checks if AMD-V / VT-x can use an offsetted hardware TSC or not.
+ *
+ * @returns true/false accordingly.
+ * @param pVM           The VM handle.
+ * @param poffRealTSC   The offset against the TSC of the current CPU.
+ *                      Can be NULL.
+ * @thread EMT.
+ */
+TMDECL(bool) TMCpuTickCanUseRealTSC(PVM pVM, uint64_t *poffRealTSC);

 /**
@@ -543 +554 @@
  */
 TMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t u64MilliTS);
-

 /**
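The new API is aimed at the hardware virtualization execution path: before resuming the guest, the EMT can ask TM whether the CPU's TSC-offsetting feature may be used instead of intercepting RDTSC. A minimal caller sketch follows; hwaccmSetTscOffset() and hwaccmInterceptRdtsc() are hypothetical helpers invented for illustration, and only TMCpuTickCanUseRealTSC() and TMCpuTickGet() come from this header.

#include <VBox/tm.h>

/* Hypothetical helpers for this sketch only; not part of the changeset. */
extern void hwaccmSetTscOffset(PVM pVM, uint64_t offRealTSC);
extern void hwaccmInterceptRdtsc(PVM pVM);

static void exampleProgramGuestTsc(PVM pVM)
{
    uint64_t offRealTSC;
    if (TMCpuTickCanUseRealTSC(pVM, &offRealTSC))
        hwaccmSetTscOffset(pVM, offRealTSC);    /* let guest RDTSC run natively; hardware adds the offset */
    else
        hwaccmInterceptRdtsc(pVM);              /* fall back to trapping RDTSC and serving TMCpuTickGet() */
}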
trunk/src/VBox/VMM/TM.cpp
--- trunk/src/VBox/VMM/TM.cpp (r2464)
+++ trunk/src/VBox/VMM/TM.cpp (r2581)
@@ -133 +133 @@
 *   Internal Functions                                                        *
 *******************************************************************************/
-static uint64_t tmR3Calibrate(void);
+static bool tmR3HasFixedTSC(void);
+static uint64_t tmR3CalibrateTSC(void);
 static DECLCALLBACK(int) tmR3Save(PVM pVM, PSSMHANDLE pSSM);
 static DECLCALLBACK(int) tmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
@@ -253 +254 @@
     pVM->tm.s.fTSCVirtualized = true;

+    /* TSC reliability */
+    rc = CFGMR3QueryBool(pCfgHandle, "MaybeUseOffsettedHostTSC", &pVM->tm.s.fMaybeUseOffsettedHostTSC);
+    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
+    {
+        if (!pVM->tm.s.fTSCUseRealTSC)
+            pVM->tm.s.fMaybeUseOffsettedHostTSC = tmR3HasFixedTSC();
+        else
+            pVM->tm.s.fMaybeUseOffsettedHostTSC = true;
+    }
+
     /* frequency */
     rc = CFGMR3QueryU64(pCfgHandle, "TSCTicksPerSecond", &pVM->tm.s.cTSCTicksPerSecond);
     if (rc == VERR_CFGM_VALUE_NOT_FOUND)
     {
-        pVM->tm.s.cTSCTicksPerSecond = tmR3Calibrate();
+        pVM->tm.s.cTSCTicksPerSecond = tmR3CalibrateTSC();
         if (    !pVM->tm.s.fTSCUseRealTSC
             &&  pVM->tm.s.cTSCTicksPerSecond >= _4G)
+        {
             pVM->tm.s.cTSCTicksPerSecond = _4G - 1; /* (A limitation of our math code) */
+            pVM->tm.s.fMaybeUseOffsettedHostTSC = false;
+        }
     }
     else if (VBOX_FAILURE(rc))
@@ -272 +286 @@
     else
     {
-        pVM->tm.s.fTSCUseRealTSC = false;
+        pVM->tm.s.fTSCUseRealTSC = pVM->tm.s.fMaybeUseOffsettedHostTSC = false;
         pVM->tm.s.fTSCVirtualized = true;
     }

     /* setup and report */
-    if (pVM->tm.s.fTSCUseRealTSC)
+    if (pVM->tm.s.fTSCVirtualized)
+        CPUMR3SetCR4Feature(pVM, X86_CR4_TSD, ~X86_CR4_TSD);
+    else
         CPUMR3SetCR4Feature(pVM, 0, ~X86_CR4_TSD);
-    else
-        CPUMR3SetCR4Feature(pVM, X86_CR4_TSD, ~X86_CR4_TSD);
-    LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool\n",
-            pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.fTSCVirtualized, pVM->tm.s.fTSCUseRealTSC));
+    LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool fMaybeUseOffsettedHostTSC=%RTbool\n",
+            pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.fTSCVirtualized,
+            pVM->tm.s.fTSCUseRealTSC, pVM->tm.s.fMaybeUseOffsettedHostTSC));

     /*
@@ -464 +479 @@

 /**
+ * Checks if the host CPU has a fixed TSC frequency.
+ *
+ * @returns true if it has, false if it hasn't.
+ *
+ * @remark  This test doesn't bother with very old CPUs that don't do power
+ *          management or any other stuff that might influence the TSC rate.
+ *          This isn't currently relevant.
+ */
+static bool tmR3HasFixedTSC(void)
+{
+    if (ASMHasCpuId())
+    {
+        uint32_t uEAX, uEBX, uECX, uEDX;
+        ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
+        if (    uEAX >= 1
+            &&  uEBX == 0x68747541
+            &&  uECX == 0x444d4163
+            &&  uEDX == 0x69746e65)
+        {
+            /*
+             * AuthenticAMD - Check for APM support and that TscInvariant is set.
+             *
+             * This test isn't correct with respect to fixed/non-fixed TSC and
+             * older models, but this isn't relevant since the result is currently
+             * only used for making a decision on AMD-V models.
+             */
+            ASMCpuId(0x80000000, &uEAX, &uEBX, &uECX, &uEDX);
+            if (uEAX >= 0x80000007)
+            {
+                ASMCpuId(0x80000007, &uEAX, &uEBX, &uECX, &uEDX);
+                if (uEDX & BIT(8) /* TscInvariant */)
+                    return true;
+            }
+        }
+        else if (    uEAX >= 1
+                 &&  uEBX == 0x756e6547
+                 &&  uECX == 0x6c65746e
+                 &&  uEDX == 0x49656e69)
+        {
+            /*
+             * GenuineIntel - Check the model number.
+             *
+             * This test is lacking in the same way and for the same reasons
+             * as the AMD test above.
+             */
+            ASMCpuId(1, &uEAX, &uEBX, &uECX, &uEDX);
+            unsigned uModel  = (uEAX >> 4) & 0x0f;
+            unsigned uFamily = (uEAX >> 8) & 0x0f;
+            if (uFamily == 0x0f)
+                uFamily += (uEAX >> 20) & 0xff;
+            if (uFamily >= 0x06)
+                uModel += ((uEAX >> 16) & 0x0f) << 4;
+            if (    (uFamily == 0x0f /*P4*/ && uModel >= 0x03)
+                ||  (uFamily == 0x06 /*P2/P3*/ && uModel >= 0x0e))
+                return true;
+        }
+    }
+    return false;
+}
+
+
+/**
  * Calibrate the CPU tick.
  *
  * @returns Number of ticks per second.
  */
-static uint64_t tmR3Calibrate(void)
+static uint64_t tmR3CalibrateTSC(void)
 {
     /*
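The magic constants in tmR3HasFixedTSC() are the CPUID vendor strings packed as little-endian dwords: EBX/EDX/ECX spell "AuthenticAMD" (0x68747541 = "Auth", 0x69746e65 = "enti", 0x444d4163 = "cAMD"), and likewise "GenuineIntel". The detection logic can be reproduced outside VBox; below is a standalone approximation for GCC/Clang using <cpuid.h> in place of IPRT's ASMCpuId(), with the same deliberately coarse family/model cut-offs as the patch:

/* Standalone approximation of tmR3HasFixedTSC(); not VBox code. */
#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>

static bool hasFixedTSC(void)
{
    unsigned uEAX, uEBX, uECX, uEDX;
    if (!__get_cpuid(0, &uEAX, &uEBX, &uECX, &uEDX))
        return false;
    if (   uEAX >= 1
        && uEBX == 0x68747541 /* "Auth" */
        && uECX == 0x444d4163 /* "cAMD" */
        && uEDX == 0x69746e65 /* "enti" */)
    {
        /* AMD: leaf 0x80000007, EDX bit 8 is TscInvariant. */
        if (   __get_cpuid(0x80000007, &uEAX, &uEBX, &uECX, &uEDX)
            && (uEDX & (1u << 8)))
            return true;
    }
    else if (   uEAX >= 1
             && uEBX == 0x756e6547 /* "Genu" */
             && uECX == 0x6c65746e /* "ntel" */
             && uEDX == 0x49656e69 /* "ineI" */)
    {
        /* Intel: derive family/model and apply the patch's cut-offs. */
        __get_cpuid(1, &uEAX, &uEBX, &uECX, &uEDX);
        unsigned uModel  = (uEAX >> 4) & 0x0f;
        unsigned uFamily = (uEAX >> 8) & 0x0f;
        if (uFamily == 0x0f)
            uFamily += (uEAX >> 20) & 0xff;
        if (uFamily >= 0x06)
            uModel += ((uEAX >> 16) & 0x0f) << 4;
        if (   (uFamily == 0x0f && uModel >= 0x03)  /* P4: Prescott and later */
            || (uFamily == 0x06 && uModel >= 0x0e)) /* Core/Yonah and later */
            return true;
    }
    return false;
}

int main(void)
{
    printf("fixed TSC: %s\n", hasFixedTSC() ? "yes" : "no");
    return 0;
}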
trunk/src/VBox/VMM/TMInternal.h
--- trunk/src/VBox/VMM/TMInternal.h (r2283)
+++ trunk/src/VBox/VMM/TMInternal.h (r2581)
@@ -286 +286 @@
     RTUINT                      offVM;

+    /** Flag indicating that the host TSC is suitable for use in AMD-V and VT-x mode.
+     * Config variable: MaybeUseOffsettedHostTSC (boolean) */
+    bool                        fMaybeUseOffsettedHostTSC;
     /** CPU timestamp ticking enabled indicator (bool). (RDTSC) */
     bool                        fTSCTicking;
trunk/src/VBox/VMM/VMMAll/TMAll.cpp
--- trunk/src/VBox/VMM/VMMAll/TMAll.cpp (r2283)
+++ trunk/src/VBox/VMM/VMMAll/TMAll.cpp (r2581)
@@ -1336 +1336 @@
 #endif /* !VBOX_STRICT */

+
(whitespace only: a trailing blank line is added)
trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp
--- trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp (r2551)
+++ trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp (r2581)
@@ -99 +99 @@
  * @returns TSC offset
  * @param pVM The VM to operate on.
+ * @todo Remove this when the code has been switched to TMCpuTickCanUseRealTSC.
  */
 TMDECL(uint64_t) TMCpuTickGetOffset(PVM pVM)
@@ -120 +121 @@

     return u64 - ASMReadTSC();
+}
+
+
+/**
+ * Checks if AMD-V / VT-x can use an offsetted hardware TSC or not.
+ *
+ * @returns true/false accordingly.
+ * @param pVM           The VM handle.
+ * @param poffRealTSC   The offset against the TSC of the current CPU.
+ *                      Can be NULL.
+ * @thread EMT.
+ */
+TMDECL(bool) TMCpuTickCanUseRealTSC(PVM pVM, uint64_t *poffRealTSC)
+{
+    /*
+     * We require:
+     *     1. A fixed TSC, this is checked at init time.
+     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
+     *     3. Either that we're using the real TSC as time source or
+     *        a) We don't have any lag to catch up.
+     *        b) The virtual sync clock hasn't been halted by an expired timer.
+     *        c) We're not using warp drive (accelerated virtual guest time).
+     */
+    if (    pVM->tm.s.fMaybeUseOffsettedHostTSC
+        &&  RT_LIKELY(pVM->tm.s.fTSCTicking)
+        &&  (   pVM->tm.s.fTSCUseRealTSC
+             || (   !pVM->tm.s.fVirtualSyncCatchUp
+                 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
+                 && !pVM->tm.s.fVirtualWarpDrive))
+       )
+    {
+        if (!pVM->tm.s.fTSCUseRealTSC)
+        {
+            /* The source is the timer synchronous virtual clock. */
+            Assert(pVM->tm.s.fTSCVirtualized);
+
+            if (poffRealTSC)
+            {
+                uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
+                                - pVM->tm.s.u64TSCOffset;
+                /** @todo When we start collecting statistics on how much time we spend executing
+                 * guest code before exiting, we should check this against the next virtual sync
+                 * timer timeout. If it's lower than the avg. length, we should trap rdtsc to increase
+                 * the chance that we'll get interrupted right after the timer expired. */
+                *poffRealTSC = u64Now - ASMReadTSC();
+            }
+        }
+        else if (poffRealTSC)
+        {
+            /* The source is the real TSC. */
+            if (pVM->tm.s.fTSCVirtualized)
+                *poffRealTSC = pVM->tm.s.u64TSCOffset;
+            else
+                *poffRealTSC = 0;
+        }
+        return true;
+    }
+
+    return false;
+}
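In both branches the function returns an additive offset: hardware TSC offsetting (the TSC-offset field in the VT-x VMCS and the AMD-V VMCB) makes a guest RDTSC read host_tsc + offset, so storing u64Now - ASMReadTSC() means the guest observes approximately the virtualized value u64Now. A self-contained sketch of that arithmetic, using mock values and the compiler's __rdtsc() intrinsic rather than any VBox API:

/* Mock demonstration of the offset arithmetic; not VBox code. */
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>

int main(void)
{
    /* Pretend this came from the virtual sync clock (tmCpuTickGetRawVirtual). */
    uint64_t u64Now = UINT64_C(1000000000);

    /* What TMCpuTickCanUseRealTSC() would store into *poffRealTSC. */
    uint64_t off = u64Now - __rdtsc();   /* unsigned wrap-around is intentional */

    /* The hardware applies guest_tsc = host_tsc + offset on RDTSC, so the
     * guest now reads roughly u64Now plus the cycles elapsed since above. */
    printf("guest RDTSC would read ~%llu\n",
           (unsigned long long)(__rdtsc() + off));
    return 0;
}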