Changeset 54065 in vbox for trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp
Timestamp:
    Feb 3, 2015 10:45:39 AM (10 years ago)
File:
    1 edited
Legend:
    Unmodified lines are unprefixed; removed lines are prefixed with "-", added lines with "+".
trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp
r53441 r54065 5 5 6 6 /* 7 * Copyright (C) 2006-201 4Oracle Corporation7 * Copyright (C) 2006-2015 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 174 174 * Record why we refused to use offsetted TSC. 175 175 * 176 * Used by TMCpuTickCanUseRealTSC and TMCpuTickGetDeadlineAndTscOffset.176 * Used by TMCpuTickCanUseRealTSC() and TMCpuTickGetDeadlineAndTscOffset(). 177 177 * 178 178 * @param pVM Pointer to the VM. … … 213 213 * @param pVCpu Pointer to the VMCPU. 214 214 * @param poffRealTSC The offset against the TSC of the current CPU. 215 * Can be NULL.216 * @param pfParavirtTsc Where to store whether paravirt. TSC can be used or217 * not.218 * @ thread EMT(pVCpu).215 * @param pfParavirtTsc Where to store whether paravirt. TSC is enabled. 216 * 217 * @thread EMT(pVCpu). 218 * @see TMCpuTickGetDeadlineAndTscOffset(). 219 219 */ 220 220 VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfParavirtTsc) 221 221 { 222 222 PVM pVM = pVCpu->CTX_SUFF(pVM); 223 bool f ParavirtTsc = false;223 bool fOffsettedTsc = false; 224 224 225 225 /* 226 226 * We require: 227 * 1. Use of a paravirtualized TSC is enabled by the guest.228 * (OR)229 227 * 1. A fixed TSC, this is checked at init time. 230 228 * 2. That the TSC is ticking (we shouldn't be here if it isn't) … … 234 232 * c) we're not using warp drive (accelerated virtual guest time). 235 233 */ 236 *pfParavirtTsc = GIMIsParavirtTscEnabled(pVM); 237 if ( pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC 238 && RT_LIKELY(pVCpu->tm.s.fTSCTicking) 239 && ( pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET 240 || ( !pVM->tm.s.fVirtualSyncCatchUp 241 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking) 242 && !pVM->tm.s.fVirtualWarpDrive))) 243 { 244 if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET) 245 { 246 /* The source is the timer synchronous virtual clock. */ 247 if (poffRealTSC) 248 { 249 uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */) 250 - pVCpu->tm.s.offTSCRawSrc; 251 /** @todo When we start collecting statistics on how much time we spend executing 252 * guest code before exiting, we should check this against the next virtual sync 253 * timer timeout. If it's lower than the avg. length, we should trap rdtsc to increase 254 * the chance that we'll get interrupted right after the timer expired. */ 255 *poffRealTSC = u64Now - ASMReadTSC(); 256 } 257 } 258 else if (poffRealTSC) 259 { 260 /* The source is the real TSC. */ 261 *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc; 262 } 263 /** @todo count this? */ 264 return true; 234 Assert(pVCpu->tm.s.fTSCTicking); 235 *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled; 236 237 if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET) 238 { 239 /* The source is the real TSC. */ 240 *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc; 241 return true; /** @todo count this? */ 242 } 243 244 if ( pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC 245 && !pVM->tm.s.fVirtualSyncCatchUp 246 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking) 247 && !pVM->tm.s.fVirtualWarpDrive) 248 { 249 /* The source is the timer synchronous virtual clock. */ 250 uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */) 251 - pVCpu->tm.s.offTSCRawSrc; 252 /** @todo When we start collecting statistics on how much time we spend executing 253 * guest code before exiting, we should check this against the next virtual sync 254 * timer timeout. If it's lower than the avg. 
length, we should trap rdtsc to increase 255 * the chance that we'll get interrupted right after the timer expired. */ 256 uint64_t u64TSC = ASMReadTSC(); /** @todo should be replaced with SUPReadTSC() eventually. */ 257 *poffRealTSC = u64Now - u64TSC; 258 fOffsettedTsc = u64Now >= pVCpu->tm.s.u64TSCLastSeen; 259 return true; /** @todo count this? */ 265 260 } 266 261 … … 304 299 * @returns The number of host CPU clock ticks to the next timer deadline. 305 300 * @param pVCpu The current CPU. 306 * @param pfParavirtTsc Where to store whether paravirt. TSC can be used or307 * not.308 301 * @param poffRealTSC The offset against the TSC of the current CPU. 302 * @param pfOffsettedTsc Where to store whether TSC offsetting can be used. 303 * @param pfParavirtTsc Where to store whether paravirt. TSC is enabled. 309 304 * 310 305 * @thread EMT(pVCpu). 311 * @ remarks Superset ofTMCpuTickCanUseRealTSC().312 */ 313 VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, bool *pfParavirtTsc,314 uint64_t *poffRealTSC)315 { 316 PVM 317 uint64_t 306 * @see TMCpuTickCanUseRealTSC(). 307 */ 308 VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfOffsettedTsc, 309 bool *pfParavirtTsc) 310 { 311 PVM pVM = pVCpu->CTX_SUFF(pVM); 312 uint64_t cTicksToDeadline; 318 313 319 314 /* 320 315 * We require: 321 * 1. Use of a paravirtualized TSC is enabled by the guest.322 * (OR)323 316 * 1. A fixed TSC, this is checked at init time. 324 317 * 2. That the TSC is ticking (we shouldn't be here if it isn't) … … 328 321 * c) we're not using warp drive (accelerated virtual guest time). 329 322 */ 330 *pfParavirtTsc = GIMIsParavirtTscEnabled(pVM); 331 if ( pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC 332 && RT_LIKELY(pVCpu->tm.s.fTSCTicking) 333 && ( pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET 334 || ( !pVM->tm.s.fVirtualSyncCatchUp 335 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking) 336 && !pVM->tm.s.fVirtualWarpDrive))) 337 { 323 Assert(pVCpu->tm.s.fTSCTicking); 324 *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled; 325 326 if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET) 327 { 328 /* The source is the real TSC. */ 329 *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc; 338 330 *pfOffsettedTsc = true; 339 if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET) 340 { 341 /* The source is the timer synchronous virtual clock. */ 342 uint64_t cNsToDeadline; 343 uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline); 344 uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */ 345 ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL) 346 : u64NowVirtSync; 347 u64Now -= pVCpu->tm.s.offTSCRawSrc; 348 *poffRealTSC = u64Now - ASMReadTSC(); 349 cTicksToDeadline = tmCpuCalcTicksToDeadline(cNsToDeadline); 350 } 351 else 352 { 353 /* The source is the real TSC. */ 354 *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc; 355 cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM)); 356 } 357 } 358 else 359 { 331 cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM)); 332 return cTicksToDeadline; 333 } 334 335 if ( pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC 336 && !pVM->tm.s.fVirtualSyncCatchUp 337 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking) 338 && !pVM->tm.s.fVirtualWarpDrive) 339 { 340 /* The source is the timer synchronous virtual clock. 
*/ 341 uint64_t cNsToDeadline; 342 uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline); 343 uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */ 344 ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL) 345 : u64NowVirtSync; 346 u64Now -= pVCpu->tm.s.offTSCRawSrc; 347 *poffRealTSC = u64Now - ASMReadTSC(); /** @todo replace with SUPReadTSC() eventually. */ 348 *pfOffsettedTsc = u64Now >= pVCpu->tm.s.u64TSCLastSeen; 349 cTicksToDeadline = tmCpuCalcTicksToDeadline(cNsToDeadline); 350 return cTicksToDeadline; 351 } 352 360 353 #ifdef VBOX_WITH_STATISTICS 361 354 tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu); 362 355 #endif 363 *pfOffsettedTsc = false; 364 *poffRealTSC = 0; 365 cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM)); 366 } 367 356 *pfOffsettedTsc = false; 357 *poffRealTSC = 0; 358 cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM)); 368 359 return cTicksToDeadline; 369 360 } … … 395 386 { 396 387 STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow); 397 pVCpu->tm.s.u64TSCLastSeen += 64; /* @todo choose a good increment here */388 pVCpu->tm.s.u64TSCLastSeen += 64; /** @todo choose a good increment here */ 398 389 u64 = pVCpu->tm.s.u64TSCLastSeen; 399 390 } … … 503 494 VMMDECL(uint64_t) TMCpuTicksPerSecond(PVM pVM) 504 495 { 505 /** @todo revisit this, not sure why we need to get the rate from GIP for 506 * real-tsc-offset. */ 507 if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET) 496 if ( pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET 497 && g_pSUPGlobalInfoPage->u32Mode != SUPGIPMODE_INVARIANT_TSC) 508 498 { 509 499 uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
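Both rewritten functions compute the dynamic-mode offset as the virtual-sync-derived guest TSC value minus the current host TSC, and r54065 additionally compares that guest value against u64TSCLastSeen before reporting offsetting as usable. The stand-alone sketch below illustrates only that arithmetic; it is hypothetical (the *_stub helpers and the fixed numbers are invented stand-ins, not VirtualBox APIs) and is meant to show why a guest RDTSC with the offset applied reads the intended value.

/* Hypothetical, self-contained illustration of the TSC offsetting math in the
 * diff above.  Names ending in _stub are invented stand-ins, not VirtualBox APIs. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for ASMReadTSC(): pretend the host TSC currently reads this. */
static uint64_t hostTscRead_stub(void)
{
    return UINT64_C(5000000000);
}

/* Stand-in for tmCpuTickGetRawVirtual() minus offTSCRawSrc: the TSC value the
 * guest is supposed to see right now, derived from the virtual sync clock. */
static uint64_t guestTscNow_stub(void)
{
    return UINT64_C(1200000000);
}

/* Mirrors the structure of the new dynamic-mode branch: compute the offset and
 * check the guest value against the last TSC the guest has already observed. */
static void computeTscOffset(uint64_t u64TSCLastSeen, uint64_t *poffRealTSC, bool *pfOffsettedTsc)
{
    uint64_t const u64Now = guestTscNow_stub();
    *poffRealTSC    = u64Now - hostTscRead_stub(); /* may wrap; unsigned math is intentional */
    *pfOffsettedTsc = u64Now >= u64TSCLastSeen;    /* offsetting only if the guest TSC won't go backwards */
}

int main(void)
{
    uint64_t offRealTSC    = 0;
    bool     fOffsettedTsc = false;

    computeTscOffset(UINT64_C(1100000000), &offRealTSC, &fOffsettedTsc);

    /* A guest RDTSC with hardware offsetting applied reads host TSC + offset,
     * which by construction equals the virtual-sync-derived value. */
    uint64_t const guestReads = hostTscRead_stub() + offRealTSC;
    printf("offset=%llu (mod 2^64), guest reads %llu, offsetting usable: %s\n",
           (unsigned long long)offRealTSC, (unsigned long long)guestReads,
           fOffsettedTsc ? "yes" : "no");
    return 0;
}

With the numbers chosen here the guest would read 1200000000, and offsetting is reported usable because that value does not fall behind the last TSC the guest has seen.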