Changeset 2869 in vbox for trunk/src/VBox
Timestamp:
    May 25, 2007 1:15:39 PM (18 years ago)
Location:
    trunk/src/VBox/VMM
Files:
    4 edited
trunk/src/VBox/VMM/TM.cpp (r2861 → r2869)

     /*
-     * We indirectly - thru RTTimeNanoTS and RTTimeMilliTS - use the global
-     * info page (GIP) for both the virtual and the real clock. By mapping
-     * the GIP into guest context we can get just as accurate time even there.
-     * All that's required is that the g_pSUPGlobalInfoPage symbol is available
-     * to the GC Runtime.
+     * We directly use the GIP to calculate the virtual time. We map the
+     * GIP into the guest context so we can do this calculation there
+     * as well and save costly world switches.
      */
     pVM->tm.s.pvGIPR3 = (void *)g_pSUPGlobalInfoPage;
…
     MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);

+    /* Check assumptions made in TMAllVirtual.cpp about the GIP update interval. */
+    if (    g_pSUPGlobalInfoPage->u32Magic == SUPGLOBALINFOPAGE_MAGIC
+        &&  g_pSUPGlobalInfoPage->u32UpdateIntervalNS >= 250000000 /* 0.25s */)
+        return VMSetError(pVM, VERR_INTERNAL_ERROR, RT_SRC_POS,
+                          N_("The GIP update interval is too big. u32UpdateIntervalNS=%RU32 (u32UpdateHz=%RU32)\n"),
+                          g_pSUPGlobalInfoPage->u32UpdateIntervalNS, g_pSUPGlobalInfoPage->u32UpdateHz);
…
         return rc;

+    /*
+     * Register statistics.
+     */
+    STAM_REL_REG_USED(pVM, (void *)&pVM->tm.s.c1nsVirtualRawSteps,   STAMTYPE_U32, "/TM/1nsSteps",    STAMUNIT_OCCURENCES, "Virtual time 1ns steps (due to TSC / GIP variations)");
+    STAM_REL_REG_USED(pVM, (void *)&pVM->tm.s.cVirtualRawBadRawPrev, STAMTYPE_U32, "/TM/BadPrevTime", STAMUNIT_OCCURENCES, "Times the previous virtual time was considered erratic (shouldn't ever happen).");
+
 #ifdef VBOX_WITH_STATISTICS
-    /*
-     * Register statistics.
-     */
     STAM_REG(pVM, &pVM->tm.s.StatDoQueues,         STAMTYPE_PROFILE,     "/TM/DoQueues",          STAMUNIT_TICKS_PER_CALL, "Profiling timer TMR3TimerQueuesDo.");
     STAM_REG(pVM, &pVM->tm.s.StatDoQueuesSchedule, STAMTYPE_PROFILE_ADV, "/TM/DoQueues/Schedule", STAMUNIT_TICKS_PER_CALL, "The scheduling part.");
…
(The remaining TM.cpp hunks are whitespace-only: trailing blanks are stripped after "RTThreadSleep(32);" and inside two debug logging statements.)
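The 0.25s ceiling guards the fixed-point TSC → nanosecond conversion performed by the new tmVirtualGetRawNanoTS() (TMAllVirtual.cpp below): the elapsed-TSC delta is capped at one update interval, so the scaled result never exceeds u32UpdateIntervalNS and the 64/32 → 32-bit division cannot overflow, while two update intervals stay comfortably inside the 1s monotonicity window. A minimal standalone sketch of that scaling (invented names, not the VBox code itself):

#include <stdint.h>

/* Convert an elapsed-TSC delta to nanoseconds using the per-interval GIP
   figures. Assumes cTscPerInterval != 0. Sketch only. */
static uint32_t tscDeltaToNano(uint64_t cTscDelta,       /* TSC ticks since the last GIP update */
                               uint32_t cTscPerInterval, /* TSC ticks per GIP update interval */
                               uint32_t cNsPerInterval)  /* nanoseconds per GIP update interval */
{
    if (cTscDelta > cTscPerInterval)
        cTscDelta = cTscPerInterval;    /* interval expired: cap the delta */
    /* 32x32 -> 64-bit multiply, then 64/32 -> 32-bit divide. Because the
       delta was capped, the quotient is <= cNsPerInterval and fits 32 bits. */
    return (uint32_t)((uint64_t)(uint32_t)cTscDelta * cNsPerInterval / cTscPerInterval);
}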
trunk/src/VBox/VMM/TMInternal.h (r2861 → r2869)

      * Only valid when fVirtualWarpDrive is set. */
     uint64_t                    u64VirtualWarpDriveStart;
+    /** The previously returned nano TS.
+     * This handles TSC drift on SMP systems and expired intervals.
+     * The valid range is u64NanoTS to u64NanoTS + 1000000000 (i.e. 1 sec). */
+    uint64_t volatile           u64VirtualRawPrev;
+    /** The number of times we've had to resort to 1ns walking. */
+    uint32_t volatile           c1nsVirtualRawSteps;
+    /** Number of times u64VirtualRawPrev has been considered bad. */
+    uint32_t volatile           cVirtualRawBadRawPrev;

     /** The guest virtual timer synchronous time when fVirtualSyncTicking is cleared. */
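These three fields back the monotonicity logic in tmVirtualGetRawNanoTS(): u64VirtualRawPrev remembers the last value handed out, c1nsVirtualRawSteps counts the 1ns fix-ups, and cVirtualRawBadRawPrev counts readings rejected as erratic. A simplified sketch of the clamp, using standard C11 atomics in place of the VBox ASMAtomic* wrappers (invented names; the real code also distinguishes small backward jumps from truly bad ones and retries the publish step):

#include <stdint.h>
#include <stdatomic.h>

static _Atomic uint64_t g_nsPrev;    /* last timestamp returned (0 = just resumed) */
static _Atomic uint32_t g_c1nsSteps; /* number of 1ns fix-ups */

static uint64_t clampMonotonic(uint64_t nsNew)
{
    uint64_t nsPrev = atomic_load(&g_nsPrev);
    if (nsPrev != 0 && (int64_t)(nsNew - nsPrev) <= 0)
    {
        /* The new reading is at or before the last one: step 1ns forward instead. */
        nsNew = nsPrev + 1;
        atomic_fetch_add(&g_c1nsSteps, 1);
    }
    /* Publish the new value; if another thread raced us, the real code keeps
       retrying as long as it is still ahead of the stored value. */
    atomic_compare_exchange_strong(&g_nsPrev, &nsPrev, nsNew);
    return nsNew;
}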
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp (r2861 → r2869)

+/**
+ * This is (mostly) the same as rtTimeNanoTSInternal() except
+ * for the two globals which live in TM.
+ *
+ * @returns Nanosecond timestamp.
+ * @param   pVM     The VM handle.
+ */
+static uint64_t tmVirtualGetRawNanoTS(PVM pVM)
+{
+    uint64_t    u64Delta;
+    uint32_t    u32NanoTSFactor0;
+    uint64_t    u64TSC;
+    uint64_t    u64NanoTS;
+    uint32_t    u32UpdateIntervalTSC;
+
+    /*
+     * Read the GIP data.
+     */
+    for (;;)
+    {
+        uint32_t u32TransactionId;
+        PCSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
+#ifdef IN_RING3
+        if (RT_UNLIKELY(!pGip || pGip->u32Magic != SUPGLOBALINFOPAGE_MAGIC))
+            return RTTimeSystemNanoTS();
+#endif
+
+        if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
+        {
+            u32TransactionId = pGip->aCPUs[0].u32TransactionId;
+#ifdef __L4__
+            Assert((u32TransactionId & 1) == 0);
+#endif
+            u32UpdateIntervalTSC = pGip->aCPUs[0].u32UpdateIntervalTSC;
+            u64NanoTS = pGip->aCPUs[0].u64NanoTS;
+            u64TSC = pGip->aCPUs[0].u64TSC;
+            u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
+            u64Delta = ASMReadTSC();
+            if (RT_UNLIKELY(    pGip->aCPUs[0].u32TransactionId != u32TransactionId
+                            ||  (u32TransactionId & 1)))
+                continue;
+        }
+        else
+        {
+            /* SUPGIPMODE_ASYNC_TSC */
+            PCSUPGIPCPU pGipCpu;
+
+            uint8_t u8ApicId = ASMGetApicId();
+            if (RT_LIKELY(u8ApicId < RT_ELEMENTS(pGip->aCPUs)))
+                pGipCpu = &pGip->aCPUs[u8ApicId];
+            else
+            {
+                AssertMsgFailed(("%x\n", u8ApicId));
+                pGipCpu = &pGip->aCPUs[0];
+            }
+
+            u32TransactionId = pGipCpu->u32TransactionId;
+#ifdef __L4__
+            Assert((u32TransactionId & 1) == 0);
+#endif
+            u32UpdateIntervalTSC = pGipCpu->u32UpdateIntervalTSC;
+            u64NanoTS = pGipCpu->u64NanoTS;
+            u64TSC = pGipCpu->u64TSC;
+            u32NanoTSFactor0 = pGip->u32UpdateIntervalNS;
+            u64Delta = ASMReadTSC();
+            if (RT_UNLIKELY(u8ApicId != ASMGetApicId()))
+                continue;
+            if (RT_UNLIKELY(    pGipCpu->u32TransactionId != u32TransactionId
+                            ||  (u32TransactionId & 1)))
+                continue;
+        }
+        break;
+    }
+
+    /*
+     * Calc NanoTS delta.
+     */
+    u64Delta -= u64TSC;
+    if (u64Delta > u32UpdateIntervalTSC)
+    {
+        /*
+         * We've expired the interval, cap it. If we're here for the 2nd
+         * time without any GIP update in between, the checks against
+         * pVM->tm.s.u64VirtualRawPrev below will force 1ns stepping.
+         */
+        u64Delta = u32UpdateIntervalTSC;
+    }
+#if !defined(_MSC_VER) || defined(__AMD64__) /* GCC makes very pretty code from these two inline calls, while MSC cannot. */
+    u64Delta = ASMMult2xU32RetU64((uint32_t)u64Delta, u32NanoTSFactor0);
+    u64Delta = ASMDivU64ByU32RetU32(u64Delta, u32UpdateIntervalTSC);
+#else
+    __asm
+    {
+        mov     eax, dword ptr [u64Delta]
+        mul     dword ptr [u32NanoTSFactor0]
+        div     dword ptr [u32UpdateIntervalTSC]
+        mov     dword ptr [u64Delta], eax
+        xor     edx, edx
+        mov     dword ptr [u64Delta + 4], edx
+    }
+#endif
+
+    /*
+     * Calculate the time and compare it with the previously returned value.
+     *
+     * Since this function is called *very* frequently when the VM is running
+     * and then mostly on EMT, we can restrict the valid range of the delta
+     * (-1s to 2*GipUpdates) and simplify/optimize the default path.
+     */
+    u64NanoTS += u64Delta;
+    uint64_t u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
+    uint64_t u64DeltaPrev = u64NanoTS - u64PrevNanoTS;
+    if (RT_LIKELY(u64DeltaPrev < 1000000000 /* 1s */))
+        /* frequent - less than 1s since last call. */;
+    else if (   (int64_t)u64DeltaPrev < 0
+             && (int64_t)u64DeltaPrev + u32NanoTSFactor0 * 2 > 0)
+    {
+        /* occasional - u64NanoTS is in the 'past' relative to previous returns. */
+        ASMAtomicIncU32(&pVM->tm.s.c1nsVirtualRawSteps);
+        u64NanoTS = u64PrevNanoTS + 1;
+    }
+    else if (u64PrevNanoTS)
+    {
+        /* Something has gone bust; if the offset is negative it's real bad. */
+        ASMAtomicIncU32(&pVM->tm.s.cVirtualRawBadRawPrev);
+        if ((int64_t)u64DeltaPrev < 0)
+            LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
+                    u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
+        else
+            Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64 (debugging?)\n",
+                 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
+#ifdef DEBUG_bird
+        AssertMsgFailed(("u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 u64Delta=%#RX64\n",
+                         u64DeltaPrev, u64PrevNanoTS, u64NanoTS, u64Delta));
+#endif
+    }
+    /* else: We're resuming (see TMVirtualResume). */
+    if (RT_LIKELY(ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS)))
+        return u64NanoTS;
+
+    /*
+     * Attempt updating the previous value, provided we're still ahead of it.
+     *
+     * There is no point in recalculating u64NanoTS because we got preempted
+     * or raced somebody while the GIP was updated, since these are events
+     * that might occur at any point in the return path as well.
+     */
+    for (int cTries = 100;;)
+    {
+        u64PrevNanoTS = ASMAtomicReadU64(&pVM->tm.s.u64VirtualRawPrev);
+        if (u64PrevNanoTS >= u64NanoTS)
+            break;
+        if (ASMAtomicCmpXchgU64(&pVM->tm.s.u64VirtualRawPrev, u64NanoTS, u64PrevNanoTS))
+            break;
+        AssertBreak(--cTries <= 0, );
+    }
+
+    return u64NanoTS;
+}
…
      * warp drive has been enabled.
      */
-    uint64_t u64 = RTTimeNanoTS();
+    uint64_t u64 = tmVirtualGetRawNanoTS(pVM);
     u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
     u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
…
     /*
      * Now we apply the virtual time offset.
-     * (Which is the negate RTTimeNanoTS() value for when the virtual machine
-     * started if it had been running continuously without any suspends.)
+     * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
+     * machine started if it had been running continuously without any suspends.)
      */
     u64 -= pVM->tm.s.u64VirtualOffset;
…
     if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
-        return RTTimeNanoTS() - pVM->tm.s.u64VirtualOffset;
+        return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
     return tmVirtualGetRawNonNormal(pVM);
…
      *     - We might be on a different CPU which TSC isn't quite in sync with the
      *       other CPUs in the system.
-     *     - RTTimeNanoTS() is returning sligtly different values in GC, R0 and R3 because
-     *       of the static variable it uses with the previous read time.
      *     - Another thread is racing us and we might have been preempted while inside
      *       this function.
…
         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
-        pVM->tm.s.u64VirtualWarpDriveStart = RTTimeNanoTS();
+        pVM->tm.s.u64VirtualRawPrev = 0;
+        pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
         pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
         pVM->tm.s.fVirtualTicking = true;
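The read loop at the top of tmVirtualGetRawNanoTS() is a seqlock-style optimistic read: the GIP writer bumps u32TransactionId before and after each update, so a reader that sees an odd id, or a different id after copying the fields, knows it raced an update and retries. A stripped-down sketch of the pattern (simplified structure and names; the real code also re-checks the APIC ID in async-TSC mode and relies on the placement of the TSC read inside the window):

#include <stdint.h>

struct GipCpuSketch
{
    volatile uint32_t u32TransactionId; /* odd while an update is in progress */
    volatile uint64_t u64TSC;
    volatile uint64_t u64NanoTS;
};

static void readGipCpu(const struct GipCpuSketch *pCpu, uint64_t *pTsc, uint64_t *pNanoTS)
{
    for (;;)
    {
        uint32_t idBefore = pCpu->u32TransactionId;
        *pTsc    = pCpu->u64TSC;
        *pNanoTS = pCpu->u64NanoTS;
        if (   pCpu->u32TransactionId == idBefore
            && !(idBefore & 1))
            return;                     /* consistent snapshot */
        /* raced an update - retry */
    }
}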
trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp (r2283 → r2869)

     GEN_CHECK_OFF(TM, u64VirtualOffset);
     GEN_CHECK_OFF(TM, u64Virtual);
+    GEN_CHECK_OFF(TM, u64VirtualRawPrev);
+    GEN_CHECK_OFF(TM, c1nsVirtualRawSteps);
+    GEN_CHECK_OFF(TM, cVirtualRawBadRawPrev);
     GEN_CHECK_OFF(TM, u64VirtualWarpDriveStart);
     GEN_CHECK_OFF(TM, u64VirtualSync);
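GEN_CHECK_OFF presumably records the offset of a member so the ring-3 and guest-context builds can be verified to agree on structure layout, which is why new TM fields must be added here as well. A rough analogue of what such a check boils down to (sketch only; hypothetical struct, not the VBox macro):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct TMSketch
{
    uint64_t          u64Virtual;
    uint64_t volatile u64VirtualRawPrev;
    uint32_t volatile c1nsVirtualRawSteps;
    uint32_t volatile cVirtualRawBadRawPrev;
};

int main(void)
{
    /* One build emits its offsets; the other compares them against its own. */
    printf("u64VirtualRawPrev      %zu\n", offsetof(struct TMSketch, u64VirtualRawPrev));
    printf("c1nsVirtualRawSteps    %zu\n", offsetof(struct TMSketch, c1nsVirtualRawSteps));
    printf("cVirtualRawBadRawPrev  %zu\n", offsetof(struct TMSketch, cVirtualRawBadRawPrev));
    return 0;
}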