- Timestamp: Apr 20, 2007 10:27:52 PM (18 years ago)
- svn:sync-xref-src-repo-rev: 20648
- Location: trunk
- Files: 10 edited
trunk/include/VBox/vm.h (r2248 → r2283)

             struct TM       s;
     #endif
    -        char            padding[1056];      /* multiple of 32 */
    +        char            padding[1280];      /* multiple of 32 */
         } tm;
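The padding bump (1056 → 1280 bytes) makes room for the TM fields and statistics added in this changeset while keeping the opaque view of the structure a multiple of 32 bytes. A minimal stand-alone sketch of the pattern, with invented names and sizes; only the shape of the padded union and the compile-time guards mirrors vm.h (the real header uses VirtualBox's AssertCompile macros):

    /* Illustration only: TMSAMPLE is a stand-in for the private 'struct TM'. */
    struct TMSAMPLE { unsigned long long au64[150]; };

    union TMUNIONSAMPLE
    {
        struct TMSAMPLE s;              /* the private VMM view */
        char            padding[1280];  /* multiple of 32 - the public, opaque view */
    };

    _Static_assert(sizeof(union TMUNIONSAMPLE) % 32 == 0, "padding must stay a multiple of 32");
    _Static_assert(sizeof(struct TMSAMPLE) <= 1280,        "TM state outgrew its padding");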
trunk/src/VBox/VMM/TM.cpp
r2248 r2283 84 84 * - Employ a thread which periodically (100Hz) polls all the timer queues. 85 85 * 86 * 87 * @section sec_tm_timer Logging 88 * 89 * Level 2: Logs a most of the timer state transitions and queue servicing. 90 * Level 3: Logs a few oddments. 91 * Level 4: Logs TMCLOCK_VIRTUAL_SYNC catch-up events. 92 * 86 93 */ 87 94 … … 120 127 *******************************************************************************/ 121 128 /** The current saved state version.*/ 122 #define TM_SAVED_STATE_VERSION 2129 #define TM_SAVED_STATE_VERSION 3 123 130 124 131 … … 132 139 static void tmR3TimerQueueRun(PVM pVM, PTMTIMERQUEUE pQueue); 133 140 static void tmR3TimerQueueRunVirtualSync(PVM pVM); 134 static uint64_t tmR3TimerQueueRunVirtualSyncGiveup(PVM pVM, uint64_t offNew);135 141 static DECLCALLBACK(void) tmR3TimerInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); 136 142 static DECLCALLBACK(void) tmR3TimerInfoActive(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); … … 283 289 rc = CFGMR3QueryU32(pCfgHandle, "ScheduleSlack", &pVM->tm.s.u32VirtualSyncScheduleSlack); 284 290 if (rc == VERR_CFGM_VALUE_NOT_FOUND) 285 pVM->tm.s.u32VirtualSyncScheduleSlack = 250000; /* 0.25ms (ASSUMES virtual time is nanoseconds) */291 pVM->tm.s.u32VirtualSyncScheduleSlack = 100000; /* 0.100ms (ASSUMES virtual time is nanoseconds) */ 286 292 else if (VBOX_FAILURE(rc)) 287 293 return VMSetError(pVM, rc, RT_SRC_POS, … … 297 303 rc = CFGMR3QueryU64(pCfgHandle, "CatchUpGiveUpThreshold", &pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold); 298 304 if (rc == VERR_CFGM_VALUE_NOT_FOUND) 299 pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold = 1500000000; /* 1.5sec */305 pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold = UINT64_C(60000000000); /* 60 sec */ 300 306 else if (VBOX_FAILURE(rc)) 301 307 return VMSetError(pVM, rc, RT_SRC_POS, … … 309 315 rc = CFGMR3QueryU64(pCfgHandle, "CatchUpStartThreshold" #iPeriod, &u64); \ 310 316 if (rc == VERR_CFGM_VALUE_NOT_FOUND) \ 311 u64 = (DefStart); \317 u64 = UINT64_C(DefStart); \ 312 318 else if (VBOX_FAILURE(rc)) \ 313 319 return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to querying 64-bit integer value \"CatchUpThreshold" #iPeriod "\". (%Vrc)"), rc); \ … … 322 328 return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to querying 32-bit integer value \"CatchUpPrecentage" #iPeriod "\". (%Vrc)"), rc); \ 323 329 } while (0) 324 TM_CFG_PERIOD(0, 25000000, 25); /* 25ms at 1.25x */ 325 TM_CFG_PERIOD(1, 75000000, 50); /* 75ms at 1.50x */ 326 TM_CFG_PERIOD(2, 100000000, 75); /* 75ms at 1.75x */ 327 TM_CFG_PERIOD(3, 150000000, 100); /* 150ms at 2x */ 328 TM_CFG_PERIOD(4, 400000000, 200); /* 400ms at 3x */ 329 TM_CFG_PERIOD(5, 800000000, 300); /* 800ms at 4x */ 330 TM_CFG_PERIOD(6, 1200000000, 400); /* 1200ms at 6x */ 331 TM_CFG_PERIOD(7, 1400000000, 500); /* 1400ms at 8x */ 332 AssertCompile(RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods) == 8); 330 /* This needs more tuning. Not sure if we really need so many period and be so gentle. 
*/ 331 TM_CFG_PERIOD(0, 750000, 5); /* 0.75ms at 1.05x */ 332 TM_CFG_PERIOD(1, 1500000, 10); /* 1.50ms at 1.10x */ 333 TM_CFG_PERIOD(2, 8000000, 25); /* 8ms at 1.25x */ 334 TM_CFG_PERIOD(3, 30000000, 50); /* 30ms at 1.50x */ 335 TM_CFG_PERIOD(4, 100000000, 75); /* 100ms at 1.75x */ 336 TM_CFG_PERIOD(5, 175000000, 100); /* 175ms at 2x */ 337 TM_CFG_PERIOD(6, 500000000, 200); /* 500ms at 3x */ 338 TM_CFG_PERIOD(7, 3000000000, 300); /* 3s at 4x */ 339 TM_CFG_PERIOD(8,30000000000, 400); /* 30s at 5x */ 340 TM_CFG_PERIOD(9,55000000000, 500); /* 55s at 6x */ 341 AssertCompile(RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods) == 10); 333 342 #undef TM_CFG_PERIOD 334 343 … … 411 420 STAM_REG(pVM, &pVM->tm.s.StatTimerStopR3, STAMTYPE_PROFILE, "/TM/TimerStopR3", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerStop calls made in ring-3."); 412 421 413 STAM_REG(pVM, &pVM->tm.s.StatVirtualGet, STAMTYPE_COUNTER, "/TM/VirtualGet", STAMUNIT_OCCURENCES, "The number of times TMR3TimerGet was called when the clock was running."); 414 STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSync, STAMTYPE_COUNTER, "/TM/VirtualGetSync", STAMUNIT_OCCURENCES, "The number of times TMR3TimerGetSync was called when the clock was running."); 422 STAM_REG(pVM, &pVM->tm.s.StatVirtualGet, STAMTYPE_COUNTER, "/TM/VirtualGet", STAMUNIT_OCCURENCES, "The number of times TMTimerGet was called when the clock was running."); 423 STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSetFF, STAMTYPE_COUNTER, "/TM/VirtualGetSetFF", STAMUNIT_OCCURENCES, "Times we set the FF when calling TMTimerGet."); 424 STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSync, STAMTYPE_COUNTER, "/TM/VirtualGetSync", STAMUNIT_OCCURENCES, "The number of times TMTimerGetSync was called when the clock was running."); 425 STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSyncSetFF,STAMTYPE_COUNTER, "/TM/VirtualGetSyncSetFF",STAMUNIT_OCCURENCES, "Times we set the FF when calling TMTimerGetSync."); 415 426 STAM_REG(pVM, &pVM->tm.s.StatVirtualPause, STAMTYPE_COUNTER, "/TM/VirtualPause", STAMUNIT_OCCURENCES, "The number of times TMR3TimerPause was called."); 416 427 STAM_REG(pVM, &pVM->tm.s.StatVirtualResume, STAMTYPE_COUNTER, "/TM/VirtualResume", STAMUNIT_OCCURENCES, "The number of times TMR3TimerResume was called."); … … 419 430 420 431 432 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncCatchup, STAMTYPE_PROFILE_ADV, "/TM/VirtualSync/CatchUp", STAMUNIT_TICKS_PER_OCCURENCE, "Counting and measuring the times spent catching up."); 433 STAM_REG(pVM, (void *)&pVM->tm.s.fVirtualSyncCatchUp, STAMTYPE_U8, "/TM/VirtualSync/CatchUpActive", STAMUNIT_NONE, "Catch-Up active indicator."); 434 STAM_REG(pVM, (void *)&pVM->tm.s.u32VirtualSyncCatchUpPercentage, STAMTYPE_U32, "/TM/VirtualSync/CatchUpPercentage", STAMUNIT_PCT, "The catch-up percentage. (+100/100 to get clock multiplier)"); 435 STAM_REG(pVM, (void *)&pVM->tm.s.offVirtualSync, STAMTYPE_U64, "/TM/VirtualSync/CurrentOffset", STAMUNIT_NS, "The current offset. (subtract GivenUp to get the lag)"); 436 STAM_REG(pVM, (void *)&pVM->tm.s.offVirtualSyncGivenUp, STAMTYPE_U64, "/TM/VirtualSync/GivenUp", STAMUNIT_NS, "Nanoseconds of the 'CurrentOffset' that's been given up and won't ever be attemted caught up with."); 437 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGiveUp, STAMTYPE_COUNTER, "/TM/VirtualSync/GiveUp", STAMUNIT_OCCURENCES, "Times the catch-up was abandoned."); 438 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting,STAMTYPE_COUNTER, "/TM/VirtualSync/GiveUpBeforeStarting", STAMUNIT_OCCURENCES, "Times the catch-up was abandoned before even starting. 
(Typically debugging++.)"); 421 439 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRun, STAMTYPE_COUNTER, "/TM/VirtualSync/Run", STAMUNIT_OCCURENCES, "Times the virtual sync timer queue was considered."); 422 440 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunRestart, STAMTYPE_COUNTER, "/TM/VirtualSync/Run/Restarts", STAMUNIT_OCCURENCES, "Times the clock was restarted after a run."); … … 424 442 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunStoppedAlready, STAMTYPE_COUNTER, "/TM/VirtualSync/Run/StoppedAlready", STAMUNIT_OCCURENCES, "Times the clock was already stopped elsewhere (TMVirtualSyncGet)."); 425 443 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunSlack, STAMTYPE_PROFILE, "/TM/VirtualSync/Run/Slack", STAMUNIT_NS_PER_OCCURENCE, "The scheduling slack. (Catch-up handed out when running timers.)"); 426 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGiveUp, STAMTYPE_COUNTER, "/TM/VirtualSync/GiveUp", STAMUNIT_OCCURENCES, "Times the catch-up was abandoned.");427 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting,STAMTYPE_COUNTER, "/TM/VirtualSync/GiveUpBeforeStarting", STAMUNIT_OCCURENCES, "Times the catch-up was abandoned before even starting. (Typically debugging++.)");428 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncCatchup, STAMTYPE_PROFILE_ADV, "/TM/VirtualSync/CatchUp", STAMUNIT_TICKS_PER_OCCURENCE, "Counting and measuring the times spent catching up.");429 444 for (unsigned i = 0; i < RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods); i++) 430 445 { 431 STAMR3RegisterF(pVM, &pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, "The catch-up percentage.", "/TM/VirtualSync/ %u", i);432 STAMR3RegisterF(pVM, &pVM->tm.s.aStatVirtualSyncCatchupAdjust[i], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Times adjusted to this period.", "/TM/VirtualSync/ %u/Adjust", i);433 STAMR3RegisterF(pVM, &pVM->tm.s.aStatVirtualSyncCatchupInitial[i], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Times started in this period.", "/TM/VirtualSync/ %u/Initial", i);434 STAMR3RegisterF(pVM, &pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u64Start, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "Start of this period (lag).", "/TM/VirtualSync/ %u/Start", i);446 STAMR3RegisterF(pVM, &pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, "The catch-up percentage.", "/TM/VirtualSync/Periods/%u", i); 447 STAMR3RegisterF(pVM, &pVM->tm.s.aStatVirtualSyncCatchupAdjust[i], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Times adjusted to this period.", "/TM/VirtualSync/Periods/%u/Adjust", i); 448 STAMR3RegisterF(pVM, &pVM->tm.s.aStatVirtualSyncCatchupInitial[i], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Times started in this period.", "/TM/VirtualSync/Periods/%u/Initial", i); 449 STAMR3RegisterF(pVM, &pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u64Start, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "Start of this period (lag).", "/TM/VirtualSync/Periods/%u/Start", i); 435 450 } 436 451 … … 440 455 * Register info handlers. 441 456 */ 442 DBGFR3InfoRegisterInternal (pVM, "timers", "Dumps all timers. No arguments.", tmR3TimerInfo);443 DBGFR3InfoRegisterInternal (pVM, "activetimers", "Dumps active all timers. No arguments.", tmR3TimerInfoActive);444 DBGFR3InfoRegisterInternal (pVM, "clocks", "Display the time of the various clocks.", tmR3InfoClocks);457 DBGFR3InfoRegisterInternalEx(pVM, "timers", "Dumps all timers. 
No arguments.", tmR3TimerInfo, DBGFINFO_FLAGS_RUN_ON_EMT); 458 DBGFR3InfoRegisterInternalEx(pVM, "activetimers", "Dumps active all timers. No arguments.", tmR3TimerInfoActive, DBGFINFO_FLAGS_RUN_ON_EMT); 459 DBGFR3InfoRegisterInternalEx(pVM, "clocks", "Display the time of the various clocks.", tmR3InfoClocks, DBGFINFO_FLAGS_RUN_ON_EMT); 445 460 446 461 return VINF_SUCCESS; … … 654 669 /* the virtual timer synchronous clock. */ 655 670 SSMR3PutU64(pSSM, pVM->tm.s.u64VirtualSync); 656 SSMR3PutU64(pSSM, pVM->tm.s.u64VirtualSyncOffset); 671 SSMR3PutU64(pSSM, pVM->tm.s.offVirtualSync); 672 SSMR3PutU64(pSSM, pVM->tm.s.offVirtualSyncGivenUp); 657 673 SSMR3PutU64(pSSM, pVM->tm.s.u64VirtualSyncCatchUpPrev); 658 674 SSMR3PutBool(pSSM, pVM->tm.s.fVirtualSyncCatchUp); … … 715 731 pVM->tm.s.u64VirtualSync = u64; 716 732 SSMR3GetU64(pSSM, &u64); 717 pVM->tm.s.u64VirtualSyncOffset = u64; 733 pVM->tm.s.offVirtualSync = u64; 734 SSMR3GetU64(pSSM, &u64); 735 pVM->tm.s.offVirtualSyncGivenUp = u64; 718 736 SSMR3GetU64(pSSM, &u64); 719 737 pVM->tm.s.u64VirtualSyncCatchUpPrev = u64; … … 1030 1048 return true; 1031 1049 u64Now = pVM->tm.s.fVirtualSyncTicking 1032 ? u64Now - pVM->tm.s. u64VirtualSyncOffset1050 ? u64Now - pVM->tm.s.offVirtualSync 1033 1051 : pVM->tm.s.u64VirtualSync; 1034 1052 if (pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64Now) … … 1253 1271 * 1254 1272 * We use a time frame ranging from the current sync time (which is most likely the 1255 * same as the head timer) and some configurable period ( 250000ns) up towards the1273 * same as the head timer) and some configurable period (100000ns) up towards the 1256 1274 * current virtual time. This period might also need to be restricted by the catch-up 1257 1275 * rate so frequent calls to this function won't accelerate the time too much, however 1258 * this will be implemented at a later point .1276 * this will be implemented at a later point if neccessary. 1259 1277 * 1260 1278 * Without this frame we would 1) having to run timers much more frequently … … 1263 1281 const uint64_t u64VirtualNow = TMVirtualGetEx(pVM, false /* don't check timers */); 1264 1282 uint64_t u64Now; 1265 uint64_t u64Max;1266 1283 if (!pVM->tm.s.fVirtualSyncTicking) 1267 1284 { 1268 1285 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunStoppedAlready); 1269 1286 u64Now = pVM->tm.s.u64VirtualSync; 1270 Assert(u64Now >= pNext->u64Expire); 1271 1272 u64Max = u64Now + pVM->tm.s.u32VirtualSyncScheduleSlack; 1273 if (u64Max > u64VirtualNow) 1274 u64Max = u64VirtualNow; 1287 Assert(u64Now <= pNext->u64Expire); 1275 1288 } 1276 1289 else 1277 1290 { 1278 /* Calc now.*/1279 uint64_t off = pVM->tm.s. u64VirtualSyncOffset;1291 /* Calc 'now'. 
(update order doesn't really matter here) */ 1292 uint64_t off = pVM->tm.s.offVirtualSync; 1280 1293 if (pVM->tm.s.fVirtualSyncCatchUp) 1281 1294 { 1282 const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev; 1283 uint64_t u64Delta = u64VirtualNow - u64Prev; 1295 uint64_t u64Delta = u64VirtualNow - pVM->tm.s.u64VirtualSyncCatchUpPrev; 1284 1296 if (RT_LIKELY(!(u64Delta >> 32))) 1285 1297 { 1286 uint32_t u32Sub = ASMDivU64ByU32RetU32(ASMMult2xU32RetU64((uint32_t)u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage), 1287 100); 1288 if (off > u32Sub) 1289 off -= u32Sub; 1298 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100); 1299 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp) 1300 { 1301 off -= u64Sub; 1302 Log4(("TM: %RU64/%RU64: sub %RU64 (run)\n", u64VirtualNow - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub)); 1303 } 1290 1304 else 1291 off = 0; 1305 { 1306 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c); 1307 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false); 1308 off = pVM->tm.s.offVirtualSyncGivenUp; 1309 Log4(("TM: %RU64/0: caught up (run)\n", u64VirtualNow)); 1310 } 1292 1311 } 1312 ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off); 1313 pVM->tm.s.u64VirtualSyncCatchUpPrev = u64VirtualNow; 1293 1314 } 1294 1315 u64Now = u64VirtualNow - off; 1295 1316 1296 /* Check if stopped by expired timer and calc the frame end. */ 1297 if (u64Now <= pNext->u64Expire) 1298 { 1299 if (pVM->tm.s.u64VirtualSyncOffset <= pVM->tm.s.u32VirtualSyncScheduleSlack) 1300 u64Max = pVM->tm.s.u64VirtualSyncOffset; 1301 else 1302 u64Max = pVM->tm.s.u32VirtualSyncScheduleSlack; 1303 } 1304 else 1317 /* Check if stopped by expired timer. */ 1318 if (u64Now >= pNext->u64Expire) 1305 1319 { 1306 1320 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunStop); … … 1308 1322 ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64Now); 1309 1323 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false); 1310 1311 u64Max = u64Now + pVM->tm.s.u32VirtualSyncScheduleSlack; 1312 if (u64Max > u64VirtualNow) 1313 u64Max = u64VirtualNow; 1324 Log4(("TM: %RU64/%RU64: exp tmr (run)\n", u64Now, u64VirtualNow - u64Now - pVM->tm.s.offVirtualSyncGivenUp)); 1325 1314 1326 } 1315 1327 } 1316 1328 1329 /* calc end of frame. */ 1330 uint64_t u64Max = u64Now + pVM->tm.s.u32VirtualSyncScheduleSlack; 1331 if (u64Max > u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp) 1332 u64Max = u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp; 1333 1334 /* assert sanity */ 1335 Assert(u64Now <= u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp); 1336 Assert(u64Max <= u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp); 1337 Assert(u64Now <= u64Max); 1338 1317 1339 /* 1318 1340 * Process the expired timers moving the clock along as we progress. 1319 1341 */ 1342 #ifdef VBOX_STRICT 1343 uint64_t u64Prev = u64Now; NOREF(u64Prev); 1344 #endif 1320 1345 while (pNext && pNext->u64Expire <= u64Max) 1321 1346 { … … 1328 1353 if (fRc) 1329 1354 { 1330 Assert(!pTimer->offScheduleNext); /* this can trigger falsely */1331 1332 1355 /* unlink */ 1333 1356 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer); … … 1344 1367 pTimer->offPrev = 0; 1345 1368 1346 /* advance the clock */ 1369 /* advance the clock - don't permit timers to be out of order or armed in the 'past'. 
*/ 1370 #ifdef VBOX_STRICT 1371 AssertMsg(pTimer->u64Expire >= u64Prev, ("%RU64 < %RU64 %s\n", pTimer->u64Expire, u64Prev, pTimer->pszDesc)); 1372 u64Prev = pTimer->u64Expire; 1373 #endif 1347 1374 ASMAtomicXchgSize(&pVM->tm.s.fVirtualSyncTicking, false); 1348 ASMAtomicXchgU64(&pVM->tm.s.u64Virtual , pTimer->u64Expire);1375 ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, pTimer->u64Expire); 1349 1376 1350 1377 /* fire */ … … 1375 1402 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunRestart); 1376 1403 1404 /* calc the slack we've handed out. */ 1377 1405 const uint64_t u64VirtualNow2 = TMVirtualGetEx(pVM, false /* don't check timers */); 1378 1406 Assert(u64VirtualNow2 >= u64VirtualNow); 1407 AssertMsg(pVM->tm.s.u64VirtualSync >= u64Now, ("%RU64 < %RU64\n", pVM->tm.s.u64VirtualSync, u64Now)); 1379 1408 const uint64_t offSlack = pVM->tm.s.u64VirtualSync - u64Now; 1380 STAM_STATS( { 1381 if (offSlack) { 1409 STAM_STATS({ 1410 if (offSlack) 1411 { 1382 1412 PSTAMPROFILE p = &pVM->tm.s.StatVirtualSyncRunSlack; 1383 p->cPeriods++; 1413 p->cPeriods++; 1384 1414 p->cTicks += offSlack; 1385 1415 if (p->cTicksMax < offSlack) p->cTicksMax = offSlack; 1386 if (p->cTicksMin > offSlack) p->cTicksM ax= offSlack;1387 } 1416 if (p->cTicksMin > offSlack) p->cTicksMin = offSlack; 1417 } 1388 1418 }); 1389 1419 1390 1420 /* Let the time run a little bit while we were busy running timers(?). */ 1391 1421 uint64_t u64Elapsed; 1392 #define MAX_ELAPSED 15000 /*ns*/1422 #define MAX_ELAPSED 30000 /* ns */ 1393 1423 if (offSlack > MAX_ELAPSED) 1394 1424 u64Elapsed = 0; … … 1404 1434 /* Calc the current offset. */ 1405 1435 uint64_t offNew = u64VirtualNow2 - pVM->tm.s.u64VirtualSync - u64Elapsed; 1406 1407 /* Deal with starting, adjusting and stopping catchup. */ 1436 Assert(!(offNew & RT_BIT_64(63))); 1437 uint64_t offLag = offNew - pVM->tm.s.offVirtualSyncGivenUp; 1438 Assert(!(offLag & RT_BIT_64(63))); 1439 1440 /* 1441 * Deal with starting, adjusting and stopping catchup. 
1442 */ 1408 1443 if (pVM->tm.s.fVirtualSyncCatchUp) 1409 1444 { 1410 if (off New<= pVM->tm.s.u64VirtualSyncCatchUpStopThreshold)1445 if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpStopThreshold) 1411 1446 { 1412 1447 /* stop */ 1413 1448 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c); 1414 1449 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false); 1450 Log4(("TM: %RU64/%RU64: caught up\n", u64VirtualNow2 - offNew, offLag)); 1415 1451 } 1416 else if (off New<= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold)1452 else if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold) 1417 1453 { 1418 1454 /* adjust */ 1419 1455 unsigned i = 0; 1420 1456 while ( i + 1 < RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods) 1421 && off New>= pVM->tm.s.aVirtualSyncCatchUpPeriods[i + 1].u64Start)1457 && offLag >= pVM->tm.s.aVirtualSyncCatchUpPeriods[i + 1].u64Start) 1422 1458 i++; 1423 1459 if (pVM->tm.s.u32VirtualSyncCatchUpPercentage < pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage) … … 1425 1461 STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupAdjust[i]); 1426 1462 ASMAtomicXchgU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage); 1463 Log4(("TM: %RU64/%RU64: adj %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage)); 1427 1464 } 1428 1465 pVM->tm.s.u64VirtualSyncCatchUpPrev = u64VirtualNow2; … … 1432 1469 /* give up */ 1433 1470 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUp); 1434 offNew = tmR3TimerQueueRunVirtualSyncGiveup(pVM, offNew); 1471 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c); 1472 ASMAtomicXchgU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew); 1473 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false); 1474 Log4(("TM: %RU64/%RU64: give up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage)); 1475 LogRel(("TM: Giving up catch-up attempt at a %RU64 ns lag; new total: %RU64 ns\n", offLag, offNew)); 1435 1476 } 1436 1477 } 1437 else if (off New>= pVM->tm.s.aVirtualSyncCatchUpPeriods[0].u64Start)1478 else if (offLag >= pVM->tm.s.aVirtualSyncCatchUpPeriods[0].u64Start) 1438 1479 { 1439 if (off New<= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold)1480 if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold) 1440 1481 { 1441 1482 /* start */ … … 1443 1484 unsigned i = 0; 1444 1485 while ( i + 1 < RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods) 1445 && off New>= pVM->tm.s.aVirtualSyncCatchUpPeriods[i + 1].u64Start)1486 && offLag >= pVM->tm.s.aVirtualSyncCatchUpPeriods[i + 1].u64Start) 1446 1487 i++; 1447 1488 STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupInitial[i]); 1448 1489 ASMAtomicXchgU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage); 1449 1490 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, true); 1491 Log4(("TM: %RU64/%RU64: catch-up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage)); 1450 1492 } 1451 1493 else … … 1453 1495 /* not bother */ 1454 1496 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting); 1455 offNew = tmR3TimerQueueRunVirtualSyncGiveup(pVM, offNew); 1497 ASMAtomicXchgU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew); 1498 Log4(("TM: %RU64/%RU64: give up\n", u64VirtualNow2 - offNew, offLag)); 1499 LogRel(("TM: Not bothering to attempt catching up a %RU64 ns lag; new total: %RU64\n", offLag, offNew)); 1456 1500 } 1457 1501 } 1458 1502 1459 /* Update the offset and start the 
clock. */ 1460 ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSyncOffset, offNew); 1503 /* 1504 * Update the offset and restart the clock. 1505 */ 1506 ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, offNew); 1461 1507 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, true); 1462 1508 } 1463 }1464 1465 1466 /**1467 * Give up the chase.1468 *1469 * Not quite sure how to let the devices know about this, but somehow they will have1470 * to (quietly) drop interrupts en masse and not cause any interrupt storms...1471 *1472 * @returns New offset.1473 *1474 * @param pVM The VM handle.1475 * @param offNew The current offset.1476 */1477 static uint64_t tmR3TimerQueueRunVirtualSyncGiveup(PVM pVM, uint64_t offNew)1478 {1479 /** @todo deal with this. */1480 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);1481 return 0;1482 1509 } 1483 1510 … … 1658 1685 pTimer->offPrev, 1659 1686 pTimer->offScheduleNext, 1660 pTimer->enmClock == TMCLOCK_REAL ? "Real " : "Virt ", 1687 pTimer->enmClock == TMCLOCK_REAL 1688 ? "Real " 1689 : pTimer->enmClock == TMCLOCK_VIRTUAL 1690 ? "Virt " 1691 : pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC 1692 ? "VrSy " 1693 : "TSC ", 1661 1694 TMTimerGet(pTimer), 1662 1695 pTimer->u64Expire, … … 1679 1712 NOREF(pszArgs); 1680 1713 1681 /* TSC */ 1682 uint64_t u64 = TMCpuTickGet(pVM); 1714 /* 1715 * Read the times first to avoid more than necessary time variation. 1716 */ 1717 const uint64_t u64TSC = TMCpuTickGet(pVM); 1718 const uint64_t u64Virtual = TMVirtualGet(pVM); 1719 const uint64_t u64VirtualSync = TMVirtualSyncGet(pVM); 1720 const uint64_t u64Real = TMRealGet(pVM); 1721 1722 /* 1723 * TSC 1724 */ 1683 1725 pHlp->pfnPrintf(pHlp, 1684 "Cpu Tick: % #RX64 (%RU64) %RU64Hz %s%s",1685 u64 , u64, TMCpuTicksPerSecond(pVM),1726 "Cpu Tick: %18RU64 (%#016RX64) %RU64Hz %s%s", 1727 u64TSC, u64TSC, TMCpuTicksPerSecond(pVM), 1686 1728 pVM->tm.s.fTSCTicking ? "ticking" : "paused", 1687 1729 pVM->tm.s.fTSCVirtualized ? " - virtualized" : ""); … … 1690 1732 pHlp->pfnPrintf(pHlp, " - real tsc"); 1691 1733 if (pVM->tm.s.u64TSCOffset) 1692 pHlp->pfnPrintf(pHlp, "\n offset % #RX64", pVM->tm.s.u64TSCOffset);1734 pHlp->pfnPrintf(pHlp, "\n offset %RU64", pVM->tm.s.u64TSCOffset); 1693 1735 } 1694 1736 else … … 1696 1738 pHlp->pfnPrintf(pHlp, "\n"); 1697 1739 1698 /* virtual */ 1699 u64 = TMVirtualGet(pVM); 1740 /* 1741 * virtual 1742 */ 1700 1743 pHlp->pfnPrintf(pHlp, 1701 " Virtual: % #RX64 (%RU64) %RU64Hz %s",1702 u64 , u64, TMVirtualGetFreq(pVM),1744 " Virtual: %18RU64 (%#016RX64) %RU64Hz %s", 1745 u64Virtual, u64Virtual, TMVirtualGetFreq(pVM), 1703 1746 pVM->tm.s.fVirtualTicking ? "ticking" : "paused"); 1704 1747 if (pVM->tm.s.fVirtualWarpDrive) … … 1706 1749 pHlp->pfnPrintf(pHlp, "\n"); 1707 1750 1708 /* virtual sync */ 1709 u64 = TMVirtualSyncGet(pVM); 1751 /* 1752 * virtual sync 1753 */ 1710 1754 pHlp->pfnPrintf(pHlp, 1711 "VirtSync: % #RX64 (%RU64) %s%s",1712 u64 , u64,1755 "VirtSync: %18RU64 (%#016RX64) %s%s", 1756 u64VirtualSync, u64VirtualSync, 1713 1757 pVM->tm.s.fVirtualSyncTicking ? "ticking" : "paused", 1714 1758 pVM->tm.s.fVirtualSyncCatchUp ? 
" - catchup" : ""); 1715 if (pVM->tm.s.u64VirtualSyncOffset) 1716 pHlp->pfnPrintf(pHlp, "\n offset %#RX64", pVM->tm.s.u64VirtualSyncOffset); 1759 if (pVM->tm.s.offVirtualSync) 1760 { 1761 pHlp->pfnPrintf(pHlp, "\n offset %RU64", pVM->tm.s.offVirtualSync); 1762 if (pVM->tm.s.u32VirtualSyncCatchUpPercentage) 1763 pHlp->pfnPrintf(pHlp, " catch-up rate %u %%", pVM->tm.s.u32VirtualSyncCatchUpPercentage); 1764 } 1717 1765 pHlp->pfnPrintf(pHlp, "\n"); 1718 1766 1719 /* real */ 1720 u64 = TMRealGet(pVM); 1767 /* 1768 * real 1769 */ 1721 1770 pHlp->pfnPrintf(pHlp, 1722 " Real: %#RX64 (%RU64) %RU64Hz\n", 1723 u64, u64, TMRealGetFreq(pVM)); 1724 } 1771 " Real: %18RU64 (%#016RX64) %RU64Hz\n", 1772 u64Real, u64Real, TMRealGetFreq(pVM)); 1773 } 1774 -
trunk/src/VBox/VMM/TMInternal.h (r2248 → r2283)

         uint64_t                    u64VirtualWarpDriveStart;

    -    /** The offset of the virtual timer synchronous clock (TMCLOCK_VIRTUAL_SYNC) relative
    -     * to the virtual clock. */
    -    uint64_t volatile           u64VirtualSyncOffset;
    +    /** The guest virtual timer synchronous time when fVirtualSyncTicking is cleared. */
    +    uint64_t volatile           u64VirtualSync;
    +    /** The offset of the timer synchronous virtual clock (TMCLOCK_VIRTUAL_SYNC) relative
    +     * to the virtual clock (TMCLOCK_VIRTUAL).
    +     * (This is accessed by the timer thread and must be updated atomically.) */
    +    uint64_t volatile           offVirtualSync;
    +    /** The offset into offVirtualSync that's been irrevocably given up by failed catch-up attempts.
    +     * Thus the current lag is offVirtualSync - offVirtualSyncGivenUp. */
    +    uint64_t                    offVirtualSyncGivenUp;
         /** The TMCLOCK_VIRTUAL at the previous TMVirtualGetSync call when catch-up is active. */
         uint64_t volatile           u64VirtualSyncCatchUpPrev;
    -    /** The guest virtual timer synchronous time when fVirtualSyncTicking is cleared. */
    -    uint64_t volatile           u64VirtualSync;
         /** The current catch-up percentage. */
         uint32_t volatile           u32VirtualSyncCatchUpPercentage;
    …
         /** @def TM_MAX_CATCHUP_PERIODS
          * The number of catchup rates. */
    -#define TM_MAX_CATCHUP_PERIODS  8
    +#define TM_MAX_CATCHUP_PERIODS  10
         /** The agressivness of the catch-up relative to how far we've lagged behind.
          * The idea is to have increasing catch-up percentage as the lag increases. */
    …
         /** Alignment padding to ensure that the statistics are 64-bit aligned when using GCC. */
    -    uint32_t                    u32Padding;
    +    uint32_t                    u32Padding1;

         /** TMR3TimerQueuesDo
    …
          * @{ */
         STAMCOUNTER                 StatVirtualGet;
    +    STAMCOUNTER                 StatVirtualGetSetFF;
         STAMCOUNTER                 StatVirtualGetSync;
    +    STAMCOUNTER                 StatVirtualGetSyncSetFF;
         STAMCOUNTER                 StatVirtualPause;
         STAMCOUNTER                 StatVirtualResume;
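TM_MAX_CATCHUP_PERIODS grows from 8 to 10 to match the gentler, more fine-grained table configured in TM.cpp above. Purely for illustration, here is how a lag maps onto those periods; the table values are copied from the changeset, while the helper itself is a simplification of the selection loops in tmR3TimerQueueRunVirtualSync (in the real code the catch-up only starts once the lag reaches the first threshold):

    #include <stdint.h>

    static const struct { uint64_t u64Start; uint32_t u32Percentage; } g_aPeriods[10] =
    {
        {                 750000,   5 },  /* 0.75 ms at 1.05x */
        {                1500000,  10 },  /* 1.50 ms at 1.10x */
        {                8000000,  25 },  /* 8 ms    at 1.25x */
        {               30000000,  50 },  /* 30 ms   at 1.50x */
        {              100000000,  75 },  /* 100 ms  at 1.75x */
        {              175000000, 100 },  /* 175 ms  at 2x    */
        {              500000000, 200 },  /* 500 ms  at 3x    */
        {  UINT64_C( 3000000000), 300 },  /* 3 s     at 4x    */
        {  UINT64_C(30000000000), 400 },  /* 30 s    at 5x    */
        {  UINT64_C(55000000000), 500 },  /* 55 s    at 6x    */
    };

    /* Pick the catch-up percentage for a given lag (ns). */
    static uint32_t catchUpPercentageForLag(uint64_t offLag)
    {
        unsigned i = 0;
        while (   i + 1 < sizeof(g_aPeriods) / sizeof(g_aPeriods[0])
               && offLag >= g_aPeriods[i + 1].u64Start)
            i++;
        return g_aPeriods[i].u32Percentage;
    }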
trunk/src/VBox/VMM/VM.cpp (r2233 → r2283)

     DECLCALLBACK(void) vmR3SetErrorV(PVM pVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *pArgs)
     {
    +#ifdef LOG_ENABLED
    +    /*
    +     * Log the error.
    +     */
    +    RTLogPrintf("VMSetError: %s(%d) %s\n", pszFile, iLine, pszFunction);
    +    va_list va3;
    +    va_copy(va3, *pArgs);
    +    RTLogPrintfV(pszFormat, va3);
    +    va_end(va3);
    +#endif
    +
         /*
          * Make a copy of the message.
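The VM.cpp hunk logs the error before the message is consumed. Because a va_list can only be traversed once, it is va_copy'd for the logging pass; a minimal stand-alone illustration, with vfprintf standing in for RTLogPrintfV:

    #include <stdarg.h>
    #include <stdio.h>

    static void logThenStore(const char *pszFormat, va_list *pArgs)
    {
        va_list vaLog;
        va_copy(vaLog, *pArgs);              /* leave *pArgs untouched for the real consumer */
        vfprintf(stderr, pszFormat, vaLog);
        va_end(vaLog);

        /* ... the original function goes on to format *pArgs into the stored error ... */
    }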
trunk/src/VBox/VMM/VMEmt.cpp
r1633 r2283 39 39 #include <iprt/semaphore.h> 40 40 #include <iprt/thread.h> 41 #include <iprt/time.h> 41 42 42 43 … … 308 309 * and the yielder is suspended. 309 310 */ 310 // TMCpuTickResume(pVM);311 311 VMMR3YieldSuspend(pVM); 312 312 … … 316 316 int rc = VINF_SUCCESS; 317 317 ASMAtomicXchgU32(&pVM->vm.s.fWait, 1); 318 //unsigned cLoops = 0;318 unsigned cLoops = 0; 319 319 for (;;) 320 320 { 321 #ifdef VBOX_HIGH_RES_TIMERS_HACK322 321 /* 323 322 * Work the timers and check if we can exit. … … 343 342 if (u64NanoTS < 50000) 344 343 { 345 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);344 RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++); 346 345 /* spin */; 347 346 } … … 349 348 { 350 349 VMMR3YieldStop(pVM); 350 uint64_t u64Start = RTTimeNanoTS(); 351 351 if (u64NanoTS < 870000) /* this is a bit speculative... works fine on linux. */ 352 352 { 353 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield\n", u64NanoTS, cLoops++);353 RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++); 354 354 STAM_PROFILE_ADV_START(&pVM->vm.s.StatHaltYield, a); 355 355 RTThreadYield(); /* this is the best we can do here */ … … 358 358 else if (u64NanoTS < 2000000) 359 359 { 360 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms\n", u64NanoTS, cLoops++);360 RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++); 361 361 STAM_PROFILE_ADV_START(&pVM->vm.s.StatHaltBlock, a); 362 362 rc = RTSemEventWait(pVM->vm.s.EventSemWait, 1); … … 365 365 else 366 366 { 367 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms\n", u64NanoTS, cLoops++, (uint32_t)RT_MIN(u64NanoTS/ 1000000, 15));367 RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15)); 368 368 STAM_PROFILE_ADV_START(&pVM->vm.s.StatHaltBlock, a); 369 rc = RTSemEventWait(pVM->vm.s.EventSemWait, RT_MIN( u64NanoTS/ 1000000, 15));369 rc = RTSemEventWait(pVM->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15)); 370 370 STAM_PROFILE_ADV_STOP(&pVM->vm.s.StatHaltBlock, a); 371 371 } 372 } 373 #else 374 375 /* 376 * We have to check if we can exit, run timers, and then recheck. 377 */ 378 /** @todo 379 * The other thing we have to check is how long it is till the next timer 380 * can be serviced and not wait any longer than that. 381 */ 382 if (VM_FF_ISPENDING(pVM, fMask)) 383 break; 384 STAM_PROFILE_ADV_START(&pVM->vm.s.StatHaltTimers, b); 385 TMR3TimerQueuesDo(pVM); 386 STAM_PROFILE_ADV_STOP(&pVM->vm.s.StatHaltTimers, b); 387 if (VM_FF_ISPENDING(pVM, fMask)) 388 break; 389 /* hacking */ 390 RTThreadYield(); 391 TMR3TimerQueuesDo(pVM); 392 if (VM_FF_ISPENDING(pVM, fMask)) 393 break; 394 395 /* 396 * Wait for a while. Someone will wake us up or interrupt the call if 397 * anything needs our attention. 398 */ 399 STAM_PROFILE_ADV_START(&pVM->vm.s.StatHaltBlock, a); 400 rc = RTSemEventWait(pVM->vm.s.EventSemWait, 10); 401 STAM_PROFILE_ADV_STOP(&pVM->vm.s.StatHaltBlock, a); 402 #endif 372 uint64_t u64Slept = RTTimeNanoTS() - u64Start; 373 RTLogPrintf(" -> rc=%Vrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept); 374 } 403 375 if (rc == VERR_TIMEOUT) 404 376 rc = VINF_SUCCESS; … … 419 391 * and resume the yielder. 420 392 */ 421 // TMCpuTickPause(pVM);422 393 VMMR3YieldResume(pVM); 423 394 -
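The halt-method rework in VMEmt.cpp drops the old non-high-resolution branch and always picks a wait strategy from the time left until the next timer event. A sketch of that decision follows, with the thresholds taken from the diff; the return convention is invented for the example:

    #include <stdint.h>

    typedef enum { HALT_SPIN, HALT_YIELD, HALT_SLEEP } HALTACTION;

    static HALTACTION haltDecide(uint64_t cNsToNext, uint32_t *pcMsSleep)
    {
        if (cNsToNext < 50000)            /* < 50 us: just spin */
            return HALT_SPIN;
        if (cNsToNext < 870000)           /* < 870 us: a thread yield is enough */
            return HALT_YIELD;
        if (cNsToNext < 2000000)          /* < 2 ms: shortest possible sleep */
        {
            *pcMsSleep = 1;
            return HALT_SLEEP;
        }
        /* Longer waits: sleep most of the interval, but never more than 15 ms,
         * and leave ~1 ms of slack so we wake up before the timer expires. */
        uint64_t cMs = (cNsToNext - 1000000) / 1000000;
        *pcMsSleep = (uint32_t)(cMs < 15 ? cMs : 15);
        return HALT_SLEEP;
    }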
trunk/src/VBox/VMM/VMM.cpp
r2228 r2283 1747 1747 TMTimerStop(pVM->vmm.s.pYieldTimer); 1748 1748 } 1749 pVM->vmm.s.u64LastYield = RTTimeNanoTS(); 1749 1750 } 1750 1751 … … 1760 1761 TMTimerStop(pVM->vmm.s.pYieldTimer); 1761 1762 pVM->vmm.s.cYieldResumeMillies = pVM->vmm.s.cYieldEveryMillies; 1763 pVM->vmm.s.u64LastYield = RTTimeNanoTS(); 1762 1764 } 1763 1765 … … 1787 1789 static DECLCALLBACK(void) vmmR3YieldEMT(PVM pVM, PTMTIMER pTimer, void *pvUser) 1788 1790 { 1791 /* 1792 * This really needs some careful tuning. While we shouldn't be too gready since 1793 * that'll cause the rest of the system to stop up, we shouldn't be too nice either 1794 * because that'll cause us to stop up. 1795 * 1796 * The current logic is to use the default interval when there is no lag worth 1797 * mentioning, but when we start accumulating lag we don't bother yielding at all. 1798 * 1799 * (This depends on the TMCLOCK_VIRTUAL_SYNC to be scheduled before TMCLOCK_REAL 1800 * so the lag is up to date.) 1801 */ 1802 const uint64_t u64Lag = TMVirtualSyncGetLag(pVM); 1803 if ( u64Lag < 50000000 /* 50ms */ 1804 || ( u64Lag < 1000000000 /* 1s */ 1805 && RTTimeNanoTS() - pVM->vmm.s.u64LastYield < 500000000 /* 500 ms */) 1806 ) 1807 { 1808 uint64_t u64Elapsed = RTTimeNanoTS(); 1809 pVM->vmm.s.u64LastYield = u64Elapsed; 1810 1811 RTThreadYield(); 1812 1789 1813 #ifdef LOG_ENABLED 1790 uint64_t u64Elapsed = RTTimeNanoTS(); 1791 #endif 1792 RTThreadYield(); 1814 u64Elapsed = RTTimeNanoTS() - u64Elapsed; 1815 Log(("vmmR3YieldEMT: %RI64 ns\n", u64Elapsed)); 1816 #endif 1817 } 1793 1818 TMTimerSetMillies(pTimer, pVM->vmm.s.cYieldEveryMillies); 1794 Log(("vmmR3YieldEMT: %RI64 ns\n", RTTimeNanoTS() - u64Elapsed));1795 1819 } 1796 1820 -
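vmmR3YieldEMT now consults the virtual-sync lag before being nice to the rest of the host: it yields on the normal schedule while the lag is negligible, still yields for a moderate lag if the previous yield was recent, and stops yielding once the guest has fallen far behind. Condensed into a predicate; the constants come from the diff, the function itself is made up for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    static bool shouldYield(uint64_t u64LagNs, uint64_t u64NowNs, uint64_t u64LastYieldNs)
    {
        if (u64LagNs < UINT64_C(50000000))                       /* lag < 50 ms: yield as usual */
            return true;
        if (   u64LagNs < UINT64_C(1000000000)                   /* lag < 1 s ...               */
            && u64NowNs - u64LastYieldNs < UINT64_C(500000000))  /* ... and yielded < 500 ms ago */
            return true;
        return false;                                            /* we are behind: don't be nice */
    }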
trunk/src/VBox/VMM/VMMAll/TMAll.cpp
r2248 r2283 161 161 const uint64_t u64Now = TMVirtualGet(pVM); 162 162 163 /* TMCLOCK_VIRTUAL */ 163 /* 164 * TMCLOCK_VIRTUAL 165 */ 164 166 const uint64_t u64Expire1 = pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire; 165 167 const int64_t i64Delta1 = u64Expire1 - u64Now; … … 175 177 } 176 178 177 /* TMCLOCK_VIRTUAL_SYNC */ 179 /* 180 * TMCLOCK_VIRTUAL_SYNC 181 * This isn't quite as stright forward if in a catch-up, not only do 182 * we have to adjust the 'now' but when have to adjust the delta as well. 183 */ 178 184 const uint64_t u64Expire2 = pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire; 179 const int64_t i64Delta2 = u64Expire2 - (u64Now - pVM->tm.s.u64VirtualSyncOffset); 185 uint64_t u64VirtualSyncNow; 186 if (!pVM->tm.s.fVirtualSyncTicking) 187 u64VirtualSyncNow = pVM->tm.s.u64VirtualSync; 188 else 189 { 190 if (!pVM->tm.s.fVirtualSyncCatchUp) 191 u64VirtualSyncNow = u64Now - pVM->tm.s.offVirtualSync; 192 else 193 { 194 uint64_t off = pVM->tm.s.offVirtualSync; 195 uint64_t u64Delta = u64Now - pVM->tm.s.u64VirtualSyncCatchUpPrev; 196 if (RT_LIKELY(!(u64Delta >> 32))) 197 { 198 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100); 199 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp) 200 off -= u64Sub; 201 else 202 off = pVM->tm.s.offVirtualSyncGivenUp; 203 } 204 u64VirtualSyncNow = u64Now - off; 205 } 206 } 207 int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow; 180 208 if (i64Delta2 <= 0) 181 209 { … … 188 216 return 0; 189 217 } 218 if (pVM->tm.s.fVirtualSyncCatchUp) 219 i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100); 190 220 191 221 /* 192 * Return the time selfto the next event.222 * Return the time left to the next event. 193 223 */ 194 224 STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss); … … 359 389 Assert(!pTimer->offPrev); 360 390 Assert(!pTimer->offNext); 391 AssertMsg( pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC 392 || pTimer->CTXALLSUFF(pVM)->tm.s.fVirtualSyncTicking 393 || u64Expire >= pTimer->CTXALLSUFF(pVM)->tm.s.u64VirtualSync, 394 ("%RU64 < %RU64 %s\n", u64Expire, pTimer->CTXALLSUFF(pVM)->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc))); 361 395 pTimer->u64Expire = u64Expire; 362 396 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE); -
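TMTimerPoll now scales the virtual-sync expiry delta back into virtual-clock time when a catch-up is active: a clock running at (100 + pct)% speed reaches an expiry that is D sync-nanoseconds away after only D·100/(100 + pct) virtual nanoseconds. A tiny illustration, assuming a positive delta as in the code path where the scaling is applied:

    #include <stdint.h>

    static int64_t syncDeltaToVirtualDelta(int64_t cNsSyncDelta, uint32_t uCatchUpPct)
    {
        /* e.g. a timer 10 ms away on a 2x sync clock (uCatchUpPct == 100) is 5 ms away in virtual time. */
        return (int64_t)((uint64_t)cNsSyncDelta * 100 / (uCatchUpPct + 100));
    }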
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
r2248 r2283 96 96 { 97 97 uint64_t u64; 98 if ( pVM->tm.s.fVirtualTicking)98 if (RT_LIKELY(pVM->tm.s.fVirtualTicking)) 99 99 { 100 100 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet); … … 108 108 && ( pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64 109 109 || ( pVM->tm.s.fVirtualSyncTicking 110 && pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s. u64VirtualSyncOffset110 && pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync 111 111 ) 112 112 ) … … 114 114 { 115 115 VM_FF_SET(pVM, VM_FF_TIMER); 116 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF); 116 117 #ifdef IN_RING3 117 118 REMR3NotifyTimerPending(pVM); … … 190 191 VMR3NotifyFF(pVM, true); 191 192 #endif 193 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF); 192 194 } 193 195 … … 210 212 * 211 213 * Assuming nano second virtual time, we can simply ignore any intervals which has 212 * any of the upper 32 bits set. This will have the nice sideeffect of allowing us 213 * to use (faster) 32-bit math. 214 * any of the upper 32 bits set. 214 215 */ 215 AssertCompile(TMCLOCK_FREQ_VIRTUAL <= 2000000000); /* (assumes low 32-bit >= 2 seconds) */216 uint64_t u64Offset = pVM->tm.s.u64VirtualSyncOffset;216 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000); 217 uint64_t off = pVM->tm.s.offVirtualSync; 217 218 if (pVM->tm.s.fVirtualSyncCatchUp) 218 219 { … … 221 222 if (RT_LIKELY(!(u64Delta >> 32))) 222 223 { 223 uint32_t u32Sub = ASMDivU64ByU32RetU32(ASMMult2xU32RetU64((uint32_t)u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage), 224 100); 225 if (u64Offset > u32Sub) 224 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100); 225 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp) 226 226 { 227 u64Offset -= u32Sub;228 ASMAtomicXchgU64(&pVM->tm.s. u64VirtualSyncOffset, u64Offset);227 off -= u64Sub; 228 ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off); 229 229 pVM->tm.s.u64VirtualSyncCatchUpPrev = u64; 230 Log4(("TM: %RU64/%RU64: sub %RU32\n", u64 - off, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp, u64Sub)); 230 231 } 231 232 else 232 233 { 233 234 /* we've completely caught up. */ 234 u64Offset = 0; 235 ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSyncOffset, 0); 235 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c); 236 off = pVM->tm.s.offVirtualSyncGivenUp; 237 ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off); 236 238 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false); 237 239 pVM->tm.s.u64VirtualSyncCatchUpPrev = u64; 240 Log4(("TM: %RU64/0: caught up\n", u64)); 238 241 } 239 242 } … … 252 255 * set the the timer pending flag. 253 256 */ 254 u64 -= u64Offset;257 u64 -= off; 255 258 const uint64_t u64Expire = pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire; 256 259 if (u64 >= u64Expire) … … 266 269 VMR3NotifyFF(pVM, true); 267 270 #endif 271 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSyncSetFF); 272 Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp)); 268 273 } 274 else 275 Log4(("TM: %RU64/%RU64: exp tmr\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp)); 269 276 } 270 277 } … … 283 290 TMDECL(uint64_t) TMVirtualSyncGetLag(PVM pVM) 284 291 { 285 return pVM->tm.s. u64VirtualSyncOffset;292 return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp; 286 293 } 287 294 -
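TMAllVirtual.cpp replaces the 32-bit ASMMult2xU32RetU64/ASMDivU64ByU32RetU32 pair with ASMMultU64ByU32DivByU32, so the catch-up delta no longer has to fit in 32 bits (roughly 4.29 s of virtual time). A rough stand-in for that helper's semantics, assuming a GCC/Clang 128-bit intermediate; the real IPRT routine keeps the wide intermediate itself:

    #include <stdint.h>

    static uint64_t multU64ByU32DivByU32(uint64_t u64A, uint32_t u32B, uint32_t u32C)
    {
        return (uint64_t)(((unsigned __int128)u64A * u32B) / u32C);
    }

    /* e.g. 10 s of delta at a 400% catch-up rate:
     *   multU64ByU32DivByU32(UINT64_C(10000000000), 400, 100) == 40000000000. */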
trunk/src/VBox/VMM/VMMInternal.h (r1210 → r2283)

         uint32_t                    u32Padding0; /**< Alignment padding. */
     #endif
    +    /** The timestamp of the previous yield. (nano) */
    +    uint64_t                    u64LastYield;

         /** @name CallHost
trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp (r2270 → r2283)

     GEN_CHECK_OFF(TM, u64Virtual);
     GEN_CHECK_OFF(TM, u64VirtualWarpDriveStart);
    -GEN_CHECK_OFF(TM, u64VirtualSyncOffset);
    +GEN_CHECK_OFF(TM, u64VirtualSync);
    +GEN_CHECK_OFF(TM, offVirtualSync);
    +GEN_CHECK_OFF(TM, offVirtualSyncGivenUp);
     GEN_CHECK_OFF(TM, u64VirtualSyncCatchUpPrev);
    -GEN_CHECK_OFF(TM, u64VirtualSync);
     GEN_CHECK_OFF(TM, u32VirtualSyncCatchUpPercentage);
     GEN_CHECK_OFF(TM, u32VirtualSyncScheduleSlack);
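tstVMStructGC is the offset-checking testcase: each GEN_CHECK_OFF() line makes the ring-3 and raw-mode (GC) builds emit and compare member offsets, so the TM field reshuffle above is verified to look identical from both contexts. A toy, self-contained imitation of what such a generated check boils down to (TOYTM and the macro name are invented):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct TOYTM
    {
        uint64_t u64VirtualSync;
        uint64_t offVirtualSync;
        uint64_t offVirtualSyncGivenUp;
    };

    #define GEN_CHECK_OFF_TOY(type, member) \
        printf("check_off(" #type ", " #member ", %u);\n", (unsigned)offsetof(struct type, member))

    int main(void)
    {
        GEN_CHECK_OFF_TOY(TOYTM, u64VirtualSync);
        GEN_CHECK_OFF_TOY(TOYTM, offVirtualSync);
        GEN_CHECK_OFF_TOY(TOYTM, offVirtualSyncGivenUp);
        return 0;
    }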