Changeset 20050 in vbox for trunk/src/VBox/VMM
Timestamp: May 26, 2009 5:12:12 PM (16 years ago)
Location:  trunk/src/VBox/VMM
Files:     4 edited
trunk/src/VBox/VMM/TM.cpp (r19820 → r20050)

@@ -396 +396 @@
  else
  CPUMR3SetCR4Feature(pVM, 0, ~X86_CR4_TSD);
- LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool\n"
+ LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%'RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool\n"
  "TM: fMaybeUseOffsettedHostTSC=%RTbool TSCTiedToExecution=%RTbool TSCNotTiedToHalt=%RTbool\n",
  pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.fTSCVirtualized, pVM->tm.s.fTSCUseRealTSC,

@@ -447 +447 @@
  if ( (iPeriod > 0 && u64 <= pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod - 1].u64Start) \
  || u64 >= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold) \
- return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Configuration error: Invalid start of period #" #iPeriod ": %RU64"), u64); \
+ return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Configuration error: Invalid start of period #" #iPeriod ": %'RU64"), u64); \
  pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod].u64Start = u64; \
  rc = CFGMR3QueryU32(pCfgHandle, "CatchUpPrecentage" #iPeriod, &pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod].u32Percentage); \

@@ -542 +542 @@
  STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataR0.c1nsSteps,STAMTYPE_U32, "/TM/R0/1nsSteps", STAMUNIT_OCCURENCES, "Virtual time 1ns steps (due to TSC / GIP variations).");
  STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataR0.cBadPrev, STAMTYPE_U32, "/TM/R0/cBadPrev", STAMUNIT_OCCURENCES, "Times the previous virtual time was considered erratic (shouldn't ever happen).");
- STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataRC.c1nsSteps,STAMTYPE_U32, "/TM/GC/1nsSteps", STAMUNIT_OCCURENCES, "Virtual time 1ns steps (due to TSC / GIP variations).");
- STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataRC.cBadPrev, STAMTYPE_U32, "/TM/GC/cBadPrev", STAMUNIT_OCCURENCES, "Times the previous virtual time was considered erratic (shouldn't ever happen).");
+ STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataRC.c1nsSteps,STAMTYPE_U32, "/TM/RC/1nsSteps", STAMUNIT_OCCURENCES, "Virtual time 1ns steps (due to TSC / GIP variations).");
+ STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.VirtualGetRawDataRC.cBadPrev, STAMTYPE_U32, "/TM/RC/cBadPrev", STAMUNIT_OCCURENCES, "Times the previous virtual time was considered erratic (shouldn't ever happen).");
  STAM_REL_REG( pVM,(void*)&pVM->tm.s.offVirtualSync, STAMTYPE_U64, "/TM/VirtualSync/CurrentOffset", STAMUNIT_NS, "The current offset. (subtract GivenUp to get the lag)");
  STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.offVirtualSyncGivenUp, STAMTYPE_U64, "/TM/VirtualSync/GivenUp", STAMUNIT_NS, "Nanoseconds of the 'CurrentOffset' that's been given up and won't ever be attemted caught up with.");

@@ -552 +552 @@
  STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataR0.cExpired, STAMTYPE_U32, "/TM/R0/cExpired", STAMUNIT_OCCURENCES, "Times the TSC interval expired (overlaps 1ns steps).");
  STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataR0.cUpdateRaces,STAMTYPE_U32, "/TM/R0/cUpdateRaces", STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp.");
- STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataRC.cExpired, STAMTYPE_U32, "/TM/GC/cExpired", STAMUNIT_OCCURENCES, "Times the TSC interval expired (overlaps 1ns steps).");
- STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataRC.cUpdateRaces,STAMTYPE_U32, "/TM/GC/cUpdateRaces", STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp.");
+ STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataRC.cExpired, STAMTYPE_U32, "/TM/RC/cExpired", STAMUNIT_OCCURENCES, "Times the TSC interval expired (overlaps 1ns steps).");
+ STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataRC.cUpdateRaces,STAMTYPE_U32, "/TM/RC/cUpdateRaces", STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp.");
  STAM_REG(pVM, &pVM->tm.s.StatDoQueues, STAMTYPE_PROFILE, "/TM/DoQueues", STAMUNIT_TICKS_PER_CALL, "Profiling timer TMR3TimerQueuesDo.");
  STAM_REG(pVM, &pVM->tm.s.aStatDoQueues[TMCLOCK_VIRTUAL], STAMTYPE_PROFILE_ADV, "/TM/DoQueues/Virtual", STAMUNIT_TICKS_PER_CALL, "Time spent on the virtual clock queue.");

@@ -968 +968 @@
  ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSync, offNew);
  ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
- LogRel(("TM: Aborting catch-up attempt on reset with a %RU64 ns lag on reset; new total: %RU64 ns\n", offNew - offOld, offNew));
+ LogRel(("TM: Aborting catch-up attempt on reset with a %'RU64 ns lag on reset; new total: %'RU64 ns\n", offNew - offOld, offNew));
  }
  }

@@ -1098 +1098 @@
  if (u64Hz != TMCLOCK_FREQ_VIRTUAL)
  {
- AssertMsgFailed(("The virtual clock frequency differs! Saved: %RU64 Binary: %RU64\n",
+ AssertMsgFailed(("The virtual clock frequency differs! Saved: %'RU64 Binary: %'RU64\n",
  u64Hz, TMCLOCK_FREQ_VIRTUAL));
  return VERR_SSM_VIRTUAL_CLOCK_HZ;

@@ -1126 +1126 @@
  if (u64Hz != TMCLOCK_FREQ_REAL)
  {
- AssertMsgFailed(("The real clock frequency differs! Saved: %RU64 Binary: %RU64\n",
+ AssertMsgFailed(("The real clock frequency differs! Saved: %'RU64 Binary: %'RU64\n",
  u64Hz, TMCLOCK_FREQ_REAL));
  return VERR_SSM_VIRTUAL_CLOCK_HZ; /* missleading... */

@@ -1149 +1149 @@
  pVM->tm.s.cTSCTicksPerSecond = u64Hz;

- LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool (state load)\n",
+ LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%'RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool (state load)\n",
  pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.fTSCVirtualized, pVM->tm.s.fTSCUseRealTSC));

@@ -1386 +1386 @@
  {
  case TMTIMERSTATE_STOPPED:
- case TMTIMERSTATE_EXPIRED:
+ case TMTIMERSTATE_EXPIRED_DELIVER:
  break;

@@ -1407 +1407 @@
  * This shouldn't happen as the caller should make sure there are no races.
  */
+ case TMTIMERSTATE_EXPIRED_GET_UNLINK:
  case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
  case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:

@@ -1803 +1804 @@
  pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, pTimer->u64Expire, u64Now, pTimer->pszDesc));
  bool fRc;
- TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED, TMTIMERSTATE_ACTIVE, fRc);
+ TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED_GET_UNLINK, TMTIMERSTATE_ACTIVE, fRc);
  if (fRc)
  {

@@ -1822 +1823 @@
  pTimer->offPrev = 0;

-
  /* fire */
+ TM_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED_DELIVER);
  // tmUnlock(pVM);
  switch (pTimer->enmType)

@@ -1843 +1844 @@

  /* change the state if it wasn't changed already in the handler. */
- TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED, fRc);
+ TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED_DELIVER, fRc);
  Log2(("tmR3TimerQueueRun: new state %s\n", tmTimerState(pTimer->enmState)));
  }

@@ -1902 +1903 @@
  {
  /* Calc 'now'. */
- bool fStopCatchup= false;
- bool fUpdateStuff= false;
- uint64_t off= pVM->tm.s.offVirtualSync;
+ bool fStopCatchup = false;
+ bool fUpdateStuff = false;
+ uint64_t off = pVM->tm.s.offVirtualSync;
  if (pVM->tm.s.fVirtualSyncCatchUp)

@@ -1914 +1915 @@
  {
  off -= u64Sub;
- Log4(("TM: %RU64/%RU64: sub %RU64 (run)\n", u64VirtualNow - off, off - offSyncGivenUp, u64Sub));
+ Log4(("TM: %'RU64/-%'8RU64: sub %'RU64 [tmR3TimerQueueRunVirtualSync]\n", u64VirtualNow - off, off - offSyncGivenUp, u64Sub));
  }
  else

@@ -1921 +1922 @@
  fStopCatchup = true;
  off = offSyncGivenUp;
- Log4(("TM: %RU64/0: caught up (run)\n", u64VirtualNow));
  }
  }

@@ -1935 +1935 @@
  ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64Now);
  ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
- Log4(("TM: %RU64/%RU64: exp tmr (run)\n", u64Now, u64VirtualNow - u64Now - offSyncGivenUp));
+ Log4(("TM: %'RU64/-%'8RU64: exp tmr [tmR3TimerQueueRunVirtualSync]\n", u64Now, u64VirtualNow - u64Now - offSyncGivenUp));
  }
  else if (fUpdateStuff)

@@ -1942 +1942 @@
  ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64VirtualNow);
  if (fStopCatchup)
+ {
  ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
+ Log4(("TM: %'RU64/0: caught up [tmR3TimerQueueRunVirtualSync]\n", u64VirtualNow));
+ }
  }
  }

@@ -1970 +1973 @@
  pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, pTimer->u64Expire, u64Now, pTimer->pszDesc));
  bool fRc;
- TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED, TMTIMERSTATE_ACTIVE, fRc);
+ TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED_GET_UNLINK, TMTIMERSTATE_ACTIVE, fRc);
  if (fRc)
  {

@@ -1989 +1992 @@
  /* advance the clock - don't permit timers to be out of order or armed in the 'past'. */
  #ifdef VBOX_STRICT
- AssertMsg(pTimer->u64Expire >= u64Prev, ("%RU64 < %RU64 %s\n", pTimer->u64Expire, u64Prev, pTimer->pszDesc));
+ AssertMsg(pTimer->u64Expire >= u64Prev, ("%'RU64 < %'RU64 %s\n", pTimer->u64Expire, u64Prev, pTimer->pszDesc));
  u64Prev = pTimer->u64Expire;
  #endif

@@ -1996 +1999 @@

  /* fire */
+ TM_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED_DELIVER);
  switch (pTimer->enmType)
  {

@@ -2008 +2012 @@

  /* change the state if it wasn't changed already in the handler. */
- TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED, fRc);
+ TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED_DELIVER, fRc);
  Log2(("tmR3TimerQueueRun: new state %s\n", tmTimerState(pTimer->enmState)));
  }

@@ -2025 +2029 @@
  const uint64_t u64VirtualNow2 = TMVirtualGetNoCheck(pVM);
  Assert(u64VirtualNow2 >= u64VirtualNow);
- AssertMsg(pVM->tm.s.u64VirtualSync >= u64Now, ("%RU64 < %RU64\n", pVM->tm.s.u64VirtualSync, u64Now));
+ AssertMsg(pVM->tm.s.u64VirtualSync >= u64Now, ("%'RU64 < %'RU64\n", pVM->tm.s.u64VirtualSync, u64Now));
  const uint64_t offSlack = pVM->tm.s.u64VirtualSync - u64Now;
  STAM_STATS({

@@ -2068 +2072 @@
  STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
  ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
- Log4(("TM: %RU64/%RU64: caught up\n", u64VirtualNow2 - offNew, offLag));
+ Log4(("TM: %'RU64/-%'8RU64: caught up [pt]\n", u64VirtualNow2 - offNew, offLag));
  }
  else if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold)

@@ -2081 +2085 @@
  STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupAdjust[i]);
  ASMAtomicWriteU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
- Log4(("TM: %RU64/%RU64: adj %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
+ Log4(("TM: %'RU64/%'8RU64: adj %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
  }
  pVM->tm.s.u64VirtualSyncCatchUpPrev = u64VirtualNow2;

@@ -2092 +2096 @@
  ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
  ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
- Log4(("TM: %RU64/%RU64: give up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
- LogRel(("TM: Giving up catch-up attempt at a %RU64 ns lag; new total: %RU64 ns\n", offLag, offNew));
+ Log4(("TM: %'RU64/%'8RU64: give up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
+ LogRel(("TM: Giving up catch-up attempt at a %'RU64 ns lag; new total: %'RU64 ns\n", offLag, offNew));
  }
  }

@@ -2109 +2113 @@
  ASMAtomicWriteU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
  ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, true);
- Log4(("TM: %RU64/%RU64: catch-up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
+ Log4(("TM: %'RU64/%'8RU64: catch-up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
  }
  else

@@ -2116 +2120 @@
  STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting);
  ASMAtomicWriteU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
- Log4(("TM: %RU64/%RU64: give up\n", u64VirtualNow2 - offNew, offLag));
- LogRel(("TM: Not bothering to attempt catching up a %RU64 ns lag; new total: %RU64\n", offLag, offNew));
+ Log4(("TM: %'RU64/%'8RU64: give up\n", u64VirtualNow2 - offNew, offLag));
+ LogRel(("TM: Not bothering to attempt catching up a %'RU64 ns lag; new total: %'RU64\n", offLag, offNew));
  }
  }

@@ -2231 +2235 @@
  return SSMR3PutU64(pSSM, pTimer->u64Expire);

- case TMTIMERSTATE_EXPIRED:
+ case TMTIMERSTATE_EXPIRED_GET_UNLINK:
+ case TMTIMERSTATE_EXPIRED_DELIVER:
  case TMTIMERSTATE_DESTROY:
  case TMTIMERSTATE_FREE:
trunk/src/VBox/VMM/TMInternal.h (r19820 → r20050)

@@ -70 +70 @@
  /** Timer is active. */
  TMTIMERSTATE_ACTIVE,
- /** Timer is expired, is being delivered. */
- TMTIMERSTATE_EXPIRED,
+ /** Timer is expired, getting expire and unlinking. */
+ TMTIMERSTATE_EXPIRED_GET_UNLINK,
+ /** Timer is expired and is being delivered. */
+ TMTIMERSTATE_EXPIRED_DELIVER,

  /** Timer is stopped but still in the active list.
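The enum change above, together with the TM.cpp hunks earlier, turns the old single EXPIRED state into a two-step handshake: the queue runner first claims an active timer as EXPIRED_GET_UNLINK, unlinks it, switches it to EXPIRED_DELIVER, runs the callback, and finally tries to move it to STOPPED unless the callback already re-armed the timer. The following is a minimal, self-contained model of that handshake, not VBox code: the state names mirror TMTIMERSTATE_*, the queue, locking and device-callback plumbing are omitted, and the competing-writer protection is reduced to a plain C11 compare-and-swap; the printf stands in for the real callback dispatch.

/* Simplified model of the two-phase timer expiry introduced by this changeset. */
#include <stdatomic.h>
#include <stdio.h>

typedef enum { STOPPED, ACTIVE, EXPIRED_GET_UNLINK, EXPIRED_DELIVER } TIMERSTATE;

typedef struct { _Atomic TIMERSTATE enmState; } TIMER;

static int TrySetState(TIMER *pTimer, TIMERSTATE enmNew, TIMERSTATE enmExpected)
{
    /* Models TM_TRY_SET_STATE: only succeeds if the state is still enmExpected. */
    return atomic_compare_exchange_strong(&pTimer->enmState, &enmExpected, enmNew);
}

static void RunExpired(TIMER *pTimer)
{
    /* Phase 1: claim the active timer for unlinking (EXPIRED_GET_UNLINK). */
    if (!TrySetState(pTimer, EXPIRED_GET_UNLINK, ACTIVE))
        return;                         /* someone else changed the state; skip it */
    /* ...unlinking from the active list would happen here... */

    /* Phase 2: mark it as being delivered and fire the callback. */
    atomic_store(&pTimer->enmState, EXPIRED_DELIVER);
    printf("timer callback fired\n");

    /* Stop it unless the callback changed the state (e.g. re-armed the timer). */
    TrySetState(pTimer, STOPPED, EXPIRED_DELIVER);
}

int main(void)
{
    TIMER Timer = { ACTIVE };
    RunExpired(&Timer);
    return Timer.enmState == STOPPED ? 0 : 1;
}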
trunk/src/VBox/VMM/VMMAll/TMAll.cpp (r19821 → r20050)

@@ -466 +466 @@
  #endif
  }
- LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
+ LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
  return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
  }

@@ -514 +514 @@

  STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
- LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
+ LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
  return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
  }

@@ -612 +612 @@
  }
  STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
- LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
+ LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
  return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
  }

@@ -740 +740 @@
  */
  TMTIMERSTATE enmState = pTimer->enmState;
- Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%llu\n",
+ Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
  pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
  switch (enmState)
  {
- case TMTIMERSTATE_EXPIRED:
+ case TMTIMERSTATE_EXPIRED_DELIVER:
  case TMTIMERSTATE_STOPPED:
  if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))

@@ -753 +753 @@
  || pTimer->CTX_SUFF(pVM)->tm.s.fVirtualSyncTicking
  || u64Expire >= pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync,
- ("%RU64 < %RU64 %s\n", u64Expire, pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
+ ("%'RU64 < %'RU64 %s\n", u64Expire, pTimer->CTX_SUFF(pVM)->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
  pTimer->u64Expire = u64Expire;
  TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);

@@ -799 +799 @@


+ case TMTIMERSTATE_EXPIRED_GET_UNLINK:
  case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
  case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:

@@ -953 +954 @@
  switch (enmState)
  {
- case TMTIMERSTATE_EXPIRED:
+ case TMTIMERSTATE_EXPIRED_DELIVER:
  //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
  return VERR_INVALID_PARAMETER;

@@ -989 +990 @@
  break;

+ case TMTIMERSTATE_EXPIRED_GET_UNLINK:
  case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
  case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:

@@ -1051 +1053 @@
  return ~(uint64_t)0;
  }
- //Log2(("TMTimerGet: returns %llu(pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
+ //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
  // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
  return u64;

@@ -1330 +1332 @@
  switch (enmState)
  {
- case TMTIMERSTATE_EXPIRED:
+ case TMTIMERSTATE_EXPIRED_GET_UNLINK:
+ case TMTIMERSTATE_EXPIRED_DELIVER:
  case TMTIMERSTATE_STOPPED:
  case TMTIMERSTATE_PENDING_STOP:

@@ -1341 +1344 @@
  case TMTIMERSTATE_PENDING_RESCHEDULE:
  case TMTIMERSTATE_PENDING_SCHEDULE:
- Log2(("TMTimerGetExpire: returns %llu(pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
+ Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
  pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
  return pTimer->u64Expire;

@@ -1388 +1391 @@
  {
  case TMTIMERSTATE_STOPPED:
- case TMTIMERSTATE_EXPIRED:
+ case TMTIMERSTATE_EXPIRED_GET_UNLINK:
+ case TMTIMERSTATE_EXPIRED_DELIVER:
  case TMTIMERSTATE_PENDING_STOP:
  case TMTIMERSTATE_PENDING_STOP_SCHEDULE:

@@ -1436 +1440 @@
  CASE( 1,STOPPED);
  CASE( 2,ACTIVE);
- CASE( 3,EXPIRED);
- CASE( 4,PENDING_STOP);
- CASE( 5,PENDING_STOP_SCHEDULE);
- CASE( 6,PENDING_SCHEDULE_SET_EXPIRE);
- CASE( 7,PENDING_SCHEDULE);
- CASE( 8,PENDING_RESCHEDULE_SET_EXPIRE);
- CASE( 9,PENDING_RESCHEDULE);
- CASE(10,DESTROY);
- CASE(11,FREE);
+ CASE( 3,EXPIRED_GET_UNLINK);
+ CASE( 4,EXPIRED_DELIVER);
+ CASE( 5,PENDING_STOP);
+ CASE( 6,PENDING_STOP_SCHEDULE);
+ CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
+ CASE( 8,PENDING_SCHEDULE);
+ CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
+ CASE(10,PENDING_RESCHEDULE);
+ CASE(11,DESTROY);
+ CASE(12,FREE);
  default:
  AssertMsgFailed(("Invalid state enmState=%d\n", enmState));

@@ -1597 +1602 @@
  case TMTIMERSTATE_STOPPED:
  case TMTIMERSTATE_ACTIVE:
- case TMTIMERSTATE_EXPIRED:
+ case TMTIMERSTATE_EXPIRED_GET_UNLINK:
+ case TMTIMERSTATE_EXPIRED_DELIVER:
  default:
  AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",

@@ -1623 +1629 @@
  */
  int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
- Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32}\n", pQueue, pQueue->enmClock, offNext));
+ Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
  if (!offNext)
  return;

@@ -1644 +1650 @@
  Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
  } /* foreach timer in current schedule batch. */
+ Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
  }


@@ -1720 +1727 @@
  case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
  case TMTIMERSTATE_STOPPED:
- case TMTIMERSTATE_EXPIRED:
+ case TMTIMERSTATE_EXPIRED_DELIVER:
  {
  Assert(!pCur->offNext);

@@ -1740 +1747 @@

  /* shouldn't get here! */
+ case TMTIMERSTATE_EXPIRED_GET_UNLINK:
  case TMTIMERSTATE_DESTROY:
  default:
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp (r19810 → r20050)

@@ -419 +419 @@
  {
  off -= u64Sub;
- Log4(("TM: %RU64/%RU64: sub %RU32\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
+ Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
  }
  else

@@ -427 +427 @@
  off = pVM->tm.s.offVirtualSyncGivenUp;
  fStop = true;
- Log4(("TM: %RU64/0: caught up\n", u64));
+ Log4(("TM: %'RU64/0: caught up [vsghcul]\n", u64));
  }
  }

@@ -465 +465 @@
  VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
  Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
- Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
+ Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
  tmVirtualSyncUnlock(pVM);

@@ -477 +477 @@
  STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);

+ Log6(("tmVirtualSyncGetHandleCatchUpLocked -> %'RU64\n", u64));
  return u64;
  }

@@ -498 +499 @@
  tmVirtualSyncUnlock(pVM);
  STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
+ Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
  return u64;
  }

@@ -526 +528 @@
  PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
  VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
- Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
- Log4(("TM: %RU64/%RU64: exp tmr=>ff\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
+ Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, !!VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+ Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
  tmVirtualSyncUnlock(pVM);

@@ -538 +540 @@
  }
  STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
+ Log6(("tmVirtualSyncGetLocked -> %'RU64\n", u64));
  return u64;
  }

@@ -596 +599 @@
  {
  STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
+ Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless]\n", off));
  return off;
  }

@@ -607 +611 @@
  {
  STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
+ Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
  return off;
  }

@@ -645 +650 @@
  && cOuterTries > 0)
  continue;
+ Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
  return off;
  }

@@ -668 +674 @@
  {
  off -= u64Sub;
- Log4(("TM: %RU64/%RU64: sub %RU32 (NoLock)\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
+ Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [NoLock]\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
  }
  else

@@ -675 +681 @@
  STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
  off = offGivenUp;
- Log4(("TM: %RU64/0: caught up\n", u64));
+ Log4(("TM: %'RU64/0: caught up [NoLock]\n", u64));
  }
  }

@@ -719 +725 @@
  #endif
  STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
- Log4(("TM: %RU64/%RU64: exp tmr=>ff (NoLock)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
+ Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
  }
  else
- Log4(("TM: %RU64/%RU64: exp tmr (NoLock)\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
+ Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
  STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
  }

+ Log6(("tmVirtualSyncGetEx -> %'RU64\n", u64));
  return u64;
  }
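Most of the remaining churn in all four files replaces %RU64 (and a couple of %llu) with %'RU64 in log and assertion format strings, and tags the Log4 lines with the name of the emitting function. The sketch below only illustrates the intent of the format change, namely that large tick and nanosecond counts become readable in the log; it is plain C with hand-rolled digit grouping rather than IPRT code, since RTStrFormat handles the real %'RU64 specifier and the exact separator character it emits is not shown by this changeset. The sample frequency value is made up for the example.

/* Hedged stand-in for the effect of the %'RU64 log format: print a 64-bit
 * tick count both in hex and with digit grouping for readability. */
#include <inttypes.h>
#include <stdio.h>

static void PrintGrouped(uint64_t u64)
{
    char szRaw[32], szOut[48];
    int  cchRaw = snprintf(szRaw, sizeof(szRaw), "%" PRIu64, u64);
    int  iOut   = 0;
    for (int i = 0; i < cchRaw; i++)
    {
        if (i > 0 && (cchRaw - i) % 3 == 0)
            szOut[iOut++] = ' ';        /* group every three digits */
        szOut[iOut++] = szRaw[i];
    }
    szOut[iOut] = '\0';
    printf("cTSCTicksPerSecond=%#" PRIx64 " (%s)\n", u64, szOut);
}

int main(void)
{
    PrintGrouped(UINT64_C(2593516692)); /* example TSC frequency, ~2.59 GHz */
    return 0;
}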