Changeset 19444 in vbox
Timestamp: May 6, 2009 4:21:00 PM
Location:  trunk/src/VBox/VMM
Files:     4 edited
trunk/src/VBox/VMM/TM.cpp
r19400 → r19444

     /* The rest is done in TMR3InitFinalize since it's too early to call PDM. */
 
+    /*
+     * Init the lock.
+     */
+    rc = PDMR3CritSectInit(pVM, &pVM->tm.s.EmtLock, "TM EMT Lock");
+    if (RT_FAILURE(rc))
+        return rc;
 
     /*
…
     LogFlow(("TMR3Reset:\n"));
     VM_ASSERT_EMT(pVM);
+    tmLock(pVM);
 
     /*
…
     tmTimerQueuesSanityChecks(pVM, "TMR3Reset");
 #endif
+
     VM_FF_CLEAR(pVM, VM_FF_TIMER);
+    tmUnlock(pVM);
 }
…
 static DECLCALLBACK(int) tmR3Save(PVM pVM, PSSMHANDLE pSSM)
 {
-    unsigned i;
-
     LogFlow(("tmR3Save:\n"));
 #ifdef VBOX_STRICT
-    for (i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
…
     SSMR3PutU64(pSSM, TMCLOCK_FREQ_REAL);
 
-    for (i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
…
 static DECLCALLBACK(int) tmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
 {
-    unsigned i;
     LogFlow(("tmR3Load:\n"));
 
 #ifdef VBOX_STRICT
-    for (i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
…
 
     /* the cpu tick clock. */
-    for (i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
…
 
     /* insert into the list of created timers. */
+    tmLock(pVM);
     pTimer->pBigPrev = NULL;
     pTimer->pBigNext = pVM->tm.s.pCreated;
…
     tmTimerQueuesSanityChecks(pVM, "tmR3TimerCreate");
 #endif
+    tmUnlock(pVM);
 
     *ppTimer = pTimer;
…
         return VERR_INVALID_PARAMETER;
 
+    tmLock(pVM);
     PTMTIMER pCur = pVM->tm.s.pCreated;
     while (pCur)
…
         }
     }
+    tmUnlock(pVM);
+
     LogFlow(("TMR3TimerDestroyDevice: returns VINF_SUCCESS\n"));
     return VINF_SUCCESS;
…
         return VERR_INVALID_PARAMETER;
 
+    tmLock(pVM);
     PTMTIMER pCur = pVM->tm.s.pCreated;
     while (pCur)
…
         }
     }
+    tmUnlock(pVM);
+
     LogFlow(("TMR3TimerDestroyDriver: returns VINF_SUCCESS\n"));
     return VINF_SUCCESS;
…
             )
         &&  !VM_FF_ISSET(pVM, VM_FF_TIMER)
+        &&  !pVM->tm.s.fRunningQueues
        )
     {
…
 VMMR3DECL(void) TMR3TimerQueuesDo(PVM pVM)
 {
+    /*
+     * Only one EMT should be doing this at a time.
+     */
+    VM_FF_CLEAR(pVM, VM_FF_TIMER);
+    if (!ASMBitTestAndSet(&pVM->tm.s.fRunningQueues, 0))
+        return;
+
     STAM_PROFILE_START(&pVM->tm.s.StatDoQueues, a);
     Log2(("TMR3TimerQueuesDo:\n"));
-
-    /* SMP: quick hack to fend of the wildlife... */ /** @todo SMP */
-    if (    pVM->cCPUs > 1
-        &&  VMMGetCpuId(pVM) != 0)
-        return;
+    tmLock(pVM);
 
     /*
…
     STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatDoQueuesRun, r3);
 
-    /* done. */
-    VM_FF_CLEAR(pVM, VM_FF_TIMER);
-
 #ifdef VBOX_STRICT
     /* check that we didn't screwup. */
…
     Log2(("TMR3TimerQueuesDo: returns void\n"));
     STAM_PROFILE_STOP(&pVM->tm.s.StatDoQueues, a);
+
+    /* done */
+    ASMAtomicBitClear(&pVM->tm.s.fRunningQueues, 0);
+    tmUnlock(pVM);
 }
…
                                 "Expire",
                                 "State");
+    tmLock(pVM);
     for (PTMTIMERR3 pTimer = pVM->tm.s.pCreated; pTimer; pTimer = pTimer->pBigNext)
     {
…
                         pTimer->pszDesc);
     }
+    tmUnlock(pVM);
 }
…
     for (unsigned iQueue = 0; iQueue < TMCLOCK_MAX; iQueue++)
     {
+        tmLock(pVM);
         for (PTMTIMERR3 pTimer = TMTIMER_GET_HEAD(&pVM->tm.s.paTimerQueuesR3[iQueue]);
              pTimer;
…
                             pTimer->pszDesc);
         }
+        tmUnlock(pVM);
     }
 }
…
      * Read the times first to avoid more than necessary time variation.
      */
-    const uint64_t u64Virtual = TMVirtualGet(pVM);
+    const uint64_t u64Virtual     = TMVirtualGet(pVM);
     const uint64_t u64VirtualSync = TMVirtualSyncGet(pVM);
-    const uint64_t u64Real = TMRealGet(pVM);
+    const uint64_t u64Real        = TMRealGet(pVM);
 
     for (unsigned i = 0; i < pVM->cCPUs; i++)
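The heart of the TM.cpp change is the one-runner guard that TMR3TimerQueuesDo now places around queue processing. As a minimal standalone sketch of that guard pattern (illustrative only, not VBox code; it assumes the guard admits the thread that finds the bit clear, and it keeps the flag in a local variable rather than pVM->tm.s.fRunningQueues):

#include <iprt/asm.h>   /* ASMBitTestAndSet, ASMAtomicBitClear */
#include <iprt/types.h>

static uint32_t volatile g_fRunningQueues = 0;  /* hypothetical stand-in for pVM->tm.s.fRunningQueues */

static void runTimerQueuesOnce(void)
{
    /* Test-and-set bit 0; only the caller that flips it from clear to set
       proceeds. (The changeset uses ASMBitTestAndSet; IPRT also provides
       ASMAtomicBitTestAndSet when a locked variant is required.) */
    if (ASMBitTestAndSet(&g_fRunningQueues, 0))
        return;     /* another EMT is already running the queues */

    /* ... schedule and fire the due timers ... */

    /* Clear the bit so the next caller may enter. */
    ASMAtomicBitClear(&g_fRunningQueues, 0);
}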
trunk/src/VBox/VMM/TMInternal.h
r19325 → r19444

 #include <iprt/timer.h>
 #include <VBox/stam.h>
+#include <VBox/pdmcritsect.h>
 
 __BEGIN_DECLS
…
     uint32_t                    u32TimerMillies;
 
-    /** Alignment padding to ensure that the statistics are 64-bit aligned when using GCC. */
-    uint32_t                    u32Padding1;
+    /** Makes sure only one EMT is running the queues. */
+    bool volatile               fRunningQueues;
+
+    /** Lock serializing EMT access to TM. */
+    PDMCRITSECT                 EmtLock;
 
     /** TMR3TimerQueuesDo
…
 typedef TMCPU *PTMCPU;
 
+int                     tmLock(PVM pVM);
+int                     tmTryLock(PVM pVM);
+void                    tmUnlock(PVM pVM);
+/** Checks that the caller owns the EMT lock. */
+#define TM_ASSERT_EMT_LOCK(pVM) Assert(PDMCritSectIsOwner(&pVM->tm.s.EmtLock))
+
 const char             *tmTimerState(TMTIMERSTATE enmState);
 void                    tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue);
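The new internal API gives TM code a uniform way to state and check the locking contract; tmTimerQueueSchedule and tmTimerQueuesSanityChecks use it below. A hypothetical TM-internal helper might apply it like this (sketch only; tmSomeQueueOp is not a real function):

/* Hypothetical helper: callers must already own the EMT lock. In strict
   builds TM_ASSERT_EMT_LOCK expands to
   Assert(PDMCritSectIsOwner(&pVM->tm.s.EmtLock)). */
static void tmSomeQueueOp(PVM pVM)
{
    TM_ASSERT_EMT_LOCK(pVM);
    /* ... safe to walk pVM->tm.s.paTimerQueuesR3 here ... */
}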
trunk/src/VBox/VMM/VMMAll/TMAll.cpp
r19400 → r19444

 
 /**
+ * Try take the EMT/TM lock, wait in ring-3 return VERR_SEM_BUSY in R0/RC.
+ *
+ * @retval  VINF_SUCCESS on success (always in ring-3).
+ * @retval  VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
+ *
+ * @param   pVM         The VM handle.
+ */
+int tmLock(PVM pVM)
+{
+    VM_ASSERT_EMT(pVM);
+    int rc = PDMCritSectEnter(&pVM->tm.s.EmtLock, VERR_SEM_BUSY);
+    return rc;
+}
+
+
+/**
+ * Try take the EMT/TM lock, no waiting.
+ *
+ * @retval  VINF_SUCCESS on success.
+ * @retval  VERR_SEM_BUSY if busy.
+ *
+ * @param   pVM         The VM handle.
+ */
+int tmTryLock(PVM pVM)
+{
+    VM_ASSERT_EMT(pVM);
+    int rc = PDMCritSectTryEnter(&pVM->tm.s.EmtLock);
+    return rc;
+}
+
+
+/**
+ * Release EMT/TM lock.
+ *
+ * @param   pVM         The VM handle.
+ */
+void tmUnlock(PVM pVM)
+{
+    PDMCritSectLeave(&pVM->tm.s.EmtLock);
+}
+
+
+/**
  * Notification that execution is about to start.
…
 {
     PVM pVM = pTimer->CTX_SUFF(pVM);
-    if (VM_IS_EMT(pVM))
+    if (    VM_IS_EMT(pVM)
+        &&  RT_SUCCESS(tmTryLock(pVM)))
     {
         STAM_PROFILE_START(&pVM->tm.s.CTXALLSUFF(StatScheduleOne), a);
…
 #endif
         STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
+        tmUnlock(pVM);
     }
     else if (!VM_FF_ISSET(pVM, VM_FF_TIMER)) /**@todo only do this when arming the timer. */
…
 VMMDECL(uint64_t) TMTimerPoll(PVM pVM)
 {
+    int rc = tmLock(pVM); /* play safe for now */
+
     /*
      * Return straight away if the timer FF is already set.
…
     {
         STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
+#ifndef IN_RING3
+        if (RT_SUCCESS(rc))
+#endif
+            tmUnlock(pVM);
         return 0;
     }
…
     if (i64Delta1 <= 0)
     {
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtual);
         LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
-        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtual);
+#ifndef IN_RING3
+        if (RT_SUCCESS(rc))
+#endif
+            tmUnlock(pVM);
         VM_FF_SET(pVM, VM_FF_TIMER);
 #ifdef IN_RING3
…
     if (i64Delta2 <= 0)
     {
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
+#ifndef IN_RING3
+        if (RT_SUCCESS(rc))
+#endif
+            tmUnlock(pVM);
         LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
-        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
         VM_FF_SET(pVM, VM_FF_TIMER);
 #ifdef IN_RING3
…
      */
     STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
+#ifndef IN_RING3
+    if (RT_SUCCESS(rc))
+#endif
+        tmUnlock(pVM);
     return RT_MIN(i64Delta1, i64Delta2);
 }
…
 VMMDECL(uint64_t) TMTimerPollGIP(PVM pVM, uint64_t *pu64Delta)
 {
+    int rc = tmLock(pVM); /* play safe for now. */
+
     /*
      * Return straight away if the timer FF is already set.
…
     {
         STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
+#ifndef IN_RING3
+        if (RT_SUCCESS(rc))
+#endif
+            tmUnlock(pVM);
         *pu64Delta = 0;
         return 0;
…
      * Get current time and check the expire times of the two relevant queues.
      */
-    const uint64_t u64Now = TMVirtualGet(pVM);
+    const uint64_t u64Now     = TMVirtualGet(pVM);
 
     /*
      * TMCLOCK_VIRTUAL
      */
-    const uint64_t u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
-    const int64_t i64Delta1= u64Expire1 - u64Now;
+    const uint64_t u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
+    const int64_t  i64Delta1  = u64Expire1 - u64Now;
     if (i64Delta1 <= 0)
     {
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtual);
+#ifndef IN_RING3
+        if (RT_SUCCESS(rc))
+#endif
+            tmUnlock(pVM);
         LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
-        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtual);
         VM_FF_SET(pVM, VM_FF_TIMER);
 #ifdef IN_RING3
…
      * we have to adjust the 'now' but when have to adjust the delta as well.
      */
-    const uint64_t u64Expire2 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
-    uint64_t u64VirtualSyncNow;
+    const uint64_t u64Expire2 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
+    uint64_t       u64VirtualSyncNow;
     if (!pVM->tm.s.fVirtualSyncTicking)
         u64VirtualSyncNow = pVM->tm.s.u64VirtualSync;
…
         }
     }
+
     int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
     if (i64Delta2 <= 0)
     {
+        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
+#ifndef IN_RING3
+        if (RT_SUCCESS(rc))
+#endif
+            tmUnlock(pVM);
         LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
-        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
         VM_FF_SET(pVM, VM_FF_TIMER);
 #ifdef IN_RING3
…
         u64GipTime += pVM->tm.s.u64VirtualWarpDriveStart;
     }
+
+#ifndef IN_RING3
+    if (RT_SUCCESS(rc))
+#endif
+        tmUnlock(pVM);
     return u64GipTime;
 }
…
  * @param   pQueue          The timer queue.
  * @param   pTimer          The timer that needs scheduling.
+ *
+ * @remarks Called while owning the lock.
  */
 DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
…
  * @param   pVM             The VM to run the timers for.
  * @param   pQueue          The queue to schedule.
+ *
+ * @remarks Called while owning the lock.
  */
 void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
 {
-    VM_ASSERT_EMT(pVM);
+    TM_ASSERT_EMT_LOCK(pVM);
 
     /*
…
  *
  * @param   pVM             VM handle.
+ *
+ * @remarks Called while owning the lock.
  */
 void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
 {
+    TM_ASSERT_EMT_LOCK(pVM);
+
     /*
      * Check the linking of the active lists.
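TMTimerPoll and TMTimerPollGIP now repeat the same enter/leave bracket on every exit path: in ring-3 tmLock always succeeds, so the unlock is unconditional there, while in R0/RC PDMCritSectEnter can return VERR_SEM_BUSY and the lock must only be released when it was actually taken. A condensed sketch of that bracket (the function name is hypothetical):

static uint64_t tmExamplePoll(PVM pVM)  /* hypothetical name */
{
    int rc = tmLock(pVM);   /* ring-3: blocks until owned; R0/RC: may be VERR_SEM_BUSY */

    /* ... inspect the timer queues, compute a delta ... */

#ifndef IN_RING3
    if (RT_SUCCESS(rc))     /* only unlock if the lock was really acquired */
#endif
        tmUnlock(pVM);
    return 0;
}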
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
r19415 → r19444

     if (    fCheckTimers
         &&  !VM_FF_ISSET(pVM, VM_FF_TIMER)
+        &&  !pVM->tm.s.fRunningQueues
         &&  (   pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
             ||  (   pVM->tm.s.fVirtualSyncTicking
…
 VMMDECL(uint64_t) TMVirtualGet(PVM pVM)
 {
-    return TMVirtualGetEx(pVM, true /* check timers */);
+    return tmVirtualGet(pVM, true /* check timers */);
 }
…
 VMMDECL(uint64_t) TMVirtualSyncGetEx(PVM pVM, bool fCheckTimers)
 {
-    VM_ASSERT_EMT(pVM);
-
     uint64_t u64;
     if (pVM->tm.s.fVirtualSyncTicking)
…
     if (pVM->tm.s.fVirtualSyncCatchUp)
     {
+        int rc = tmTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. */
+
         const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
         uint64_t u64Delta = u64 - u64Prev;
…
             Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
         }
+
+        if (RT_SUCCESS(rc))
+            tmUnlock(pVM);
     }
…
     {
         u64 = u64Expire;
-        ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64);
-        ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
+        int rc = tmTryLock(pVM); /** @todo SMP: Here be dragons... Need to get back to this later. */
+        if (RT_SUCCESS(rc))
+        {
+            ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64);
+            ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
+            tmUnlock(pVM);
+        }
         if (    fCheckTimers
             &&  !VM_FF_ISSET(pVM, VM_FF_TIMER))
…
 VMMDECL(int) TMVirtualResume(PVM pVM)
 {
-    /** @note this is done only in specific cases (vcpu 0 init, termination, debug, out of memory conditions;
-     *        there is at least a race for fVirtualSyncTicking.
+    /*
+     * Note! this is done only in specific cases (vcpu 0 init, termination, debug,
+     *       out of memory conditions; there is at least a race for fVirtualSyncTicking.
      */
     if (ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking) == 1)
     {
+        int rc = tmLock(pVM); /* paranoia */
+
         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
         pVM->tm.s.u64VirtualRawPrev = 0;
…
         pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
         pVM->tm.s.fVirtualSyncTicking = true;
+
+        if (RT_SUCCESS(rc))
+            tmUnlock(pVM);
         return VINF_SUCCESS;
     }
…
 VMMDECL(int) TMVirtualPause(PVM pVM)
 {
-    /** @note this is done only in specific cases (vcpu 0 init, termination, debug, out of memory conditions;
-     *        there is at least a race for fVirtualSyncTicking.
+    /*
+     * Note! this is done only in specific cases (vcpu 0 init, termination, debug,
+     *       out of memory conditions; there is at least a race for fVirtualSyncTicking.
      */
     if (ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking) == 0)
     {
+        int rc = tmLock(pVM); /* paranoia */
+
         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
         pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
         pVM->tm.s.fVirtualSyncTicking = false;
+
+        if (RT_SUCCESS(rc))
+            tmUnlock(pVM);
         return VINF_SUCCESS;
     }
…
                     ("%RX32 is not between 2 and 20000 (inclusive).\n", u32Percent),
                     VERR_INVALID_PARAMETER);
+    tmLock(pVM);
 
     /*
…
     {
         int rc = TMVirtualPause(pVM);
-        AssertRCReturn(rc, rc);
+        AssertRC(rc);
         rc = TMCpuTickPause(pVCpu);
-        AssertRCReturn(rc, rc);
+        AssertRC(rc);
     }
…
     {
         int rc = TMVirtualResume(pVM);
-        AssertRCReturn(rc, rc);
+        AssertRC(rc);
         rc = TMCpuTickResume(pVCpu);
-        AssertRCReturn(rc, rc);
-    }
-
+        AssertRC(rc);
+    }
+
+    tmUnlock(pVM);
     return VINF_SUCCESS;
 }
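In the virtual-sync read paths this changeset settles for best-effort locking: take the lock when it is free, but still perform the pre-existing (and, per the @todo, still racy) work when it is busy, since failing the time query would be worse than the known race. The shape of that pattern, condensed into a hypothetical helper:

/* Hypothetical condensed form of the catch-up path locking. */
static void tmExampleBestEffort(PVM pVM)
{
    int rc = tmTryLock(pVM);    /* do not block; VERR_SEM_BUSY is tolerated here */

    /* ... read/advance the virtual sync clock state (racy if rc failed) ... */

    if (RT_SUCCESS(rc))         /* release only if actually acquired */
        tmUnlock(pVM);
}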