Changeset 19810 in vbox for trunk/src/VBox/VMM
- Timestamp: May 19, 2009 9:59:20 AM (16 years ago)
- Location: trunk/src/VBox/VMM
- Files: 4 edited
Legend:
- Unmodified: no prefix
- Added: prefixed with '+'
- Removed: prefixed with '-'
trunk/src/VBox/VMM/TM.cpp
r19803 → r19810:

    STAM_REG(pVM, &pVM->tm.s.StatPoll, STAMTYPE_COUNTER, "/TM/Poll", STAMUNIT_OCCURENCES, "TMTimerPoll calls.");
    STAM_REG(pVM, &pVM->tm.s.StatPollAlreadySet, STAMTYPE_COUNTER, "/TM/Poll/AlreadySet", STAMUNIT_OCCURENCES, "TMTimerPoll calls where the FF was already set.");
+   STAM_REG(pVM, &pVM->tm.s.StatPollELoop, STAMTYPE_COUNTER, "/TM/Poll/ELoop", STAMUNIT_OCCURENCES, "Times TMTimerPoll has given up getting a consistent virtual sync data set.");
+   STAM_REG(pVM, &pVM->tm.s.StatPollMiss, STAMTYPE_COUNTER, "/TM/Poll/Miss", STAMUNIT_OCCURENCES, "TMTimerPoll calls where nothing had expired.");
+   STAM_REG(pVM, &pVM->tm.s.StatPollRunning, STAMTYPE_COUNTER, "/TM/Poll/Running", STAMUNIT_OCCURENCES, "TMTimerPoll calls where the queues were being run.");
+   STAM_REG(pVM, &pVM->tm.s.StatPollSimple, STAMTYPE_COUNTER, "/TM/Poll/Simple", STAMUNIT_OCCURENCES, "TMTimerPoll calls where we could take the simple path.");
    STAM_REG(pVM, &pVM->tm.s.StatPollVirtual, STAMTYPE_COUNTER, "/TM/Poll/HitsVirtual", STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL queue.");
    STAM_REG(pVM, &pVM->tm.s.StatPollVirtualSync, STAMTYPE_COUNTER, "/TM/Poll/HitsVirtualSync", STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL_SYNC queue.");
-   STAM_REG(pVM, &pVM->tm.s.StatPollMiss, STAMTYPE_COUNTER, "/TM/Poll/Miss", STAMUNIT_OCCURENCES, "TMTimerPoll calls where nothing had expired.");
-   STAM_REG(pVM, &pVM->tm.s.StatPollRunning, STAMTYPE_COUNTER, "/TM/Poll/Running", STAMUNIT_OCCURENCES, "TMTimerPoll calls where the queues were being run.");

    STAM_REG(pVM, &pVM->tm.s.StatPollGIP, STAMTYPE_COUNTER, "/TM/PollGIP", STAMUNIT_OCCURENCES, "TMTimerPollGIP calls.");
    …
    STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSetFF, STAMTYPE_COUNTER, "/TM/VirtualGetSetFF", STAMUNIT_OCCURENCES, "Times we set the FF when calling TMTimerGet.");
    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGet, STAMTYPE_COUNTER, "/TM/VirtualSyncGet", STAMUNIT_OCCURENCES, "The number of times tmVirtualSyncGetEx was called.");
-   STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetELoop, STAMTYPE_COUNTER, "/TM/VirtualSyncGet/ELoop", STAMUNIT_OCCURENCES, "Times we give up because too many loops in tmVirtualSyncGetEx.");
+   STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetELoop, STAMTYPE_COUNTER, "/TM/VirtualSyncGet/ELoop", STAMUNIT_OCCURENCES, "Times tmVirtualSyncGetEx has given up getting a consistent virtual sync data set.");
    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetExpired, STAMTYPE_COUNTER, "/TM/VirtualSyncGet/Expired", STAMUNIT_OCCURENCES, "Times tmVirtualSyncGetEx encountered an expired timer stopping the clock.");
    STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGetLocked, STAMTYPE_COUNTER, "/TM/VirtualSyncGet/Locked", STAMUNIT_OCCURENCES, "Times we successfully acquired the lock in tmVirtualSyncGetEx.");
trunk/src/VBox/VMM/TMInternal.h
r19803 → r19810:

    STAMCOUNTER StatPoll;
    STAMCOUNTER StatPollAlreadySet;
+   STAMCOUNTER StatPollELoop;
+   STAMCOUNTER StatPollMiss;
+   STAMCOUNTER StatPollRunning;
+   STAMCOUNTER StatPollSimple;
    STAMCOUNTER StatPollVirtual;
    STAMCOUNTER StatPollVirtualSync;
-   STAMCOUNTER StatPollMiss;
-   STAMCOUNTER StatPollRunning;
    /** @} */
    /** TMTimerPollGIP
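Read together, the TMInternal.h and TM.cpp hunks show the usual three steps for a TM statistics counter: declare the storage, register it under a "/TM/..." name, and bump it where the event happens (the third step appears in the TMAll.cpp hunk below). Collected from this changeset's own hunks for the new StatPollSimple counter:

    /* TMInternal.h: per-VM storage for the counter. */
    STAMCOUNTER StatPollSimple;

    /* TM.cpp: register it with the statistics manager. */
    STAM_REG(pVM, &pVM->tm.s.StatPollSimple, STAMTYPE_COUNTER, "/TM/Poll/Simple",
             STAMUNIT_OCCURENCES, "TMTimerPoll calls where we could take the simple path.");

    /* TMAll.cpp: count every time TMTimerPoll takes the simple (lockless) path. */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);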
trunk/src/VBox/VMM/VMMAll/TMAll.cpp
r19773 → r19810:

     * TMCLOCK_VIRTUAL
     */
-   const uint64_t u64Expire1 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire;
+   const uint64_t u64Expire1 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
    const int64_t i64Delta1 = u64Expire1 - u64Now;
    if (i64Delta1 <= 0)
    {
+       LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
+       if (    !pVM->tm.s.fRunningQueues
+           &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
+       {
+           Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+           VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
+#ifdef IN_RING3
+           REMR3NotifyTimerPending(pVM, pVCpuDst);
+#endif
+       }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtual);
-       LogFlow(("TMTimerPoll: expire1=%RU64 <= now=%RU64\n", u64Expire1, u64Now));
-       Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
-       VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
-#ifdef IN_RING3
-       REMR3NotifyTimerPending(pVM, pVCpuDst);
-#endif
        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
    }
    …
     * we have to adjust the 'now' but when have to adjust the delta as well.
     */
-   int rc = tmVirtualSyncLock(pVM); /** @todo FIXME: Stop playing safe here... */
-   const uint64_t u64Expire2 = pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire;
+
+   /*
+    * Optimistic lockless approach.
+    */
    uint64_t u64VirtualSyncNow;
-   if (!pVM->tm.s.fVirtualSyncTicking)
-       u64VirtualSyncNow = pVM->tm.s.u64VirtualSync;
+   uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
+   if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
+   {
+       if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
+       {
+           u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
+           if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
+                         && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
+                         && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
+                         && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
+           {
+               u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
+               if (u64VirtualSyncNow < u64Expire2)
+               {
+                   STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
+                   STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
+                   return pVCpu == pVCpuDst
+                        ? RT_MIN(i64Delta1, (int64_t)(u64Expire2 - u64VirtualSyncNow))
+                        : s_u64OtherRet;
+               }
+
+               if (    !pVM->tm.s.fRunningQueues
+                   &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
+               {
+                   Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
+                   VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
+#ifdef IN_RING3
+                   REMR3NotifyTimerPending(pVM, pVCpuDst);
+#endif
+               }
+
+               STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
+               STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
+               LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
+               return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
+           }
+       }
+   }
    else
    {
-       if (!pVM->tm.s.fVirtualSyncCatchUp)
-           u64VirtualSyncNow = u64Now - pVM->tm.s.offVirtualSync;
-       else
-       {
-           uint64_t off = pVM->tm.s.offVirtualSync;
-           uint64_t u64Delta = u64Now - pVM->tm.s.u64VirtualSyncCatchUpPrev;
-           if (RT_LIKELY(!(u64Delta >> 32)))
+       STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
+       STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
+       LogFlow(("TMTimerPoll: stopped\n"));
+       return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
+   }
+
+   /*
+    * Complicated lockless approach.
+    */
+   uint64_t off;
+   uint32_t u32Pct = 0;
+   bool     fCatchUp;
+   int      cOuterTries = 42;
+   for (;; cOuterTries--)
+   {
+       fCatchUp   = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
+       off        = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
+       u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
+       if (fCatchUp)
+       {
+           /* No changes allowed, try get a consistent set of parameters. */
+           uint64_t const u64Prev    = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
+           uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
+           u32Pct                    = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
+           if (    (   u64Prev    == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
+                    && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
+                    && u32Pct     == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
+                    && off        == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
+                    && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
+                    && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
+                    && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
+               ||  cOuterTries <= 0)
            {
-               uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
-               if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
-                   off -= u64Sub;
+               uint64_t u64Delta = u64Now - u64Prev;
+               if (RT_LIKELY(!(u64Delta >> 32)))
+               {
+                   uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
+                   if (off > u64Sub + offGivenUp)
+                       off -= u64Sub;
+                   else /* we've completely caught up. */
+                       off = offGivenUp;
+               }
                else
-                   off = pVM->tm.s.offVirtualSyncGivenUp;
+                   /* More than 4 seconds since last time (or negative), ignore it. */
+                   Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
+
+               /* Check that we're still running and in catch up. */
+               if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
+                   &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
+                   break;
            }
-           u64VirtualSyncNow = u64Now - off;
-       }
-   }
+       }
+       else if (   off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
+                && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
+                && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
+                && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
+           break; /* Got an consistent offset */
+
+       /* Repeat the initial checks before iterating. */
+       if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
+       {
+           STAM_COUNTER_INC(&pVM->tm.s.StatPollAlreadySet);
+           return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
+       }
+       if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
+       {
+           STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
+           return s_u64OtherRet;
+       }
+       if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
+       {
+           STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
+           LogFlow(("TMTimerPoll: stopped\n"));
+           return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
+       }
+       if (cOuterTries <= 0)
+           break; /* that's enough */
+   }
+   if (cOuterTries <= 0)
+       STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
+   u64VirtualSyncNow = u64Now - off;
+
    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    if (i64Delta2 <= 0)
    …
        }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
-#ifndef IN_RING3
-       if (RT_SUCCESS(rc))
-#endif
-           tmVirtualSyncUnlock(pVM);
        LogFlow(("TMTimerPoll: expire2=%RU64 <= now=%RU64\n", u64Expire2, u64Now));
        return pVCpu == pVCpuDst ? 0 : s_u64OtherRet;
    }
-   if (pVM->tm.s.fVirtualSyncCatchUp)
-       i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);

    /*
    …
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
-#ifndef IN_RING3
-   if (RT_SUCCESS(rc))
-#endif
-       tmVirtualSyncUnlock(pVM);
-   return RT_MIN(i64Delta1, i64Delta2);
+   if (pVCpu == pVCpuDst)
+   {
+       if (fCatchUp)
+           i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
+       return RT_MIN(i64Delta1, i64Delta2);
+   }
+   return s_u64OtherRet;
    }
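The catch-up arithmetic in the new TMTimerPoll code is worth spelling out. While catch-up is active, the virtual sync clock runs u32VirtualSyncCatchUpPercentage percent faster than the virtual clock, so each elapsed interval removes an extra u32Pct percent from the accumulated lag (off), and a delta measured on the virtual sync clock corresponds to a shorter wait of 100/(100 + u32Pct) on the polling side. Below is a minimal, self-contained sketch of just that arithmetic; mulDiv is a simplified stand-in for ASMMultU64ByU32DivByU32 (the real IPRT helper is written to avoid intermediate overflow), and the constants are made-up example values, not anything from the changeset:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for ASMMultU64ByU32DivByU32. */
    static uint64_t mulDiv(uint64_t u64A, uint32_t u32B, uint32_t u32C)
    {
        return u64A * u32B / u32C;
    }

    int main(void)
    {
        uint64_t off        = 50000000;  /* accumulated virtual-sync lag (ns), like offVirtualSync */
        uint64_t offGivenUp = 10000000;  /* lag already given up on, like offVirtualSyncGivenUp */
        uint32_t u32Pct     = 25;        /* catch-up rate: the clock runs 25% faster */
        uint64_t u64Delta   = 8000000;   /* time since the last catch-up update (ns) */

        /* While catching up, every elapsed interval removes an extra u32Pct% of lag. */
        uint64_t u64Sub = mulDiv(u64Delta, u32Pct, 100);
        if (off > u64Sub + offGivenUp)
            off -= u64Sub;               /* still behind */
        else
            off = offGivenUp;            /* completely caught up */

        /* Converting a virtual-sync delta back into a polling deadline: the clock
           advances (100 + u32Pct)/100 times as fast, so the wait is shorter. */
        int64_t i64Delta2 = 1000000;     /* ns until the next virtual-sync timer expires */
        i64Delta2 = (int64_t)mulDiv((uint64_t)i64Delta2, 100, u32Pct + 100);

        printf("off=%llu i64Delta2=%lld\n",
               (unsigned long long)off, (long long)i64Delta2);
        return 0;
    }

With these example numbers, 2000000 ns of lag is worked off (off becomes 48000000) and a 1000000 ns virtual-sync delta maps to an 800000 ns poll deadline.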
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
r19803 → r19810:

     */
    uint64_t off;
-   if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
-       && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
-   {
-       off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
-       if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
-           && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
-           &&  off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync))
-       {
-           if (u64 - off < ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire))
-           {
-               STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
-               return u64 - off;
-           }
-       }
-   }
+   if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
+   {
+       if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
+       {
+           off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
+           if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
+                         && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
+                         && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)))
+           {
+               off = u64 - off;
+               if (off < ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire))
+               {
+                   STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
+                   return off;
+               }
+           }
+       }
+   }
+   else
+   {
+       off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
+       if (RT_LIKELY(!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)))
+       {
+           STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
+           return off;
+       }
+   }
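Both tmVirtualSyncGetEx and the new TMTimerPoll code rely on the same lockless idiom: read the flags and the offset with atomic loads, then read them again and only trust the snapshot if nothing changed in between; otherwise fall back to a locked (or retrying) slow path. The following is a self-contained C11 sketch of that double-read pattern; the names (fTicking, offSync, tryLocklessSnapshot) are illustrative only, not the VBox/IPRT APIs used in the changeset:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative state: a writer updates fTicking and offSync under a lock,
       readers try to snapshot them without taking it. */
    static _Atomic bool     fTicking = true;
    static _Atomic uint64_t offSync  = 123456;

    /* Returns true and fills *poff on a consistent lockless snapshot; returns
       false when the caller should fall back to the locked/retrying slow path. */
    static bool tryLocklessSnapshot(uint64_t *poff)
    {
        if (!atomic_load_explicit(&fTicking, memory_order_acquire))
            return false;                       /* clock stopped: not handled here */

        uint64_t off = atomic_load_explicit(&offSync, memory_order_acquire);

        /* Re-check: if either value changed while we were reading, the pair may
           be inconsistent and must not be used. */
        if (   atomic_load_explicit(&fTicking, memory_order_acquire)
            && off == atomic_load_explicit(&offSync, memory_order_acquire))
        {
            *poff = off;
            return true;
        }
        return false;
    }

    int main(void)
    {
        uint64_t off;
        if (tryLocklessSnapshot(&off))
            printf("lockless: off=%llu\n", (unsigned long long)off);
        else
            printf("fall back to the locked path\n");
        return 0;
    }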