Changeset 33170 in vbox
- Timestamp: Oct 15, 2010 10:51:56 PM
- Location: trunk/src/VBox/VMM
- Files: 4 edited
trunk/src/VBox/VMM/VM.cpp
--- r32885
+++ r33170
@@ -831,9 +831,15 @@
     for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     {
-        rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state yielding.", "/PROF/VM/CPU%d/Halt/Yield", idCpu);
+        rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state yielding.", "/PROF/VM/CPU%d/Halt/Yield", idCpu);
         AssertRC(rc);
-        rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state blocking.", "/PROF/VM/CPU%d/Halt/Block", idCpu);
+        rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state blocking.", "/PROF/VM/CPU%d/Halt/Block", idCpu);
         AssertRC(rc);
-        rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/VM/CPU%d/Halt/Timers", idCpu);
+        rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time wasted by blocking too long.", "/PROF/VM/CPU%d/Halt/BlockOverslept", idCpu);
+        AssertRC(rc);
+        rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept when returning to early.", "/PROF/VM/CPU%d/Halt/BlockInsomnia", idCpu);
+        AssertRC(rc);
+        rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept on time.", "/PROF/VM/CPU%d/Halt/BlockOnTime", idCpu);
+        AssertRC(rc);
+        rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/VM/CPU%d/Halt/Timers", idCpu);
         AssertRC(rc);
     }
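The VM.cpp hunk switches the halt statistics from CPU ticks per call to nanoseconds per call and registers three new counters (BlockOverslept, BlockInsomnia, BlockOnTime). As a rough, stand-alone illustration of the measurement pattern the rest of the changeset adopts for feeding such counters, the sketch below samples a monotonic clock before and after a blocking call and adds the delta as one period. It is only an analogue: the PROFILE struct, nowNs() and profileAddPeriod() are hypothetical stand-ins for STAMPROFILE, RTTimeNanoTS() and STAM_REL_PROFILE_ADD_PERIOD, and POSIX nanosleep stands in for the event-semaphore wait.

    #define _POSIX_C_SOURCE 199309L
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* Hypothetical stand-in for a STAMPROFILE sample: total nanoseconds and period count. */
    typedef struct PROFILE { uint64_t cNsTotal; uint64_t cPeriods; } PROFILE;

    /* Analogue of RTTimeNanoTS(): a monotonic nanosecond timestamp. */
    static uint64_t nowNs(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * UINT64_C(1000000000) + (uint64_t)ts.tv_nsec;
    }

    /* Analogue of STAM_REL_PROFILE_ADD_PERIOD(): account one measured period. */
    static void profileAddPeriod(PROFILE *pProf, uint64_t cNs)
    {
        pProf->cNsTotal += cNs;
        pProf->cPeriods++;
    }

    int main(void)
    {
        PROFILE HaltBlock = { 0, 0 };

        /* The pattern the changeset switches to: sample the clock around the
           blocking call and feed the elapsed nanoseconds to the counter,
           instead of the tick-based STAM_REL_PROFILE_START/STOP pair. */
        uint64_t const u64StartSchedHalt = nowNs();
        struct timespec req = { 0, 1000000 };   /* ~1 ms, standing in for RTSemEventWait */
        nanosleep(&req, NULL);
        uint64_t const cNsElapsedSchedHalt = nowNs() - u64StartSchedHalt;
        profileAddPeriod(&HaltBlock, cNsElapsedSchedHalt);

        printf("blocked for %llu ns over %llu period(s)\n",
               (unsigned long long)HaltBlock.cNsTotal, (unsigned long long)HaltBlock.cPeriods);
        return 0;
    }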
trunk/src/VBox/VMM/VMEmt.cpp
--- r30473
+++ r33170
@@ -324,7 +324,8 @@
          * addition to perhaps set an FF.
          */
-        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltTimers, b);
+        uint64_t const u64StartTimers = RTTimeNanoTS();
         TMR3TimerQueuesDo(pVM);
-        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltTimers, b);
+        uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
+        STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedTimers);
         if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
             ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
@@ -352,21 +353,24 @@
         {
             //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
-            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltYield, a);
+            uint64_t const u64StartSchedYield = RTTimeNanoTS();
             RTThreadYield(); /* this is the best we can do here */
-            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltYield, a);
+            uint64_t const cNsElapsedSchedYield = RTTimeNanoTS() - u64StartSchedYield;
+            STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedSchedYield);
         }
         else if (u64NanoTS < 2000000)
         {
             //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
-            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, a);
+            uint64_t const u64StartSchedHalt = RTTimeNanoTS();
             rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1);
-            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, a);
+            uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
+            STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
         }
         else
         {
             //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
-            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, a);
+            uint64_t const u64StartSchedHalt = RTTimeNanoTS();
             rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
-            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, a);
+            uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
+            STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
         }
         //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
@@ -519,7 +523,8 @@
          * Work the timers and check if we can exit.
          */
-        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltTimers, b);
+        uint64_t const u64StartTimers = RTTimeNanoTS();
         TMR3TimerQueuesDo(pVM);
-        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltTimers, b);
+        uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
+        STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedTimers);
         if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
             ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
@@ -554,8 +559,11 @@
         else
             cMilliSecs -= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
+
         //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
-        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, a);
+        uint64_t const u64StartSchedHalt = RTTimeNanoTS();
         rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, cMilliSecs);
-        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, a);
+        uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
+        STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
+
         if (rc == VERR_TIMEOUT)
             rc = VINF_SUCCESS;
@@ -609,4 +617,30 @@
 static DECLCALLBACK(int) vmR3HaltGlobal1Init(PUVM pUVM)
 {
+    /*
+     * The defaults.
+     */
+    pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = 50000;
+    uint32_t cNsResolution = SUPSemEventMultiGetResolution(pUVM->vm.s.pSession);
+    if (cNsResolution < 5*RT_NS_100US)
+    {
+        cNsResolution = RT_MAX(cNsResolution, 20000);
+        pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = cNsResolution / 2;
+    }
+
+    /*
+     * Query overrides.
+     *
+     * I don't have time to bother with niceities such as invalid value checks
+     * here right now. sorry.
+     */
+    PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedGlobal1");
+    if (pCfg)
+    {
+        uint32_t u32;
+        if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "SpinBlockThreshold", &u32)))
+            pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg = u32;
+    }
+    LogRel(("HaltedGlobal1 config: cNsSpinBlockThresholdCfg=%u\n",
+            pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg));
     return VINF_SUCCESS;
 }
@@ -627,4 +661,6 @@
      * Halt loop.
      */
+    //uint64_t u64NowLog, u64Start;
+    //u64Start = u64NowLog = RTTimeNanoTS();
     int rc = VINF_SUCCESS;
     ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
@@ -635,7 +671,8 @@
          * Work the timers and check if we can exit.
          */
-        STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltTimers, b);
+        uint64_t const u64StartTimers = RTTimeNanoTS();
         TMR3TimerQueuesDo(pVM);
-        STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltTimers, b);
+        uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
+        STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedTimers);
         if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
             ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
@@ -645,4 +682,5 @@
          * Estimate time left to the next event.
          */
+        //u64NowLog = RTTimeNanoTS();
         uint64_t u64Delta;
         uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
@@ -654,15 +692,18 @@
          * Block if we're not spinning and the interval isn't all that small.
          */
-        if (u64Delta > 50000 /* 0.050ms */)
+        if (u64Delta >= pUVM->vm.s.Halt.Global1.cNsSpinBlockThresholdCfg)
         {
             VMMR3YieldStop(pVM);
             if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
                 ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
                 break;
 
-            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
-            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltBlock, c);
+            //RTLogPrintf("loop=%-3d u64GipTime=%'llu / %'llu   now=%'llu / %'llu\n", cLoops, u64GipTime, u64Delta, u64NowLog, u64GipTime - u64NowLog);
+            uint64_t const u64StartSchedHalt = RTTimeNanoTS();
             rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_HALT, u64GipTime, NULL);
-            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltBlock, c);
+            uint64_t const u64EndSchedHalt = RTTimeNanoTS();
+            uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
+            STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
+
             if (rc == VERR_INTERRUPTED)
                 rc = VINF_SUCCESS;
@@ -672,4 +713,14 @@
                 break;
             }
+            else
+            {
+                int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
+                if (cNsOverslept > 50000)
+                    STAM_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockOverslept, cNsOverslept);
+                else if (cNsOverslept < -50000)
+                    STAM_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockInsomnia, cNsElapsedSchedHalt);
+                else
+                    STAM_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlockOnTime, cNsElapsedSchedHalt);
+            }
         }
         /*
@@ -679,10 +730,11 @@
         else if (!(cLoops & 0x1fff))
         {
-            STAM_REL_PROFILE_START(&pUVCpu->vm.s.StatHaltYield, d);
+            uint64_t const u64StartSchedYield = RTTimeNanoTS();
             rc = SUPR3CallVMMR0Ex(pVM->pVMR0, pVCpu->idCpu, VMMR0_DO_GVMM_SCHED_POLL, false /* don't yield */, NULL);
-            STAM_REL_PROFILE_STOP(&pUVCpu->vm.s.StatHaltYield, d);
-        }
-    }
-    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);
+            uint64_t const cNsElapsedSchedYield = RTTimeNanoTS() - u64StartSchedYield;
+            STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedSchedYield);
+        }
+    }
+    //RTLogPrintf("*** %u loops %'llu; lag=%RU64\n", cLoops, u64NowLog - u64Start, TMVirtualSyncGetLag(pVM));
 
     ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
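In vmR3HaltGlobal1Halt the elapsed block time is now compared against the requested GIP deadline and booked into one of the three new counters. Below is a minimal, self-contained sketch of that classification using the same 50000 ns tolerance as the diff; classifyWakeUp() and the sample values are hypothetical stand-ins, not VirtualBox code.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper mirroring the bookkeeping vmR3HaltGlobal1Halt adds after
       the VMMR0_DO_GVMM_SCHED_HALT call returns: compare the actual wake-up time
       with the requested GIP deadline and pick a bucket.  The 50000 ns tolerance
       is the same value the diff uses. */
    static const char *classifyWakeUp(uint64_t u64EndSchedHalt, uint64_t u64GipTime)
    {
        int64_t const cNsOverslept = (int64_t)(u64EndSchedHalt - u64GipTime);
        if (cNsOverslept > 50000)
            return "overslept (StatHaltBlockOverslept)";
        if (cNsOverslept < -50000)
            return "insomnia (StatHaltBlockInsomnia)";
        return "on time (StatHaltBlockOnTime)";
    }

    int main(void)
    {
        uint64_t const u64GipTime = UINT64_C(1000000000);   /* requested wake-up deadline */
        uint64_t const aEnd[]     = { 1000030000, 1000200000, 999800000 };
        for (unsigned i = 0; i < 3; i++)
            printf("woke at %" PRIu64 " -> %s\n", aEnd[i], classifyWakeUp(aEnd[i], u64GipTime));
        return 0;
    }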
trunk/src/VBox/VMM/VMInternal.h
--- r30473
+++ r33170
@@ -278,4 +278,13 @@
             uint32_t                u32StopSpinningCfg;
         } Method12;
+
+        /**
+         * The GVMM manages halted and waiting EMTs.
+         */
+        struct
+        {
+            /** The threshold between spinning and blocking. */
+            uint32_t                cNsSpinBlockThresholdCfg;
+        } Global1;
     } Halt;
 
@@ -398,4 +407,7 @@
     STAMPROFILE             StatHaltYield;
     STAMPROFILE             StatHaltBlock;
+    STAMPROFILE             StatHaltBlockOverslept;
+    STAMPROFILE             StatHaltBlockInsomnia;
+    STAMPROFILE             StatHaltBlockOnTime;
     STAMPROFILE             StatHaltTimers;
     STAMPROFILE             StatHaltPoll;
trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp
--- r32885
+++ r33170
@@ -201,8 +201,16 @@
     /** The number of VMs. */
     uint16_t volatile   cVMs;
-//    /** The number of halted EMT threads. */
-//    uint16_t volatile   cHaltedEMTs;
+    /** Alignment padding. */
+    uint16_t            u16Reserved;
     /** The number of EMTs. */
     uint32_t volatile   cEMTs;
+    /** The number of EMTs that have halted in GVMMR0SchedHalt. */
+    uint32_t volatile   cHaltedEMTs;
+    /** Alignment padding. */
+    uint32_t            u32Alignment;
+    /** When the next halted or sleeping EMT will wake up.
+     * This is set to 0 when it needs recalculating and to UINT64_MAX when
+     * there are no halted or sleeping EMTs in the GVMM. */
+    uint64_t            uNsNextEmtWakeup;
     /** The lock used to serialize VM creation, destruction and associated events that
      * isn't performance critical. Owners may acquire the list lock. */
@@ -345,9 +353,21 @@
 
     /* The default configuration values. */
-    pGVMM->cEMTsMeansCompany = 1;                           /** @todo should be adjusted to relative to the cpu count or something... */
-    pGVMM->nsMinSleepAlone   = 750000 /* ns (0.750 ms) */;  /** @todo this should be adjusted to be 75% (or something) of the scheduler granularity... */
-    pGVMM->nsMinSleepCompany =  15000 /* ns (0.015 ms) */;
-    pGVMM->nsEarlyWakeUp1    =  25000 /* ns (0.025 ms) */;
-    pGVMM->nsEarlyWakeUp2    =  50000 /* ns (0.050 ms) */;
+    uint32_t cNsResolution = RTSemEventMultiGetResolution();
+    pGVMM->cEMTsMeansCompany     = 1;                           /** @todo should be adjusted to relative to the cpu count or something... */
+    if (cNsResolution >= 5*RT_NS_100US)
+    {
+        pGVMM->nsMinSleepAlone   = 750000 /* ns (0.750 ms) */;  /** @todo this should be adjusted to be 75% (or something) of the scheduler granularity... */
+        pGVMM->nsMinSleepCompany =  15000 /* ns (0.015 ms) */;
+        pGVMM->nsEarlyWakeUp1    =  25000 /* ns (0.025 ms) */;
+        pGVMM->nsEarlyWakeUp2    =  50000 /* ns (0.050 ms) */;
+    }
+    else
+    {
+        cNsResolution = RT_MIN(cNsResolution, 10000);
+        pGVMM->nsMinSleepAlone   = cNsResolution - cNsResolution / 3;
+        pGVMM->nsMinSleepCompany = cNsResolution - cNsResolution / 3;
+        pGVMM->nsEarlyWakeUp1    = 0;
+        pGVMM->nsEarlyWakeUp2    = 0;
+    }
 
     /* The host CPU data. */
@@ -528,5 +548,5 @@
     else if (!strcmp(pszName, "MinSleepAlone"))
     {
-        if (u64Value <= 100000000)
+        if (u64Value <= RT_NS_100MS)
             pGVMM->nsMinSleepAlone = u64Value;
         else
@@ -535,5 +555,5 @@
     else if (!strcmp(pszName, "MinSleepCompany"))
     {
-        if (u64Value <= 100000000)
+        if (u64Value <= RT_NS_100MS)
             pGVMM->nsMinSleepCompany = u64Value;
         else
@@ -542,5 +562,5 @@
     else if (!strcmp(pszName, "EarlyWakeUp1"))
     {
-        if (u64Value <= 100000000)
+        if (u64Value <= RT_NS_100MS)
             pGVMM->nsEarlyWakeUp1 = u64Value;
         else
@@ -549,5 +569,5 @@
     else if (!strcmp(pszName, "EarlyWakeUp2"))
     {
-        if (u64Value <= 100000000)
+        if (u64Value <= RT_NS_100MS)
             pGVMM->nsEarlyWakeUp2 = u64Value;
         else
@@ -1583,5 +1603,21 @@
 static unsigned gvmmR0SchedDoWakeUps(PGVMM pGVMM, uint64_t u64Now)
 {
+    /*
+     * Skip this if we've got disabled because of high resolution wakeups or by
+     * the user.
+     */
+    if (   !pGVMM->nsEarlyWakeUp1
+        && !pGVMM->nsEarlyWakeUp2)
+        return 0;
+
     /** @todo Rewrite this algorithm. See performance defect XYZ. */
+
+    /*
+     * A cheap optimization to stop wasting so much time here on big setups.
+     */
+    const uint64_t uNsEarlyWakeUp2 = u64Now + pGVMM->nsEarlyWakeUp2;
+    if (   pGVMM->cHaltedEMTs == 0
+        || uNsEarlyWakeUp2 > pGVMM->uNsNextEmtWakeup)
+        return 0;
 
     /*
@@ -1589,8 +1625,10 @@
      * and look for VMs that should be woken up in the 2nd and 3rd passes.
      */
-    unsigned cWoken = 0;
-    unsigned cHalted = 0;
-    unsigned cTodo2nd = 0;
-    unsigned cTodo3rd = 0;
+    const uint64_t uNsEarlyWakeUp1 = u64Now + pGVMM->nsEarlyWakeUp1;
+    uint64_t       u64Min   = UINT64_MAX;
+    unsigned       cWoken   = 0;
+    unsigned       cHalted  = 0;
+    unsigned       cTodo2nd = 0;
+    unsigned       cTodo3rd = 0;
     for (unsigned i = pGVMM->iUsedHead, cGuard = 0;
          i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
@@ -1603,7 +1641,6 @@
         for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
         {
-            PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
-
-            uint64_t u64 = pCurGVCpu->gvmm.s.u64HaltExpire;
+            PGVMCPU     pCurGVCpu = &pCurGVM->aCpus[idCpu];
+            uint64_t    u64 = ASMAtomicUoReadU64(&pCurGVCpu->gvmm.s.u64HaltExpire);
             if (u64)
             {
@@ -1620,8 +1657,10 @@
                 {
                     cHalted++;
-                    if (u64 <= u64Now + pGVMM->nsEarlyWakeUp1)
+                    if (u64 <= uNsEarlyWakeUp1)
                         cTodo2nd++;
-                    else if (u64 <= u64Now + pGVMM->nsEarlyWakeUp2)
+                    else if (u64 <= uNsEarlyWakeUp2)
                         cTodo3rd++;
+                    else if (u64 < u64Min)
+                        u64 = u64Min;
                 }
             }
@@ -1643,8 +1682,8 @@
             for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
             {
-                PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
-
-                if (    pCurGVCpu->gvmm.s.u64HaltExpire
-                    &&  pCurGVCpu->gvmm.s.u64HaltExpire <= u64Now + pGVMM->nsEarlyWakeUp1)
+                PGVMCPU     pCurGVCpu = &pCurGVM->aCpus[idCpu];
+                uint64_t    u64 = ASMAtomicUoReadU64(&pCurGVCpu->gvmm.s.u64HaltExpire);
+                if (   u64
+                    && u64 <= uNsEarlyWakeUp1)
                 {
                     if (ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0))
@@ -1673,8 +1712,8 @@
             for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
             {
-                PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
-
-                if (    pCurGVCpu->gvmm.s.u64HaltExpire
-                    &&  pCurGVCpu->gvmm.s.u64HaltExpire <= u64Now + pGVMM->nsEarlyWakeUp2)
+                PGVMCPU     pCurGVCpu = &pCurGVM->aCpus[idCpu];
+                uint64_t    u64 = ASMAtomicUoReadU64(&pCurGVCpu->gvmm.s.u64HaltExpire);
+                if (   u64
+                    && u64 <= uNsEarlyWakeUp2)
                 {
                     if (ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0))
@@ -1691,4 +1730,9 @@
     }
 
+    /*
+     * Set the minimum value.
+     */
+    pGVMM->uNsNextEmtWakeup = u64Min;
+
     return cWoken;
 }
@@ -1732,26 +1776,42 @@
     pCurGVCpu->gvmm.s.iCpuEmt = ASMGetApicId();
 
+    /* GIP hack: We might are frequently sleeping for short intervals where the
+       difference between GIP and system time matters on systems with high resolution
+       system time. So, convert the input from GIP to System time in that case. */
     Assert(ASMGetFlags() & X86_EFL_IF);
-    const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
-    pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
+    const uint64_t u64NowSys = RTTimeSystemNanoTS();
+    const uint64_t u64NowGip = RTTimeNanoTS();
+    pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64NowGip);
 
     /*
      * Go to sleep if we must...
-     */
-    if (    u64Now < u64ExpireGipTime
-        &&  u64ExpireGipTime - u64Now > (pGVMM->cEMTs > pGVMM->cEMTsMeansCompany
-                                         ? pGVMM->nsMinSleepCompany
-                                         : pGVMM->nsMinSleepAlone))
+     * Cap the sleep time to 1 second to be on the safe side.
+     */
+    uint64_t cNsInterval = u64ExpireGipTime - u64NowGip;
+    if (    u64NowGip < u64ExpireGipTime
+        &&  cNsInterval >= (pGVMM->cEMTs > pGVMM->cEMTsMeansCompany
+                            ? pGVMM->nsMinSleepCompany
+                            : pGVMM->nsMinSleepAlone))
     {
         pGVM->gvmm.s.StatsSched.cHaltBlocking++;
-        ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, u64ExpireGipTime);
+        if (cNsInterval > RT_NS_1SEC)
+            u64ExpireGipTime = u64NowGip + RT_NS_1SEC;
+        if (u64ExpireGipTime < pGVMM->uNsNextEmtWakeup)
+            pGVMM->uNsNextEmtWakeup = u64ExpireGipTime;
+        ASMAtomicWriteU64(&pCurGVCpu->gvmm.s.u64HaltExpire, u64ExpireGipTime);
+        ASMAtomicIncU32(&pGVMM->cHaltedEMTs);
         gvmmR0UsedUnlock(pGVMM);
 
-        uint32_t cMillies = (u64ExpireGipTime - u64Now) / 1000000;
-        /* Cap the timeout to one second. */
-        cMillies = RT_MIN(1000, cMillies);
-        rc = RTSemEventMultiWaitNoResume(pCurGVCpu->gvmm.s.HaltEventMulti, cMillies ? cMillies : 1);
-        ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0);
-        if (rc == VERR_TIMEOUT)
+        rc = RTSemEventMultiWaitEx(pCurGVCpu->gvmm.s.HaltEventMulti,
+                                   RTSEMWAIT_FLAGS_ABSOLUTE | RTSEMWAIT_FLAGS_NANOSECS | RTSEMWAIT_FLAGS_INTERRUPTIBLE,
+                                   u64NowGip > u64NowSys ? u64ExpireGipTime : u64NowSys + cNsInterval);
+
+        ASMAtomicWriteU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0);
+        ASMAtomicDecU32(&pGVMM->cHaltedEMTs);
+
+        /* Reset the semaphore to try prevent a few false wake-ups. */
+        if (rc == VINF_SUCCESS)
+            RTSemEventMultiReset(pCurGVCpu->gvmm.s.HaltEventMulti);
+        else if (rc == VERR_TIMEOUT)
         {
             pGVM->gvmm.s.StatsSched.cHaltTimeouts++;
@@ -1763,8 +1823,6 @@
         pGVM->gvmm.s.StatsSched.cHaltNotBlocking++;
         gvmmR0UsedUnlock(pGVMM);
-    }
-
-    /* Make sure false wake up calls (gvmmR0SchedDoWakeUps) cause us to spin. */
-    RTSemEventMultiReset(pCurGVCpu->gvmm.s.HaltEventMulti);
+        RTSemEventMultiReset(pCurGVCpu->gvmm.s.HaltEventMulti);
+    }
 
     return rc;
@@ -1799,5 +1857,5 @@
     {
         rc = VINF_SUCCESS;
-        ASMAtomicXchgU64(&pGVCpu->gvmm.s.u64HaltExpire, 0);
+        ASMAtomicWriteU64(&pGVCpu->gvmm.s.u64HaltExpire, 0);
     }
     else
@@ -2132,5 +2190,5 @@
             pCpu->Ppt.iTickHistorization = 0;
             pCpu->Ppt.uTimerHz           = uHistMaxHz;
-            uint32_t const cNsInterval   = UINT32_C(1000000000) / uHistMaxHz;
+            uint32_t const cNsInterval   = RT_NS_1SEC / uHistMaxHz;
             pCpu->Ppt.cNsInterval        = cNsInterval;
             if (cNsInterval < GVMMHOSTCPU_PPT_HIST_INTERVAL_NS)
@@ -2218,5 +2276,5 @@
         pCpu->Ppt.iTickHistorization = 0;
         pCpu->Ppt.uTimerHz           = uHz;
-        pCpu->Ppt.cNsInterval        = cNsInterval = UINT32_C(1000000000) / uHz;
+        pCpu->Ppt.cNsInterval        = cNsInterval = RT_NS_1SEC / uHz;
         if (cNsInterval < GVMMHOSTCPU_PPT_HIST_INTERVAL_NS)
             pCpu->Ppt.cTicksHistoriziationInterval = ( GVMMHOSTCPU_PPT_HIST_INTERVAL_NS
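GVMMR0Init now picks its minimum-sleep and early-wake-up defaults from the wait granularity reported by RTSemEventMultiGetResolution(): coarse hosts keep the old fixed values, while fine-grained hosts derive the minimum sleep from the resolution and disable the early wake-up passes, which gvmmR0SchedDoWakeUps then skips entirely. The stand-alone sketch below mirrors that selection under stated assumptions: the GVMMCFG struct, initSchedDefaults() helper and MY_NS_100US constant are hypothetical stand-ins for the real GVMM state and RT_NS_100US.

    #include <stdint.h>
    #include <stdio.h>

    #define MY_NS_100US UINT32_C(100000)    /* stand-in for RT_NS_100US */

    /* Hypothetical container for the GVMM tuning values GVMMR0Init assigns. */
    typedef struct GVMMCFG
    {
        uint32_t nsMinSleepAlone;
        uint32_t nsMinSleepCompany;
        uint32_t nsEarlyWakeUp1;
        uint32_t nsEarlyWakeUp2;
    } GVMMCFG;

    /* Sketch of the default selection: with a coarse (>= 0.5 ms) wait granularity
       keep the old fixed values, with a fine one derive the minimum sleep from the
       resolution and disable the early wake-up passes.  cNsResolution stands for
       what RTSemEventMultiGetResolution() would report on the host. */
    static void initSchedDefaults(GVMMCFG *pCfg, uint32_t cNsResolution)
    {
        if (cNsResolution >= 5 * MY_NS_100US)
        {
            pCfg->nsMinSleepAlone   = 750000;   /* 0.750 ms */
            pCfg->nsMinSleepCompany =  15000;   /* 0.015 ms */
            pCfg->nsEarlyWakeUp1    =  25000;   /* 0.025 ms */
            pCfg->nsEarlyWakeUp2    =  50000;   /* 0.050 ms */
        }
        else
        {
            if (cNsResolution > 10000)
                cNsResolution = 10000;          /* RT_MIN(cNsResolution, 10000) */
            pCfg->nsMinSleepAlone   = cNsResolution - cNsResolution / 3;
            pCfg->nsMinSleepCompany = cNsResolution - cNsResolution / 3;
            pCfg->nsEarlyWakeUp1    = 0;        /* zero also makes gvmmR0SchedDoWakeUps bail out early */
            pCfg->nsEarlyWakeUp2    = 0;
        }
    }

    int main(void)
    {
        uint32_t const aRes[] = { 1000000 /* 1 ms timer tick */, 10000 /* 10 us waits */ };
        for (unsigned i = 0; i < 2; i++)
        {
            GVMMCFG Cfg;
            initSchedDefaults(&Cfg, aRes[i]);
            printf("resolution %7u ns: alone=%6u company=%6u wakeup1=%5u wakeup2=%5u\n",
                   (unsigned)aRes[i], (unsigned)Cfg.nsMinSleepAlone, (unsigned)Cfg.nsMinSleepCompany,
                   (unsigned)Cfg.nsEarlyWakeUp1, (unsigned)Cfg.nsEarlyWakeUp2);
        }
        return 0;
    }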