Changeset 33170 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Oct 15, 2010, 10:51:56 PM
- svn:sync-xref-src-repo-rev: 66706
- Files: 1 edited
Legend:
- Unmodified (no marker)
- Added (+)
- Removed (-)
trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp
r32885  r33170

 201  201      /** The number of VMs. */
 202  202      uint16_t volatile   cVMs;
 203       -   // /** The number of halted EMT threads. */
 204       -   // uint16_t volatile   cHaltedEMTs;
      203  +   /** Alignment padding. */
      204  +   uint16_t            u16Reserved;
 205  205      /** The number of EMTs. */
 206  206      uint32_t volatile   cEMTs;
      207  +   /** The number of EMTs that have halted in GVMMR0SchedHalt. */
      208  +   uint32_t volatile   cHaltedEMTs;
      209  +   /** Alignment padding. */
      210  +   uint32_t            u32Alignment;
      211  +   /** When the next halted or sleeping EMT will wake up.
      212  +    * This is set to 0 when it needs recalculating and to UINT64_MAX when
      213  +    * there are no halted or sleeping EMTs in the GVMM. */
      214  +   uint64_t            uNsNextEmtWakeup;
 207  215      /** The lock used to serialize VM creation, destruction and associated events that
 208  216       * isn't performance critical. Owners may acquire the list lock. */
…
 345  353
 346  354      /* The default configuration values. */
 347       -   pGVMM->cEMTsMeansCompany = 1;                          /** @todo should be adjusted to relative to the cpu count or something... */
 348       -   pGVMM->nsMinSleepAlone   = 750000 /* ns (0.750 ms) */; /** @todo this should be adjusted to be 75% (or something) of the scheduler granularity... */
 349       -   pGVMM->nsMinSleepCompany =  15000 /* ns (0.015 ms) */;
 350       -   pGVMM->nsEarlyWakeUp1    =  25000 /* ns (0.025 ms) */;
 351       -   pGVMM->nsEarlyWakeUp2    =  50000 /* ns (0.050 ms) */;
      355  +   uint32_t cNsResolution = RTSemEventMultiGetResolution();
      356  +   pGVMM->cEMTsMeansCompany = 1;                          /** @todo should be adjusted to relative to the cpu count or something... */
      357  +   if (cNsResolution >= 5*RT_NS_100US)
      358  +   {
      359  +       pGVMM->nsMinSleepAlone   = 750000 /* ns (0.750 ms) */; /** @todo this should be adjusted to be 75% (or something) of the scheduler granularity... */
      360  +       pGVMM->nsMinSleepCompany =  15000 /* ns (0.015 ms) */;
      361  +       pGVMM->nsEarlyWakeUp1    =  25000 /* ns (0.025 ms) */;
      362  +       pGVMM->nsEarlyWakeUp2    =  50000 /* ns (0.050 ms) */;
      363  +   }
      364  +   else
      365  +   {
      366  +       cNsResolution = RT_MIN(cNsResolution, 10000);
      367  +       pGVMM->nsMinSleepAlone   = cNsResolution - cNsResolution / 3;
      368  +       pGVMM->nsMinSleepCompany = cNsResolution - cNsResolution / 3;
      369  +       pGVMM->nsEarlyWakeUp1    = 0;
      370  +       pGVMM->nsEarlyWakeUp2    = 0;
      371  +   }
 352  372
 353  373      /* The host CPU data. */
…
 528  548      else if (!strcmp(pszName, "MinSleepAlone"))
 529  549      {
 530       -       if (u64Value <= 100000000)
      550  +       if (u64Value <= RT_NS_100MS)
 531  551              pGVMM->nsMinSleepAlone = u64Value;
 532  552          else
…
 535  555      else if (!strcmp(pszName, "MinSleepCompany"))
 536  556      {
 537       -       if (u64Value <= 100000000)
      557  +       if (u64Value <= RT_NS_100MS)
 538  558              pGVMM->nsMinSleepCompany = u64Value;
 539  559          else
…
 542  562      else if (!strcmp(pszName, "EarlyWakeUp1"))
 543  563      {
 544       -       if (u64Value <= 100000000)
      564  +       if (u64Value <= RT_NS_100MS)
 545  565              pGVMM->nsEarlyWakeUp1 = u64Value;
 546  566          else
…
 549  569      else if (!strcmp(pszName, "EarlyWakeUp2"))
 550  570      {
 551       -       if (u64Value <= 100000000)
      571  +       if (u64Value <= RT_NS_100MS)
 552  572              pGVMM->nsEarlyWakeUp2 = u64Value;
 553  573          else
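The new defaults adapt to the wakeup resolution reported by RTSemEventMultiGetResolution(): at 0.5 ms (5*RT_NS_100US) or coarser the old millisecond-scale values are kept, while on hosts with high resolution wakeups the minimum sleep drops to roughly two thirds of the resolution (capped at 10 us) and the early-wakeup windows are zeroed. A standalone sketch of that selection arithmetic, using an illustrative stand-in for the IPRT RT_NS_100US constant rather than the real GVMM code:

#include <stdio.h>

#define MY_NS_100US 100000u  /* stand-in for IPRT's RT_NS_100US (100 us in ns) */

/* Mirrors the default-selection logic added to GVMMR0Init above;
   names and structure are illustrative only. */
static void showSchedDefaults(unsigned cNsResolution)
{
    unsigned nsMinSleepAlone, nsEarlyWakeUp1, nsEarlyWakeUp2;
    if (cNsResolution >= 5 * MY_NS_100US)
    {
        /* Coarse (>= 0.5 ms) wakeups: keep the old millisecond-scale defaults. */
        nsMinSleepAlone = 750000;
        nsEarlyWakeUp1  =  25000;
        nsEarlyWakeUp2  =  50000;
    }
    else
    {
        /* High resolution wakeups: cap the resolution at 10 us, sleep for
           roughly 2/3 of it, and disable the early-wakeup passes. */
        if (cNsResolution > 10000)
            cNsResolution = 10000;
        nsMinSleepAlone = cNsResolution - cNsResolution / 3;
        nsEarlyWakeUp1  = 0;
        nsEarlyWakeUp2  = 0;
    }
    printf("resolution %7u ns -> min sleep %6u ns, early wakeup windows %u/%u ns\n",
           cNsResolution, nsMinSleepAlone, nsEarlyWakeUp1, nsEarlyWakeUp2);
}

int main(void)
{
    showSchedDefaults(1000000); /* 1 ms host timer granularity      */
    showSchedDefaults(100000);  /* 100 us, still high-res branch    */
    showSchedDefaults(1000);    /* 1 us: high resolution wakeups    */
    return 0;
}

For a 1 us resolution this gives a 667 ns minimum sleep; anything between 10 us and 0.5 ms settles at 6667 ns.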
…
1583 1603  static unsigned gvmmR0SchedDoWakeUps(PGVMM pGVMM, uint64_t u64Now)
1584 1604  {
     1605 +    /*
     1606 +     * Skip this if we've been disabled because of high resolution wakeups or by
     1607 +     * the user.
     1608 +     */
     1609 +    if (   !pGVMM->nsEarlyWakeUp1
     1610 +        && !pGVMM->nsEarlyWakeUp2)
     1611 +        return 0;
     1612 +
1585 1613      /** @todo Rewrite this algorithm. See performance defect XYZ. */
     1614 +
     1615 +    /*
     1616 +     * A cheap optimization to stop wasting so much time here on big setups.
     1617 +     */
     1618 +    const uint64_t uNsEarlyWakeUp2 = u64Now + pGVMM->nsEarlyWakeUp2;
     1619 +    if (   pGVMM->cHaltedEMTs == 0
     1620 +        || uNsEarlyWakeUp2 < pGVMM->uNsNextEmtWakeup)
     1621 +        return 0;
1586 1622
1587 1623      /*
…
1589 1625       * and look for VMs that should be woken up in the 2nd and 3rd passes.
1590 1626       */
1591      -    unsigned cWoken = 0;
1592      -    unsigned cHalted = 0;
1593      -    unsigned cTodo2nd = 0;
1594      -    unsigned cTodo3rd = 0;
     1627 +    const uint64_t uNsEarlyWakeUp1 = u64Now + pGVMM->nsEarlyWakeUp1;
     1628 +    uint64_t       u64Min   = UINT64_MAX;
     1629 +    unsigned       cWoken   = 0;
     1630 +    unsigned       cHalted  = 0;
     1631 +    unsigned       cTodo2nd = 0;
     1632 +    unsigned       cTodo3rd = 0;
1595 1633      for (unsigned i = pGVMM->iUsedHead, cGuard = 0;
1596 1634           i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
…
1603 1641          for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
1604 1642          {
1605      -            PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
1606      -
1607      -            uint64_t u64 = pCurGVCpu->gvmm.s.u64HaltExpire;
     1643 +            PGVMCPU     pCurGVCpu = &pCurGVM->aCpus[idCpu];
     1644 +            uint64_t    u64       = ASMAtomicUoReadU64(&pCurGVCpu->gvmm.s.u64HaltExpire);
1608 1645              if (u64)
1609 1646              {
…
1620 1657                  {
1621 1658                      cHalted++;
1622      -                    if (u64 <= u64Now + pGVMM->nsEarlyWakeUp1)
     1659 +                    if (u64 <= uNsEarlyWakeUp1)
1623 1660                          cTodo2nd++;
1624      -                    else if (u64 <= u64Now + pGVMM->nsEarlyWakeUp2)
     1661 +                    else if (u64 <= uNsEarlyWakeUp2)
1625 1662                          cTodo3rd++;
     1663 +                    else if (u64 < u64Min)
     1664 +                        u64Min = u64;
1626 1665                  }
1627 1666              }
…
1643 1682          for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
1644 1683          {
1645      -            PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
1646      -
1647      -            if (    pCurGVCpu->gvmm.s.u64HaltExpire
1648      -                &&  pCurGVCpu->gvmm.s.u64HaltExpire <= u64Now + pGVMM->nsEarlyWakeUp1)
     1684 +            PGVMCPU     pCurGVCpu = &pCurGVM->aCpus[idCpu];
     1685 +            uint64_t    u64       = ASMAtomicUoReadU64(&pCurGVCpu->gvmm.s.u64HaltExpire);
     1686 +            if (    u64
     1687 +                &&  u64 <= uNsEarlyWakeUp1)
1649 1688              {
1650 1689                  if (ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0))
…
1673 1712          for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
1674 1713          {
1675      -            PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
1676      -
1677      -            if (    pCurGVCpu->gvmm.s.u64HaltExpire
1678      -                &&  pCurGVCpu->gvmm.s.u64HaltExpire <= u64Now + pGVMM->nsEarlyWakeUp2)
     1714 +            PGVMCPU     pCurGVCpu = &pCurGVM->aCpus[idCpu];
     1715 +            uint64_t    u64       = ASMAtomicUoReadU64(&pCurGVCpu->gvmm.s.u64HaltExpire);
     1716 +            if (    u64
     1717 +                &&  u64 <= uNsEarlyWakeUp2)
1679 1718              {
1680 1719                  if (ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0))
…
1691 1730      }
1692 1731
     1732 +    /*
     1733 +     * Set the minimum value.
     1734 +     */
     1735 +    pGVMM->uNsNextEmtWakeup = u64Min;
     1736 +
1693 1737      return cWoken;
1694 1738  }
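The early-out at the top leans on the two new pieces of bookkeeping: cHaltedEMTs counts the sleepers and uNsNextEmtWakeup caches the earliest halt deadline, so the three wake-up passes only run when some deadline can actually fall inside the widest early-wakeup window. A rough sketch of that test, with hypothetical types rather than the real GVMM structures:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the relevant GVMM bookkeeping. */
typedef struct SCHEDCACHE
{
    uint32_t cHalted;        /* like cHaltedEMTs                       */
    uint64_t uNsNextWakeup;  /* like uNsNextEmtWakeup: earliest halt   */
                             /* deadline, UINT64_MAX = no sleepers,    */
                             /* 0 = stale and must be recalculated     */
} SCHEDCACHE;

/* True when the full scan over all VMs/EMTs can be skipped: either nobody is
   halted, or even the widest early-wakeup window [now, now + nsEarlyWakeUp2]
   ends before the earliest cached deadline, so all three passes would find
   nothing. A stale cache (0) never passes the second test, forcing a rescan. */
static bool schedCanSkipWakeUpScan(SCHEDCACHE const *pCache,
                                   uint64_t u64Now, uint64_t nsEarlyWakeUp2)
{
    return pCache->cHalted == 0
        || u64Now + nsEarlyWakeUp2 < pCache->uNsNextWakeup;
}

Note also that each pass now snapshots u64HaltExpire once via ASMAtomicUoReadU64() instead of re-reading the volatile field between the test and the ASMAtomicXchgU64().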
…
1732 1776      pCurGVCpu->gvmm.s.iCpuEmt = ASMGetApicId();
1733 1777
     1778 +    /* GIP hack: We are frequently sleeping for short intervals where the
     1779 +       difference between GIP and system time matters on systems with high resolution
     1780 +       system time. So, convert the input from GIP to System time in that case. */
1734 1781      Assert(ASMGetFlags() & X86_EFL_IF);
1735      -    const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
1736      -    pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
     1782 +    const uint64_t u64NowSys = RTTimeSystemNanoTS();
     1783 +    const uint64_t u64NowGip = RTTimeNanoTS();
     1784 +    pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64NowGip);
1737 1785
1738 1786      /*
1739 1787       * Go to sleep if we must...
1740      -     */
1741      -    if (    u64Now < u64ExpireGipTime
1742      -        &&  u64ExpireGipTime - u64Now > (pGVMM->cEMTs > pGVMM->cEMTsMeansCompany
1743      -                                         ? pGVMM->nsMinSleepCompany
1744      -                                         : pGVMM->nsMinSleepAlone))
     1788 +     * Cap the sleep time to 1 second to be on the safe side.
     1789 +     */
     1790 +    uint64_t cNsInterval = u64ExpireGipTime - u64NowGip;
     1791 +    if (    u64NowGip < u64ExpireGipTime
     1792 +        &&  cNsInterval >= (pGVMM->cEMTs > pGVMM->cEMTsMeansCompany
     1793 +                            ? pGVMM->nsMinSleepCompany
     1794 +                            : pGVMM->nsMinSleepAlone))
1745 1795      {
1746 1796          pGVM->gvmm.s.StatsSched.cHaltBlocking++;
1747      -        ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, u64ExpireGipTime);
     1797 +        if (cNsInterval > RT_NS_1SEC)
     1798 +            u64ExpireGipTime = u64NowGip + RT_NS_1SEC;
     1799 +        if (u64ExpireGipTime < pGVMM->uNsNextEmtWakeup)
     1800 +            pGVMM->uNsNextEmtWakeup = u64ExpireGipTime;
     1801 +        ASMAtomicWriteU64(&pCurGVCpu->gvmm.s.u64HaltExpire, u64ExpireGipTime);
     1802 +        ASMAtomicIncU32(&pGVMM->cHaltedEMTs);
1748 1803          gvmmR0UsedUnlock(pGVMM);
1749 1804
1750      -        uint32_t cMillies = (u64ExpireGipTime - u64Now) / 1000000;
1751      -        /* Cap the timeout to one second. */
1752      -        cMillies = RT_MIN(1000, cMillies);
1753      -        rc = RTSemEventMultiWaitNoResume(pCurGVCpu->gvmm.s.HaltEventMulti, cMillies ? cMillies : 1);
1754      -        ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0);
1755      -        if (rc == VERR_TIMEOUT)
     1805 +        rc = RTSemEventMultiWaitEx(pCurGVCpu->gvmm.s.HaltEventMulti,
     1806 +                                   RTSEMWAIT_FLAGS_ABSOLUTE | RTSEMWAIT_FLAGS_NANOSECS | RTSEMWAIT_FLAGS_INTERRUPTIBLE,
     1807 +                                   u64NowGip > u64NowSys ? u64ExpireGipTime : u64NowSys + cNsInterval);
     1808 +
     1809 +        ASMAtomicWriteU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0);
     1810 +        ASMAtomicDecU32(&pGVMM->cHaltedEMTs);
     1811 +
     1812 +        /* Reset the semaphore to try to prevent a few false wake-ups. */
     1813 +        if (rc == VINF_SUCCESS)
     1814 +            RTSemEventMultiReset(pCurGVCpu->gvmm.s.HaltEventMulti);
     1815 +        else if (rc == VERR_TIMEOUT)
1756 1816          {
1757 1817              pGVM->gvmm.s.StatsSched.cHaltTimeouts++;
…
1763 1823          pGVM->gvmm.s.StatsSched.cHaltNotBlocking++;
1764 1824          gvmmR0UsedUnlock(pGVMM);
1765      -    }
1766      -
1767      -    /* Make sure false wake up calls (gvmmR0SchedDoWakeUps) cause us to spin. */
1768      -    RTSemEventMultiReset(pCurGVCpu->gvmm.s.HaltEventMulti);
     1825 +        RTSemEventMultiReset(pCurGVCpu->gvmm.s.HaltEventMulti);
     1826 +    }
1769 1827
1770 1828      return rc;
…
1799 1857      {
1800 1858          rc = VINF_SUCCESS;
1801      -        ASMAtomicXchgU64(&pGVCpu->gvmm.s.u64HaltExpire, 0);
     1859 +        ASMAtomicWriteU64(&pGVCpu->gvmm.s.u64HaltExpire, 0);
1802 1860      }
1803 1861      else
…
2132 2190          pCpu->Ppt.iTickHistorization = 0;
2133 2191          pCpu->Ppt.uTimerHz           = uHistMaxHz;
2134      -        uint32_t const cNsInterval   = UINT32_C(1000000000) / uHistMaxHz;
     2192 +        uint32_t const cNsInterval   = RT_NS_1SEC / uHistMaxHz;
2135 2193          pCpu->Ppt.cNsInterval        = cNsInterval;
2136 2194          if (cNsInterval < GVMMHOSTCPU_PPT_HIST_INTERVAL_NS)
…
2218 2276          pCpu->Ppt.iTickHistorization = 0;
2219 2277          pCpu->Ppt.uTimerHz           = uHz;
2220      -        pCpu->Ppt.cNsInterval        = cNsInterval = UINT32_C(1000000000) / uHz;
     2278 +        pCpu->Ppt.cNsInterval        = cNsInterval = RT_NS_1SEC / uHz;
2221 2279          if (cNsInterval < GVMMHOSTCPU_PPT_HIST_INTERVAL_NS)
2222 2280              pCpu->Ppt.cTicksHistoriziationInterval = ( GVMMHOSTCPU_PPT_HIST_INTERVAL_NS
…
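Since RTSemEventMultiWaitEx() is handed an absolute deadline (RTSEMWAIT_FLAGS_ABSOLUTE | RTSEMWAIT_FLAGS_NANOSECS), the GIP-relative expiry has to be rebased onto the system clock by carrying the interval across: deadline_sys = now_sys + (expire_gip - now_gip), which is exactly the u64NowSys + cNsInterval operand of the ternary above. A rough POSIX analogy of the same rebase-then-wait-absolute pattern (pthreads, not IPRT; names are illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <time.h>

/* Rebase a deadline from a foreign clock (GIP in the hunk above) onto the
   clock the wait primitive actually uses, by carrying the interval across:
   deadline_native = now_native + (deadline_foreign - now_foreign).
   The caller must hold *pMtx; returns 0 when signalled, ETIMEDOUT on timeout. */
static int waitUntilRebased(pthread_cond_t *pCond, pthread_mutex_t *pMtx,
                            uint64_t u64NowForeign, uint64_t u64DeadlineForeign)
{
    uint64_t const cNsInterval = u64DeadlineForeign > u64NowForeign
                               ? u64DeadlineForeign - u64NowForeign : 0;
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);      /* the wait's native clock */

    uint64_t uNs = (uint64_t)ts.tv_sec * UINT64_C(1000000000)
                 + (uint64_t)ts.tv_nsec + cNsInterval;
    ts.tv_sec  = (time_t)(uNs / UINT64_C(1000000000));
    ts.tv_nsec = (long)(uNs % UINT64_C(1000000000));

    return pthread_cond_timedwait(pCond, pMtx, &ts); /* absolute deadline */
}

int main(void)
{
    pthread_mutex_t mtx  = PTHREAD_MUTEX_INITIALIZER;
    pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    pthread_mutex_lock(&mtx);
    /* Nobody signals, so this times out ~5 ms after "now" on the foreign clock. */
    int rc = waitUntilRebased(&cond, &mtx, 1000, 1000 + 5000000);
    pthread_mutex_unlock(&mtx);
    return rc == ETIMEDOUT ? 0 : 1;
}

The design point matches the hunk: waiting on one absolute nanosecond deadline replaces the old relative RTSemEventMultiWaitNoResume() call, which rounded every short sleep up to whole milliseconds.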