VirtualBox

Changeset 33170 in vbox for trunk/src/VBox/VMM/VMMR0


Ignore:
Timestamp:
Oct 15, 2010 10:51:56 PM (14 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
66706
Message:

GVMM,VMEmt: Use RTSemEventWaitEx, avoid the syrup algorithm in gvmmR0SchedDoWakeUps.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp

    r32885 r33170  
    201201    /** The number of VMs. */
    202202    uint16_t volatile   cVMs;
    203 //    /** The number of halted EMT threads. */
    204 //    uint16_t volatile   cHaltedEMTs;
     203    /** Alignment padding. */
     204    uint16_t            u16Reserved;
    205205    /** The number of EMTs. */
    206206    uint32_t volatile   cEMTs;
     207    /** The number of EMTs that have halted in GVMMR0SchedHalt. */
     208    uint32_t volatile   cHaltedEMTs;
     209    /** Alignment padding. */
     210    uint32_t            u32Alignment;
     211    /** When the next halted or sleeping EMT will wake up.
     212     * This is set to 0 when it needs recalculating and to UINT64_MAX when
     213     * there are no halted or sleeping EMTs in the GVMM. */
     214    uint64_t            uNsNextEmtWakeup;
    207215    /** The lock used to serialize VM creation, destruction and associated events that
    208216     * isn't performance critical. Owners may acquire the list lock. */
     
    345353
    346354            /* The default configuration values. */
    347             pGVMM->cEMTsMeansCompany = 1;                           /** @todo should be adjusted to relative to the cpu count or something... */
    348             pGVMM->nsMinSleepAlone   = 750000 /* ns (0.750 ms) */;  /** @todo this should be adjusted to be 75% (or something) of the scheduler granularity... */
    349             pGVMM->nsMinSleepCompany =  15000 /* ns (0.015 ms) */;
    350             pGVMM->nsEarlyWakeUp1    =  25000 /* ns (0.025 ms) */;
    351             pGVMM->nsEarlyWakeUp2    =  50000 /* ns (0.050 ms) */;
     355            uint32_t cNsResolution = RTSemEventMultiGetResolution();
     356            pGVMM->cEMTsMeansCompany     = 1;                           /** @todo should be adjusted to relative to the cpu count or something... */
     357            if (cNsResolution >= 5*RT_NS_100US)
     358            {
     359                pGVMM->nsMinSleepAlone   = 750000 /* ns (0.750 ms) */;  /** @todo this should be adjusted to be 75% (or something) of the scheduler granularity... */
     360                pGVMM->nsMinSleepCompany =  15000 /* ns (0.015 ms) */;
     361                pGVMM->nsEarlyWakeUp1    =  25000 /* ns (0.025 ms) */;
     362                pGVMM->nsEarlyWakeUp2    =  50000 /* ns (0.050 ms) */;
     363            }
     364            else
     365            {
     366                cNsResolution = RT_MIN(cNsResolution, 10000);
     367                pGVMM->nsMinSleepAlone   = cNsResolution - cNsResolution / 3;
     368                pGVMM->nsMinSleepCompany = cNsResolution - cNsResolution / 3;
     369                pGVMM->nsEarlyWakeUp1    = 0;
     370                pGVMM->nsEarlyWakeUp2    = 0;
     371            }
    352372
    353373            /* The host CPU data. */
     
    528548    else if (!strcmp(pszName, "MinSleepAlone"))
    529549    {
    530         if (u64Value <= 100000000)
     550        if (u64Value <= RT_NS_100MS)
    531551            pGVMM->nsMinSleepAlone = u64Value;
    532552        else
     
    535555    else if (!strcmp(pszName, "MinSleepCompany"))
    536556    {
    537         if (u64Value <= 100000000)
     557        if (u64Value <= RT_NS_100MS)
    538558            pGVMM->nsMinSleepCompany = u64Value;
    539559        else
     
    542562    else if (!strcmp(pszName, "EarlyWakeUp1"))
    543563    {
    544         if (u64Value <= 100000000)
     564        if (u64Value <= RT_NS_100MS)
    545565            pGVMM->nsEarlyWakeUp1 = u64Value;
    546566        else
     
    549569    else if (!strcmp(pszName, "EarlyWakeUp2"))
    550570    {
    551         if (u64Value <= 100000000)
     571        if (u64Value <= RT_NS_100MS)
    552572            pGVMM->nsEarlyWakeUp2 = u64Value;
    553573        else
     
    15831603static unsigned gvmmR0SchedDoWakeUps(PGVMM pGVMM, uint64_t u64Now)
    15841604{
     1605    /*
     1606     * Skip this if we've been disabled because of high resolution wakeups or by
     1607     * the user.
     1608     */
     1609    if (   !pGVMM->nsEarlyWakeUp1
     1610        && !pGVMM->nsEarlyWakeUp2)
     1611        return 0;
     1612
    15851613/** @todo Rewrite this algorithm. See performance defect XYZ. */
     1614
     1615    /*
     1616     * A cheap optimization to stop wasting so much time here on big setups.
     1617     */
     1618    const uint64_t  uNsEarlyWakeUp2 = u64Now + pGVMM->nsEarlyWakeUp2;
     1619    if (   pGVMM->cHaltedEMTs == 0
     1620        || uNsEarlyWakeUp2 > pGVMM->uNsNextEmtWakeup)
     1621        return 0;
    15861622
    15871623    /*
     
    15891625     * and look for VMs that should be woken up in the 2nd and 3rd passes.
    15901626     */
    1591     unsigned cWoken = 0;
    1592     unsigned cHalted = 0;
    1593     unsigned cTodo2nd = 0;
    1594     unsigned cTodo3rd = 0;
     1627    const uint64_t  uNsEarlyWakeUp1 = u64Now + pGVMM->nsEarlyWakeUp1;
     1628    uint64_t        u64Min          = UINT64_MAX;
     1629    unsigned        cWoken          = 0;
     1630    unsigned        cHalted         = 0;
     1631    unsigned        cTodo2nd        = 0;
     1632    unsigned        cTodo3rd        = 0;
    15951633    for (unsigned i = pGVMM->iUsedHead, cGuard = 0;
    15961634         i != NIL_GVM_HANDLE && i < RT_ELEMENTS(pGVMM->aHandles);
     
    16031641            for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
    16041642            {
    1605                 PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
    1606 
    1607                 uint64_t u64 = pCurGVCpu->gvmm.s.u64HaltExpire;
     1643                PGVMCPU     pCurGVCpu = &pCurGVM->aCpus[idCpu];
     1644                uint64_t    u64       = ASMAtomicUoReadU64(&pCurGVCpu->gvmm.s.u64HaltExpire);
    16081645                if (u64)
    16091646                {
     
    16201657                    {
    16211658                        cHalted++;
    1622                         if (u64 <= u64Now + pGVMM->nsEarlyWakeUp1)
     1659                        if (u64 <= uNsEarlyWakeUp1)
    16231660                            cTodo2nd++;
    1624                         else if (u64 <= u64Now + pGVMM->nsEarlyWakeUp2)
     1661                        else if (u64 <= uNsEarlyWakeUp2)
    16251662                            cTodo3rd++;
     1663                        else if (u64 < u64Min)
     1664                            u64 = u64Min;
    16261665                    }
    16271666                }
     
    16431682                for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
    16441683                {
    1645                     PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
    1646 
    1647                     if (    pCurGVCpu->gvmm.s.u64HaltExpire
    1648                         &&  pCurGVCpu->gvmm.s.u64HaltExpire <= u64Now + pGVMM->nsEarlyWakeUp1)
     1684                    PGVMCPU     pCurGVCpu = &pCurGVM->aCpus[idCpu];
     1685                    uint64_t    u64       = ASMAtomicUoReadU64(&pCurGVCpu->gvmm.s.u64HaltExpire);
     1686                    if (   u64
     1687                        && u64 <= uNsEarlyWakeUp1)
    16491688                    {
    16501689                        if (ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0))
     
    16731712                for (VMCPUID idCpu = 0; idCpu < pCurGVM->cCpus; idCpu++)
    16741713                {
    1675                     PGVMCPU pCurGVCpu = &pCurGVM->aCpus[idCpu];
    1676 
    1677                     if (    pCurGVCpu->gvmm.s.u64HaltExpire
    1678                         &&  pCurGVCpu->gvmm.s.u64HaltExpire <= u64Now + pGVMM->nsEarlyWakeUp2)
     1714                    PGVMCPU     pCurGVCpu = &pCurGVM->aCpus[idCpu];
     1715                    uint64_t    u64       = ASMAtomicUoReadU64(&pCurGVCpu->gvmm.s.u64HaltExpire);
     1716                    if (   u64
     1717                        && u64 <= uNsEarlyWakeUp2)
    16791718                    {
    16801719                        if (ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0))
     
    16911730    }
    16921731
     1732    /*
     1733     * Set the minimum value.
     1734     */
     1735    pGVMM->uNsNextEmtWakeup = u64Min;
     1736
    16931737    return cWoken;
    16941738}
     
    17321776    pCurGVCpu->gvmm.s.iCpuEmt = ASMGetApicId();
    17331777
      1778    /* GIP hack: We may frequently be sleeping for short intervals where the
     1779       difference between GIP and system time matters on systems with high resolution
     1780       system time. So, convert the input from GIP to System time in that case. */
    17341781    Assert(ASMGetFlags() & X86_EFL_IF);
    1735     const uint64_t u64Now = RTTimeNanoTS(); /* (GIP time) */
    1736     pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64Now);
     1782    const uint64_t u64NowSys = RTTimeSystemNanoTS();
     1783    const uint64_t u64NowGip = RTTimeNanoTS();
     1784    pGVM->gvmm.s.StatsSched.cHaltWakeUps += gvmmR0SchedDoWakeUps(pGVMM, u64NowGip);
    17371785
    17381786    /*
    17391787     * Go to sleep if we must...
    1740      */
    1741     if (    u64Now < u64ExpireGipTime
    1742         &&  u64ExpireGipTime - u64Now > (pGVMM->cEMTs > pGVMM->cEMTsMeansCompany
    1743                                          ? pGVMM->nsMinSleepCompany
    1744                                          : pGVMM->nsMinSleepAlone))
     1788     * Cap the sleep time to 1 second to be on the safe side.
     1789     */
     1790    uint64_t cNsInterval = u64ExpireGipTime - u64NowGip;
     1791    if (    u64NowGip < u64ExpireGipTime
     1792        &&  cNsInterval >= (pGVMM->cEMTs > pGVMM->cEMTsMeansCompany
     1793                            ? pGVMM->nsMinSleepCompany
     1794                            : pGVMM->nsMinSleepAlone))
    17451795    {
    17461796        pGVM->gvmm.s.StatsSched.cHaltBlocking++;
    1747         ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, u64ExpireGipTime);
     1797        if (cNsInterval > RT_NS_1SEC)
     1798            u64ExpireGipTime = u64NowGip + RT_NS_1SEC;
     1799        if (u64ExpireGipTime < pGVMM->uNsNextEmtWakeup)
     1800            pGVMM->uNsNextEmtWakeup = u64ExpireGipTime;
     1801        ASMAtomicWriteU64(&pCurGVCpu->gvmm.s.u64HaltExpire, u64ExpireGipTime);
     1802        ASMAtomicIncU32(&pGVMM->cHaltedEMTs);
    17481803        gvmmR0UsedUnlock(pGVMM);
    17491804
    1750         uint32_t cMillies = (u64ExpireGipTime - u64Now) / 1000000;
    1751         /* Cap the timeout to one second. */
    1752         cMillies = RT_MIN(1000, cMillies);
    1753         rc = RTSemEventMultiWaitNoResume(pCurGVCpu->gvmm.s.HaltEventMulti, cMillies ? cMillies : 1);
    1754         ASMAtomicXchgU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0);
    1755         if (rc == VERR_TIMEOUT)
     1805        rc = RTSemEventMultiWaitEx(pCurGVCpu->gvmm.s.HaltEventMulti,
     1806                                   RTSEMWAIT_FLAGS_ABSOLUTE | RTSEMWAIT_FLAGS_NANOSECS | RTSEMWAIT_FLAGS_INTERRUPTIBLE,
     1807                                   u64NowGip > u64NowSys ? u64ExpireGipTime : u64NowSys + cNsInterval);
     1808
     1809        ASMAtomicWriteU64(&pCurGVCpu->gvmm.s.u64HaltExpire, 0);
     1810        ASMAtomicDecU32(&pGVMM->cHaltedEMTs);
     1811
     1812        /* Reset the semaphore to try prevent a few false wake-ups. */
     1813        if (rc == VINF_SUCCESS)
     1814            RTSemEventMultiReset(pCurGVCpu->gvmm.s.HaltEventMulti);
     1815        else if (rc == VERR_TIMEOUT)
    17561816        {
    17571817            pGVM->gvmm.s.StatsSched.cHaltTimeouts++;
     
    17631823        pGVM->gvmm.s.StatsSched.cHaltNotBlocking++;
    17641824        gvmmR0UsedUnlock(pGVMM);
    1765     }
    1766 
    1767     /* Make sure false wake up calls (gvmmR0SchedDoWakeUps) cause us to spin. */
    1768     RTSemEventMultiReset(pCurGVCpu->gvmm.s.HaltEventMulti);
     1825        RTSemEventMultiReset(pCurGVCpu->gvmm.s.HaltEventMulti);
     1826    }
    17691827
    17701828    return rc;
     
    17991857    {
    18001858        rc = VINF_SUCCESS;
    1801         ASMAtomicXchgU64(&pGVCpu->gvmm.s.u64HaltExpire, 0);
     1859        ASMAtomicWriteU64(&pGVCpu->gvmm.s.u64HaltExpire, 0);
    18021860    }
    18031861    else
     
    21322190            pCpu->Ppt.iTickHistorization    = 0;
    21332191            pCpu->Ppt.uTimerHz              = uHistMaxHz;
    2134             uint32_t const cNsInterval      = UINT32_C(1000000000) / uHistMaxHz;
     2192            uint32_t const cNsInterval      = RT_NS_1SEC / uHistMaxHz;
    21352193            pCpu->Ppt.cNsInterval           = cNsInterval;
    21362194            if (cNsInterval < GVMMHOSTCPU_PPT_HIST_INTERVAL_NS)
     
    22182276            pCpu->Ppt.iTickHistorization    = 0;
    22192277            pCpu->Ppt.uTimerHz              = uHz;
    2220             pCpu->Ppt.cNsInterval           = cNsInterval = UINT32_C(1000000000) / uHz;
     2278            pCpu->Ppt.cNsInterval           = cNsInterval = RT_NS_1SEC / uHz;
    22212279            if (cNsInterval < GVMMHOSTCPU_PPT_HIST_INTERVAL_NS)
    22222280                pCpu->Ppt.cTicksHistoriziationInterval = (  GVMMHOSTCPU_PPT_HIST_INTERVAL_NS
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette