VirtualBox

Changeset 90659 in vbox


Ignore:
Timestamp:
Aug 12, 2021 12:18:35 PM (4 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
146271
Message:

VMM/PDMCritSectRwLeaveExcl: Signal waiters from ring-0/HM. Had to change PDMCritSectRwEnterExcl to play fair with other waiters, or the testcase would trigger the 'iLoop < 1000' assertion on the VMMLockT thread because it was getting starved. bugref:6695

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp

    r90658 r90659  
    3535#ifdef IN_RING3
    3636# include <iprt/lockvalidator.h>
     37#endif
     38#if defined(IN_RING3) || defined(IN_RING0)
    3739# include <iprt/semaphore.h>
    38 #endif
    39 #if defined(IN_RING3) || defined(IN_RING0)
    4040# include <iprt/thread.h>
    4141#endif
     
    11321132    /*
    11331133     * If we're in write mode now try grab the ownership. Play fair if there
    1134      * are threads already waiting, unless we're in ring-0.
     1134     * are threads already waiting.
    11351135     */
    11361136    bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
    1137 #if defined(IN_RING3)
    11381137              && (  ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
    1139                   || fTryOnly)
    1140 #endif
    1141                ;
     1138                  || fTryOnly);
    11421139    if (fDone)
    11431140    {
     
    15141511
    15151512        ASMNopPause();
    1516         if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
     1513        if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
     1514        { /*likely*/ }
     1515        else
    15171516            return VERR_SEM_DESTROYED;
     1517        ASMNopPause();
    15181518    }
    15191519
     
    15211521#elif defined(IN_RING0)
    15221522    /*
    1523      * Update the state.
    1524      */
    1525     if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
    1526         && ASMIntAreEnabled())
     1523     * Ring-0: Try leave for real, depends on host and context.
     1524     */
     1525    Assert(RTSemEventIsSignalSafe() == RTSemEventMultiIsSignalSafe());
     1526    PVMCPUCC pVCpu = VMMGetCpu(pVM);
     1527    if (   pVCpu == NULL /* non-EMT access; if we implement it, it must be able to block */
     1528        || VMMRZCallRing3IsEnabled(pVCpu)
     1529        || RTSemEventIsSignalSafe()
     1530        || (   VMMR0ThreadCtxHookIsEnabled(pVCpu)       /* Doesn't matter if Signal() blocks if we have hooks, ... */
     1531            && RTThreadPreemptIsEnabled(NIL_RTTHREAD)   /* ... and preemption is still enabled, */
     1532            && ASMIntAreEnabled())                      /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
     1533       )
    15271534    {
    15281535# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
     
    15461553                || (u64State & RTCSRW_CNT_RD_MASK) == 0)
    15471554            {
    1548                 /* Don't change the direction, wake up the next writer if any. */
     1555                /*
     1556                 * Don't change the direction, wake up the next writer if any.
     1557                 */
    15491558                u64State &= ~RTCSRW_CNT_WR_MASK;
    15501559                u64State |= c << RTCSRW_CNT_WR_SHIFT;
    15511560                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
    15521561                {
    1553                     if (c > 0)
     1562                    int rc;
     1563                    if (c == 0)
     1564                        rc = VINF_SUCCESS;
     1565                    else if (RTSemEventIsSignalSafe() || pVCpu == NULL)
     1566                        rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
     1567                    else
    15541568                    {
    1555                         int rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
    1556                         AssertRC(rc);
     1569                        VMMR0EMTBLOCKCTX Ctx;
     1570                        rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
     1571                        VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
     1572
     1573                        rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
     1574
     1575                        VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
    15571576                    }
    1558                     break;
     1577                    AssertRC(rc);
     1578                    return rc;
    15591579                }
    15601580            }
    15611581            else
    15621582            {
    1563                 /* Reverse the direction and signal the reader threads. */
     1583                /*
     1584                 * Reverse the direction and signal the reader threads.
     1585                 */
    15641586                u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
    15651587                u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
     
    15681590                    Assert(!pThis->s.Core.fNeedReset);
    15691591                    ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
    1570                     int rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
     1592
     1593                    int rc;
     1594                    if (RTSemEventMultiIsSignalSafe() || pVCpu == NULL)
     1595                        rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
     1596                    else
     1597                    {
     1598                        VMMR0EMTBLOCKCTX Ctx;
     1599                        rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
     1600                        VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
     1601
     1602                        rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
     1603
     1604                        VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
     1605                    }
    15711606                    AssertRC(rc);
    1572                     break;
     1607                    return rc;
    15731608                }
    15741609            }
    15751610
    15761611            ASMNopPause();
    1577             if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
     1612            if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
     1613            { /*likely*/ }
     1614            else
    15781615                return VERR_SEM_DESTROYED;
    1579         }
     1616            ASMNopPause();
     1617        }
     1618        /* not reached! */
    15801619    }
    15811620#endif /* IN_RING0 */
     
    15851624     * Queue the requested exit for ring-3 execution.
    15861625     */
     1626# ifndef IN_RING0
    15871627    PVMCPUCC    pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
     1628# endif
    15881629    uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
    15891630    LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3\n", i, pThis));
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette