VirtualBox

Changeset 90504 in vbox for trunk/src/VBox/VMM


Timestamp: Aug 3, 2021 9:24:16 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 146108
Message:

VMM/PDM: Rewrote the PDMCritSectLeave code to try to avoid going to ring-3 when there is contention (partially disabled). bugref:6695
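
In rough terms the rewritten leave path works like this: if the current context can safely signal the event semaphore, leave normally and wake one waiter; otherwise try to flip the section straight from "owned, uncontended" to "free" with a single compare-and-swap, and only when that CAS loses a race against an incoming contender re-take ownership and queue the leave for ring-3. A simplified, standalone C11 sketch of the idea follows; every type and helper in it is an illustrative stand-in, not a real VBox API (the closest real counterparts are noted in comments).

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins; none of these are the real VBox types or APIs. */
    typedef struct CRITSECT
    {
        atomic_int  cLockers;   /* -1 = free, 0 = owned, > 0 = owned with waiters */
        atomic_long hOwner;     /* native owner thread, 0 = none */
    } CRITSECT;

    static bool SignalIsSafeHere(void) { return false; }         /* cf. RTSemEventIsSignalSafe */
    static void WakeOneWaiter(CRITSECT *pCs) { (void)pCs; }      /* cf. SUPSemEventSignal */
    static void QueueLeaveForRing3(CRITSECT *pCs) { (void)pCs; } /* cf. VMCPU_FF_PDM_CRITSECT */

    static void CritSectLeave(CRITSECT *pCs, long hSelf)
    {
        atomic_store(&pCs->hOwner, 0);
        if (SignalIsSafeHere())
        {
            /* Normal path: drop the lock and wake a waiter if the count
               says somebody queued up behind us. */
            if (atomic_fetch_sub(&pCs->cLockers, 1) - 1 >= 0)
                WakeOneWaiter(pCs);
            return;
        }

        /* Restricted context: try to flip the count from "owned, no
           waiters" (0) straight to "free" (-1). */
        int iExpected = 0;
        if (atomic_compare_exchange_strong(&pCs->cLockers, &iExpected, -1))
            return;                     /* clean leave, nobody to wake */

        /* Someone raced in while we were leaving: take ownership back and
           let ring-3 finish the leave and do the wakeup. */
        long hNone = 0;
        atomic_compare_exchange_strong(&pCs->hOwner, &hNone, hSelf);
        QueueLeaveForRing3(pCs);
    }

    int main(void)
    {
        CRITSECT Cs = { 0, 42 };
        CritSectLeave(&Cs, 42);
        printf("cLockers=%d\n", atomic_load(&Cs.cLockers));
        return 0;
    }

The owner-restoring CAS at the end is only sound because the event semaphore, not the owner field, decides who may enter next; the committed code relies on the same property (see the "someone raced in on us" comments in the diff below).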

Location: trunk/src/VBox/VMM
Files: 2 edited

Legend:

  ' '  unmodified
  '+'  added
  '-'  removed
  • trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp (r90486 → r90504)
    @@ -35 +35 @@
     #ifdef IN_RING3
     # include <iprt/lockvalidator.h>
    +#endif
    +#if defined(IN_RING3) || defined(IN_RING0)
     # include <iprt/semaphore.h>
     #endif
     
    @@ -88 +90 @@
     
     
    +#ifdef IN_RING0
    +/**
    + * Marks the critical section as corrupted.
    + */
    +DECL_NO_INLINE(static, int) pdmCritSectCorrupted(PPDMCRITSECT pCritSect, const char *pszMsg)
    +{
    +    ASMAtomicWriteU32(&pCritSect->s.Core.u32Magic, PDMCRITSECT_MAGIC_CORRUPTED);
    +    LogRel(("PDMCritSect: %s pCritSect=%p\n", pszMsg, pCritSect));
    +    return VERR_PDM_CRITSECT_IPE;
    +}
    +#endif
    +
    +
     /**
      * Tail code called when we've won the battle for the lock.
     
    @@ -155 +170 @@
          * This handles VERR_TIMEOUT and VERR_INTERRUPTED.
          */
    -    STAM_REL_PROFILE_START(&pCritSect->s.StatWait, a);
    +    STAM_REL_PROFILE_START(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
         PSUPDRVSESSION const    pSession    = pVM->pSession;
         SUPSEMEVENT const       hEvent      = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
     
    @@ -224 +239 @@
            if (rc == VINF_SUCCESS)
            {
    -            STAM_REL_PROFILE_STOP(&pCritSect->s.StatContentionWait, a);
    +            STAM_REL_PROFILE_STOP(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
                return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
            }
     
    @@ -330 +345 @@
                                {
                                    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectEntersWhileAborting);
    -                                STAM_REL_PROFILE_STOP(&pCritSect->s.StatContentionWait, a);
    +                                STAM_REL_PROFILE_STOP(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
                                    return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
                                }
     
    @@ -392 +407 @@
        Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
        Assert(pCritSect->s.Core.cNestings >= 0);
    +#if defined(VBOX_STRICT) && defined(IN_RING0)
    +    /* Hope we're not messing with critical sections while in the no-block
    +       zone, that would complicate things a lot. */
    +    PVMCPUCC pVCpuAssert = VMMGetCpu(pVM);
    +    Assert(pVCpuAssert && VMMRZCallRing3IsEnabled(pVCpuAssert));
    +#endif
     
        /*
     
    @@ -739 +760 @@
      * @param   pVM         The cross context VM structure.
      * @param   pCritSect   The PDM critical section to leave.
    + *
    + * @remarks Can be called from no-ring-3-call context in ring-0 (TM/VirtualSync)
    + *          where we'll queue the leave operation for ring-3 processing.
      */
     VMMDECL(int) PDMCritSectLeave(PVMCC pVM, PPDMCRITSECT pCritSect)
     
    @@ -745 +769 @@
        Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
     
    -    /* Check for NOP sections before asserting ownership. */
    +    /*
    +     * Check for NOP sections before asserting ownership.
    +     */
        if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
        { /* We're more likely to end up here with real critsects than a NOP one. */ }
     
    @@ -768 +794 @@
        if (cNestings > 1)
        {
    -# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
    +#ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
            pCritSect->s.Core.cNestings = cNestings - 1;
    -# else
    +#else
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, cNestings - 1);
    -# endif
    +#endif
            ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
            Assert(pCritSect->s.Core.cLockers >= 0);
     
    @@ -778 +804 @@
        }
     
    -#ifdef IN_RING0
    -# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    -    if (1) /* SUPSemEventSignal is safe */
    -# else
    -    if (ASMIntAreEnabled())
    -# endif
    -#endif
    -#if defined(IN_RING3) || defined(IN_RING0)
    -    {
    -        /*
    -         * Leave for real.
    -         */
    -        /* update members. */
    -        SUPSEMEVENT hEventToSignal  = pCritSect->s.hEventToSignal;
    -        pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
    +
    +#ifdef IN_RING3
    +    /*
    +     * Ring-3: Leave for real.
    +     */
    +    SUPSEMEVENT const hEventToSignal = pCritSect->s.hEventToSignal;
    +    pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
    +
     # ifdef IN_RING3
     #  if defined(PDMCRITSECT_STRICT)
    -        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
    -            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
    +    if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
    +        RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
     #  endif
    -        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
    -# endif
    +    Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
    +# endif
    +
    +# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
    +    //pCritSect->s.Core.cNestings = 0; /* not really needed */
    +    pCritSect->s.Core.NativeThreadOwner = NIL_RTNATIVETHREAD;
    +# else
    +    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
    +    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
    +# endif
    +    ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
    +
    +    /* Stop profiling and decrement lockers. */
    +    STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
    +    ASMCompilerBarrier();
    +    int32_t const cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
    +    if (cLockers < 0)
    +        AssertMsg(cLockers == -1, ("cLockers=%d\n", cLockers));
    +    else
    +    {
    +        /* Someone is waiting, wake up one of them. */
    +        Assert(cLockers < _8K);
    +        SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
    +        int rc = SUPSemEventSignal(pVM->pSession, hEvent);
    +        AssertRC(rc);
    +    }
    +
    +    /* Signal exit event. */
    +    if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
    +    { /* likely */ }
    +    else
    +    {
    +        Log8(("Signalling %#p\n", hEventToSignal));
    +        int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
    +        AssertRC(rc);
    +    }
    +
    +    return VINF_SUCCESS;
    +
    +
    +#elif defined(IN_RING0)
    +    /*
    +     * Ring-0: Try leave for real, depends on host and context.
    +     */
    +    SUPSEMEVENT const hEventToSignal = pCritSect->s.hEventToSignal;
    +    pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
    +    PVMCPUCC pVCpu           = VMMGetCpu(pVM);
    +    bool     fQueueOnTrouble = true;
    +    if (   pVCpu == NULL /* non-EMT access, if we implement it must be able to block */
    +        || VMMRZCallRing3IsEnabled(pVCpu)
    +        || RTSemEventIsSignalSafe()
    +        || (fQueueOnTrouble = (   hEventToSignal == NIL_SUPSEMEVENT
    +                               && ASMAtomicUoReadS32(&pCritSect->s.Core.cLockers) == 0)) )
    +    {
    +        pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
    +
     # ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
             //pCritSect->s.Core.cNestings = 0; /* not really needed */
     
    @@ -809 +882 @@
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
     
    -        /* stop and decrement lockers. */
    +        /*
    +         * Stop profiling and decrement lockers.
    +         */
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
            ASMCompilerBarrier();
    -        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) < 0)
    -        { /* hopefully likely */ }
    +
    +        bool    fQueueIt = false;
    +        int32_t cLockers;
    +        if (!fQueueOnTrouble)
    +            cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
            else
            {
    -            /* Someone is waiting, wake up one of them. */
    -            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
    -            PSUPDRVSESSION  pSession = pVM->pSession;
    -            int rc = SUPSemEventSignal(pSession, hEvent);
    -            AssertRC(rc);
    +            cLockers = -1;
    +            if (!ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
    +                fQueueIt = true;
            }
    -
    -        /* Signal exit event. */
    -        if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
    -        { /* likely */ }
    -        else
    +        if (!fQueueIt)
            {
    -            Log8(("Signalling %#p\n", hEventToSignal));
    -            int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
    -            AssertRC(rc);
    +            VMMR0EMTBLOCKCTX    Ctx;
    +            bool                fLeaveCtx = false;
    +            if (cLockers < 0)
    +                AssertMsg(cLockers == -1, ("cLockers=%d\n", cLockers));
    +            else
    +            {
    +                /* Someone is waiting, wake up one of them. */
    +                Assert(cLockers < _8K);
    +                SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
    +                if (!RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
    +                {
    +                    int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
    +                    AssertReleaseRCReturn(rc, rc);
    +                    fLeaveCtx = true;
    +                }
    +                int rc = SUPSemEventSignal(pVM->pSession, hEvent);
    +                AssertRC(rc);
    +            }
    +
    +            /*
    +             * Signal exit event.
    +             */
    +            if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
    +            { /* likely */ }
    +            else
    +            {
    +                if (!fLeaveCtx && pVCpu != NULL && !RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
    +                {
    +                    int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
    +                    AssertReleaseRCReturn(rc, rc);
    +                    fLeaveCtx = true;
    +                }
    +                Log8(("Signalling %#p\n", hEventToSignal));
    +                int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
    +                AssertRC(rc);
    +            }
    +
    +            /*
    +             * Restore HM context if needed.
    +             */
    +            if (!fLeaveCtx)
    +            { /* contention should be unlikely */ }
    +            else
    +                VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
    +
    +# ifdef DEBUG_bird
    +            VMMTrashVolatileXMMRegs();
    +# endif
    +            return VINF_SUCCESS;
            }
     
    -# if defined(DEBUG_bird) && defined(IN_RING0)
    -        VMMTrashVolatileXMMRegs();
    -# endif
    +        /*
    +         * Darn, someone raced in on us.  Restore the state (this works only
    +         * because the semaphore is effectively controlling ownership).
    +         */
    +        bool fRc;
    +        RTNATIVETHREAD hMessOwner = NIL_RTNATIVETHREAD;
    +        ASMAtomicCmpXchgExHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf, NIL_RTNATIVETHREAD, fRc, &hMessOwner);
    +        AssertLogRelMsgReturn(fRc, ("pCritSect=%p hMessOwner=%p\n", pCritSect, hMessOwner),
    +                              pdmCritSectCorrupted(pCritSect, "owner race"));
    +        STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    +# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
    +        //pCritSect->s.Core.cNestings = 1;
    +        Assert(pCritSect->s.Core.cNestings == 1);
    +# else
    +        //Assert(pCritSect->s.Core.cNestings == 0);
    +        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    +# endif
    +        Assert(hEventToSignal == NIL_SUPSEMEVENT);
        }
    -#endif  /* IN_RING3 || IN_RING0 */
    -#ifdef IN_RING0
    -    else
    -#endif
    -#if defined(IN_RING0) || defined(IN_RC)
    +
    +
    +#else /* IN_RC */
    +    /*
    +     * Raw-mode: Try leave it.
    +     */
    +# error "This context is not used..."
    +    if (pCritSect->s.Core.cLockers == 0)
        {
    +# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
    +        //pCritSect->s.Core.cNestings = 0; /* not really needed */
    +# else
    +        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
    +# endif
    +        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
    +        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
    +
    +        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
    +        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
    +            return VINF_SUCCESS;
    +
            /*
    -         * Try leave it.
    +         * Darn, someone raced in on us.  Restore the state (this works only
    +         * because the semaphore is effectively controlling ownership).
             */
    -        if (pCritSect->s.Core.cLockers == 0)
    -        {
    +        bool fRc;
    +        RTNATIVETHREAD hMessOwner = NIL_RTNATIVETHREAD;
    +        ASMAtomicCmpXchgExHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf, NIL_RTNATIVETHREAD, fRc, &hMessOwner);
    +        AssertLogRelMsgReturn(fRc, ("pCritSect=%p hMessOwner=%p\n", pCritSect, hMessOwner),
    +                              pdmCritSectCorrupted(pCritSect, "owner race"));
    +        STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
     # ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
    -            //pCritSect->s.Core.cNestings = 0; /* not really needed */
    +        //pCritSect->s.Core.cNestings = 1;
    +        Assert(pCritSect->s.Core.cNestings == 1);
     # else
    -            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
    -# endif
    -            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
    -            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
    -            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
    -
    -            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
    -            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
    -                return VINF_SUCCESS;
    -
    -            /* darn, someone raced in on us. */
    -            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
    -            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    -# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
    -            //pCritSect->s.Core.cNestings = 1;
    -            Assert(pCritSect->s.Core.cNestings == 1);
    -# else
    -            //Assert(pCritSect->s.Core.cNestings == 0);
    -            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    -# endif
    -        }
    -        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);
    -
    -        /*
    -         * Queue the request.
    -         */
    -        PVMCPUCC    pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
    -        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
    -        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
    -        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
    -/** @todo This doesn't work any more for devices. */
    -        pVCpu->pdm.s.apQueuedCritSectLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
    -        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
    -        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
    -        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
    -        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    +        //Assert(pCritSect->s.Core.cNestings == 0);
    +        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    +# endif
        }
    -#endif /* IN_RING0 || IN_RC */
    +#endif /* IN_RC */
    +
    +
    +#ifndef IN_RING3
    +    /*
    +     * Ring-0/raw-mode: Unable to leave. Queue the leave for ring-3.
    +     */
    +    ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);
    +# ifndef IN_RING0
    +    PVMCPUCC    pVCpu = VMMGetCpu(pVM);
    +# endif
    +    uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
    +    LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
    +    AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
    +    pVCpu->pdm.s.apQueuedCritSectLeaves[i] = pCritSect->s.pSelfR3;
    +    VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT); /** @todo handle VMCPU_FF_PDM_CRITSECT in ring-0 outside the no-call-ring-3 part. */
    +    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* unnecessary paranoia */
    +    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
    +    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
     
        return VINF_SUCCESS;
    +#endif /* IN_RING3 */
     }
     
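A detail of the ring-0 hunk worth calling out: when RTSemEventIsSignalSafe() reports that the host's event-signalling primitive may block in the current context, the new code brackets the SUPSemEventSignal call between VMMR0EmtPrepareToBlock and VMMR0EmtResumeAfterBlocking so the EMT can block safely and have its context restored afterwards. Below is a minimal sketch of that bracketing pattern; the names and signatures are made up for illustration and only mirror the shape of the real VMM calls.

    #include <stdbool.h>

    /* Made-up stand-in for VMMR0EMTBLOCKCTX. */
    typedef struct BLOCKCTX { int iSavedState; } BLOCKCTX;

    static bool SignalIsSafe(void) { return false; }   /* cf. RTSemEventIsSignalSafe */
    static int  PrepareToBlock(BLOCKCTX *pCtx) { pCtx->iSavedState = 1; return 0; }
    static void ResumeAfterBlocking(BLOCKCTX *pCtx) { pCtx->iSavedState = 0; }
    static int  SignalEvent(void) { return 0; }        /* the call that may block */

    /* Only pay for the save/restore when the signal primitive cannot be
       called safely from the current context. */
    static int SignalWithBlockBracket(void)
    {
        BLOCKCTX Ctx;
        bool     fLeaveCtx = false;
        if (!SignalIsSafe())
        {
            int rc = PrepareToBlock(&Ctx);     /* cf. VMMR0EmtPrepareToBlock */
            if (rc != 0)
                return rc;                     /* cf. AssertReleaseRCReturn */
            fLeaveCtx = true;
        }
        int rc = SignalEvent();                /* cf. SUPSemEventSignal */
        if (fLeaveCtx)
            ResumeAfterBlocking(&Ctx);         /* cf. VMMR0EmtResumeAfterBlocking */
        return rc;
    }

    int main(void)
    {
        return SignalWithBlockBracket();
    }
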
  • trunk/src/VBox/VMM/include/PDMInternal.h (r90486 → r90504)
    @@ -433 +433 @@
        /** Support driver event semaphore that is scheduled to be signaled upon leaving
         * the critical section. This is only for Ring-3 and Ring-0. */
    -    SUPSEMEVENT                     hEventToSignal;
    +    SUPSEMEVENT volatile            hEventToSignal;
        /** The lock name. */
        R3PTRTYPE(const char *)         pszName;
    +    /** The ring-3 pointer to this critical section, for leave queueing. */
    +    R3PTRTYPE(PPDMCRITSECT)         pSelfR3;
        /** R0/RC lock contention. */
        STAMCOUNTER                     StatContentionRZLock;
    -    /** R0/RC lock contention, returning rcBusy or VERR_SEM_BUSY (try). */
    +    /** R0/RC lock contention: returning rcBusy or VERR_SEM_BUSY (try). */
        STAMCOUNTER                     StatContentionRZLockBusy;
    +    /** R0/RC lock contention: Profiling waiting time. */
    +    STAMPROFILE                     StatContentionRZWait;
        /** R0/RC unlock contention. */
        STAMCOUNTER                     StatContentionRZUnlock;
        /** R3 lock contention. */
        STAMCOUNTER                     StatContentionR3;
    -    /** Profiling waiting on the lock (all rings). */
    -    STAMPROFILE                     StatContentionWait;
    +    /** R3 lock contention: Profiling waiting time. */
    +    STAMPROFILE                     StatContentionR3Wait;
        /** Profiling the time the section is locked. */
        STAMPROFILEADV                  StatLocked;
     
    @@ -456 +460 @@
      * timeout, interruption or pending thread termination. */
     #define PDMCRITSECT_MAGIC_FAILED_ABORT      UINT32_C(0x0bad0326)
    +/** Special magic value set if we detected data/state corruption. */
    +#define PDMCRITSECT_MAGIC_CORRUPTED         UINT32_C(0x0bad2603)
     
     /** Indicates that the critical section is queued for unlock.
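
The new PDMCRITSECT_MAGIC_CORRUPTED value pairs with the pdmCritSectCorrupted() helper added in the first file: on detecting an impossible state, the magic field is atomically poisoned so every subsequent magic check fails fast instead of operating on corrupt data. A standalone sketch of that pattern (the valid magic and error codes below are placeholders; only the corrupted value is taken from the diff):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SECT_MAGIC           UINT32_C(0x19541205)  /* placeholder, not the real RTCRITSECT_MAGIC */
    #define SECT_MAGIC_CORRUPTED UINT32_C(0x0bad2603)  /* PDMCRITSECT_MAGIC_CORRUPTED from the diff */

    typedef struct SECT { _Atomic uint32_t u32Magic; } SECT;

    /* Same shape as pdmCritSectCorrupted(): atomically poison the magic so
       every later magic check fails fast, log once, return an error status. */
    static int SectCorrupted(SECT *pSect, const char *pszMsg)
    {
        atomic_store(&pSect->u32Magic, SECT_MAGIC_CORRUPTED);
        fprintf(stderr, "critsect: %s pSect=%p\n", pszMsg, (void *)pSect);
        return -1;                        /* cf. VERR_PDM_CRITSECT_IPE */
    }

    /* Every operation checks the magic first, so one poisoning stops all
       further use of the section. */
    static int SectEnter(SECT *pSect)
    {
        if (atomic_load(&pSect->u32Magic) != SECT_MAGIC)
            return -2;
        /* ... real locking would go here ... */
        return 0;
    }

    int main(void)
    {
        SECT Sect = { SECT_MAGIC };
        SectCorrupted(&Sect, "owner race");          /* e.g. a failed owner CAS */
        printf("enter after poisoning: %d\n", SectEnter(&Sect));
        return 0;
    }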