VirtualBox

Changeset 90531 in vbox for trunk/src/VBox/VMM


Timestamp: Aug 5, 2021 8:54:46 PM (4 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 146137
Message:

VMM/PDMCritSect: When VMCPU_FF_PDM_CRITSECT is set we must not wait on any lock if we can help it, as we might be sitting on a lock order violation. Corrected an incorrect NIL_RTNATIVETHREAD test in the leave function. Added logging. bugref:6695
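In other words: on the contended ring-0 enter path, if this vCPU already has critical-section leaves queued for ring-3 (VMCPU_FF_PDM_CRITSECT) and the caller supplied a non-VINF_SUCCESS rcBusy, the enter code now hands rcBusy straight back instead of blocking, so the caller can redo the work in ring-3. The stand-alone C sketch below only illustrates that pattern; the MY* types, status codes, and mySectEnterContended() are made-up stand-ins, not the actual PDM/VMM API (the real change is in the diff further down).

#include <stdbool.h>
#include <stdio.h>

/* Illustrative status codes standing in for VINF_SUCCESS and an rcBusy value
   such as VERR_SEM_BUSY; not the real VBox status codes. */
#define MY_SUCCESS        0
#define MY_BUSY_TO_RING3  1

/* Stand-in for the per-vCPU force-action flag VMCPU_FF_PDM_CRITSECT:
   "this CPU has critical-section leaves queued for ring-3". */
typedef struct MYCPU      { bool fPendingCritSectLeaves; } MYCPU;
typedef struct MYCRITSECT { int  cLockers; } MYCRITSECT;

/* Sketch of the new guard on the contended enter path: if leaves are queued
   and the caller gave us a non-success rcBusy, refuse to block and return
   rcBusy so the caller retries in ring-3 instead of waiting here. */
static int mySectEnterContended(MYCPU *pCpu, MYCRITSECT *pSect, int rcBusy)
{
    if (pCpu && pCpu->fPendingCritSectLeaves && rcBusy != MY_SUCCESS)
        return rcBusy;                      /* don't wait, bounce to ring-3 */

    /* ... otherwise spin/block for ownership as before ... */
    pSect->cLockers++;
    return MY_SUCCESS;
}

int main(void)
{
    MYCPU      cpu  = { .fPendingCritSectLeaves = true };
    MYCRITSECT sect = { 0 };

    if (mySectEnterContended(&cpu, &sect, MY_BUSY_TO_RING3) == MY_BUSY_TO_RING3)
        printf("busy: caller would go to ring-3 and redo the operation there\n");
    return 0;
}

Returning rcBusy instead of waiting avoids taking a further lock while a queued leave (and its pending release) is still outstanding, which is where the lock order violation mentioned in the message could otherwise arise.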

File: 1 edited

  • trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp

--- trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp (r90515)
+++ trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp (r90531)
@@ -114 +114 @@
 DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
 {
+    Assert(hNativeSelf != NIL_RTNATIVETHREAD);
     AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
     Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));
@@ -130 +131 @@
     NOREF(pSrcPos);
 # endif
+    if (pSrcPos)
+        Log12Func(("%p: uId=%p ln=%u fn=%s\n", pCritSect, pSrcPos->uId, pSrcPos->uLine, pSrcPos->pszFunction));
+    else
+        Log12Func(("%p\n", pCritSect));
 
     STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
@@ -154 +159 @@
                                          PCRTLOCKVALSRCPOS pSrcPos, int rcBusy)
 {
+# ifdef IN_RING0
+    /*
+     * If we've got queued critical section leave operations and rcBusy isn't
+     * VINF_SUCCESS, return to ring-3 immediately to avoid deadlocks.
+     */
+    if (   !pVCpu
+        || !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT)
+        || rcBusy == VINF_SUCCESS )
+    { /* likely */ }
+    else
+    {
+        /** @todo statistics. */
+        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
+        return rcBusy;
+    }
+# endif
+
     /*
      * Start waiting.
@@ -221 +243 @@
                      ? SUPSemEventWaitNoResume(pSession, hEvent, cMsMaxOne)
                      : SUPSemEventWait(pSession, hEvent, cMsMaxOne);
+        Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hOwner=%p)\n",
+                   pCritSect, rc, RTTimeNanoTS() - tsStart, cMsMaxOne, pCritSect->s.Core.NativeThreadOwner));
 # endif /* IN_RING0 */
 
@@ -319 +343 @@
                         }
                         cCmpXchgs++;
+                        if ((cCmpXchgs & 0xffff) == 0)
+                            Log11Func(("%p: cLockers=%d cCmpXchgs=%u (hOwner=%p)\n",
+                                       pCritSect, cLockers, cCmpXchgs, pCritSect->s.Core.NativeThreadOwner));
                         ASMNopPause();
                         continue;
@@ -446 +473 @@
 # endif
         ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
+        Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, pCritSect->s.Core.cNestings, pCritSect->s.Core.cLockers));
         return VINF_SUCCESS;
     }
@@ -654 +682 @@
 # endif
         ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
+        Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, pCritSect->s.Core.cNestings, pCritSect->s.Core.cLockers));
         return VINF_SUCCESS;
     }
@@ -781 +810 @@
      */
     RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
-    VMM_ASSERT_RELEASE_MSG_RETURN(pVM, pCritSect->s.Core.NativeThreadOwner == hNativeSelf || hNativeSelf == NIL_RTNATIVETHREAD,
+    VMM_ASSERT_RELEASE_MSG_RETURN(pVM, pCritSect->s.Core.NativeThreadOwner == hNativeSelf && hNativeSelf != NIL_RTNATIVETHREAD,
                                   ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                                    pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
@@ -799 +828 @@
         ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, cNestings - 1);
 #endif
-        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
-        Assert(pCritSect->s.Core.cLockers >= 0);
+        int32_t const cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
+        Assert(cLockers >= 0); RT_NOREF(cLockers);
+        Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, cNestings - 1, cLockers));
         return VINF_SEM_NESTED;
     }
 
+    Log12Func(("%p: cNestings=%d cLockers=%d hOwner=%p - leave for real\n",
+               pCritSect, cNestings, pCritSect->s.Core.cLockers, pCritSect->s.Core.NativeThreadOwner));
 
 #ifdef IN_RING3
@@ -839 +871 @@
         /* Someone is waiting, wake up one of them. */
         Assert(cLockers < _8K);
+        Log8(("PDMCritSectLeave: Waking up %p (cLockers=%u)\n", pCritSect, cLockers));
         SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
         int rc = SUPSemEventSignal(pVM->pSession, hEvent);
@@ -849 +882 @@
     else
     {
-        Log8(("Signalling %#p\n", hEventToSignal));
+        Log8(("PDMCritSectLeave: Signalling %#p (%p)\n", hEventToSignal, pCritSect));
         int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
         AssertRC(rc);