VirtualBox

Changeset 22039 in vbox for trunk/src/VBox/VMM


Timestamp:
Aug 6, 2009 4:31:07 PM
Author:
vboxsync
Message:

PDMCritSectLeave: It's not safe to call SUPSemEventSignal with interrupts disabled anywhere atm.

File:
1 edited

Legend:

  ' ' Unmodified
  '+' Added
  '-' Removed
  • trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp

--- trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp (r21591)
+++ trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp (r22039)
@@ -329,85 +329,99 @@
     }

+#ifdef IN_RING0
+# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
+    if (1) /* SUPSemEventSignal is safe */
+# else
+    if (ASMIntAreEnabled())
+# endif
+#endif
 #if defined(IN_RING3) || defined(IN_RING0)
-    /*
-     * Leave for real.
-     */
-    /* update members. */
+    {
+        /*
+         * Leave for real.
+         */
+        /* update members. */
 # ifdef IN_RING3
-    RTSEMEVENT hEventToSignal    = pCritSect->s.EventToSignal;
-    pCritSect->s.EventToSignal   = NIL_RTSEMEVENT;
+        RTSEMEVENT hEventToSignal    = pCritSect->s.EventToSignal;
+        pCritSect->s.EventToSignal   = NIL_RTSEMEVENT;
 #  if defined(PDMCRITSECT_STRICT)
-    if (pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD)
-        RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner);
-    ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD);
+        if (pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD)
+            RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner);
+        ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD);
 #  endif
 # endif
-    ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
-    Assert(pCritSect->s.Core.Strict.ThreadOwner == NIL_RTTHREAD);
-    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
-    ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
-
-    /* stop and decrement lockers. */
-    STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
-    ASMCompilerBarrier();
-    if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
-    {
-        /* Someone is waiting, wake up one of them. */
-        SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
-        PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
-        int rc = SUPSemEventSignal(pSession, hEvent);
-        AssertRC(rc);
-    }
+        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
+        Assert(pCritSect->s.Core.Strict.ThreadOwner == NIL_RTTHREAD);
+        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
+        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
+
+        /* stop and decrement lockers. */
+        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
+        ASMCompilerBarrier();
+        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
+        {
+            /* Someone is waiting, wake up one of them. */
+            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
+            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
+            int rc = SUPSemEventSignal(pSession, hEvent);
+            AssertRC(rc);
+        }

 # ifdef IN_RING3
-    /* Signal exit event. */
-    if (hEventToSignal != NIL_RTSEMEVENT)
-    {
-        LogBird(("Signalling %#x\n", hEventToSignal));
-        int rc = RTSemEventSignal(hEventToSignal);
-        AssertRC(rc);
-    }
+        /* Signal exit event. */
+        if (hEventToSignal != NIL_RTSEMEVENT)
+        {
+            LogBird(("Signalling %#x\n", hEventToSignal));
+            int rc = RTSemEventSignal(hEventToSignal);
+            AssertRC(rc);
+        }
 # endif

 # if defined(DEBUG_bird) && defined(IN_RING0)
-    VMMTrashVolatileXMMRegs();
-# endif
-
-#else  /* IN_RC */
-    /*
-     * Try leave it.
-     */
-    if (pCritSect->s.Core.cLockers == 0)
-    {
-        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
-        RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
-        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
-        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
-
-        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
-        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
-            return;
-
-        /* darn, someone raced in on us. */
-        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
-        STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
-        ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
-    }
-    ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);
-
-    /*
-     * Queue the request.
-     */
-    PVM         pVM   = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
-    PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
-    uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
-    LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
-    AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
-    pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
-    VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
-    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
-    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
-    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
-#endif /* IN_RC */
+        VMMTrashVolatileXMMRegs();
+# endif
+    }
+#endif  /* IN_RING3 || IN_RING0 */
+#ifdef IN_RING0
+    else
+#endif
+#if defined(IN_RING0) || defined(IN_RC)
+    {
+        /*
+         * Try leave it.
+         */
+        if (pCritSect->s.Core.cLockers == 0)
+        {
+            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
+            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
+            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
+            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
+
+            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
+            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
+                return;
+
+            /* darn, someone raced in on us. */
+            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
+            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
+            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
+        }
+        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);
+
+        /*
+         * Queue the request.
+         */
+        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
+        PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
+        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
+        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
+        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
+        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
+        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
+        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
+        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
+        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
+    }
+#endif /* IN_RING0 || IN_RC */
 }
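In short, r22039 wraps the ring-0 "leave for real" path in an ASMIntAreEnabled() check: with interrupts disabled, the unlock is queued to ring-3 (the path raw-mode context always used) instead of calling SUPSemEventSignal directly. The following standalone C sketch models that control flow only; ints_enabled(), signal_waiter() and queue_leave_to_r3() are hypothetical stand-ins for the VirtualBox APIs, not the real thing.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins (not VirtualBox APIs) for ASMIntAreEnabled(),
       SUPSemEventSignal() and the VMCPU_FF_PDM_CRITSECT ring-3 queue. */
    static bool ints_enabled(void)      { return true; }
    static void signal_waiter(void)     { puts("signal one waiter"); }
    static void queue_leave_to_r3(void) { puts("queue unlock for ring-3"); }

    /* Shape of the ring-0 leave path after r22039: only "leave for real"
       (which may signal the event semaphore) when interrupts are enabled;
       otherwise defer the unlock to ring-3, as raw-mode context already did. */
    static void critsect_leave_r0(void)
    {
        if (ints_enabled())
            signal_waiter();        /* fast path: wake a contender now  */
        else
            queue_leave_to_r3();    /* slow path: safe with IF cleared  */
    }

    int main(void)
    {
        critsect_leave_r0();
        return 0;
    }

Per the @todo in the added "#if 0" branch, the eventual fix is to make SUPSemEventSignal itself interrupt safe (handle table work), after which the guard could be short-circuited on Linux and OS/2.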