VirtualBox

Changeset 31392 in vbox for trunk/src/VBox/VMM


Timestamp: Aug 5, 2010 11:41:45 AM
Author: vboxsync
Message:

PDMCritSectEnter: Wait for critical sections in ring-0 when preemption and interrupts are enabled. Sketches for how we can wait from VT-x/AMD-V context.
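In outline: the contended path in ring-0 now blocks on the critsect's event semaphore, but only when it is provably safe to sleep, i.e. when both thread preemption and interrupts are enabled; in any other state (notably VT-x/AMD-V execution context) it keeps the old behaviour of returning rcBusy so the caller falls back to ring-3. A minimal sketch of that gate, condensed from the diff below (error paths and statistics omitted):

    /* Ring-0 slow path of PDMCritSectEnter, condensed from the diff below. */
    #ifdef IN_RING0
        /* Blocking is only safe when the scheduler can preempt us and
           interrupts are on; neither holds in VT-x/AMD-V context. */
        if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
            && ASMIntAreEnabled())
            return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
        return rcBusy; /* caller takes the lock from ring-3 instead */
    #endif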

File (1 edited):

  • trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp

r30328 → r31392:

@@ -103 +103 @@
-#ifdef IN_RING3
-/**
- * Deals with the contended case in ring-3.
+#if defined(IN_RING3) || defined(IN_RING0)
+/**
+ * Deals with the contended case in ring-3 and ring-0.
  *
  * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
…
  * @param   hNativeSelf         The native thread handle.
  */
-static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
+static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
 {
     /*
…
     if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
         return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
+# ifdef IN_RING3
     STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
+# else
+    STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
+# endif

     /*
…
     PSUPDRVSESSION  pSession    = pCritSect->s.CTX_SUFF(pVM)->pSession;
     SUPSEMEVENT     hEvent      = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
-# ifdef PDMCRITSECT_STRICT
+# ifdef IN_RING3
+#  ifdef PDMCRITSECT_STRICT
     RTTHREAD        hThreadSelf = RTThreadSelfAutoAdopt();
     int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
     if (RT_FAILURE(rc2))
         return rc2;
-# else
+#  else
     RTTHREAD        hThreadSelf = RTThreadSelf();
+#  endif
 # endif
     for (;;)
…
         if (RT_FAILURE(rc9))
             return rc9;
-# else
+# elif defined(IN_RING3)
         RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
 # endif
         int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
+# ifdef IN_RING3
         RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
+# endif

         if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
…
     /* won't get here */
 }
-#endif /* IN_RING3 */
+#endif /* IN_RING3 || IN_RING0 */
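The renamed helper is the classic benaphore wait: atomically bump the lockers count, and if the section was not free, sleep on the event semaphore until the owner signals it on leave, then race the other waiters for ownership. A rough schematic of the shared ring-3/ring-0 helper, with the ring-3 lock-validator and thread-state bookkeeping elided (a condensed sketch, not the exact source):

    static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect,
                                             RTNATIVETHREAD hNativeSelf,
                                             PCRTLOCKVALSRCPOS pSrcPos)
    {
        /* The owner may have left between the fast path and now. */
        if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

        PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
        SUPSEMEVENT    hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
        for (;;)
        {
            /* Sleep until PDMCritSectLeave signals the event... */
            int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
            if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
                return VERR_SEM_DESTROYED;
            /* ...then try to claim ownership; interrupted waits loop around. */
            if (rc == VINF_SUCCESS)
                return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        }
    }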
     
@@ -221 +229 @@
      * Take the slow path.
      */
-    return pdmR3CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
-#else
-    /*
-     * Return busy.
-     */
+    return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
+
+#elif defined(IN_RING0)
+    /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
+     *        and would be better off switching out of that while waiting for
+     *        the lock.  Several of the locks jump back to ring-3 just to
+     *        get the lock; the ring-3 code will then call the kernel to do
+     *        the lock wait, and when the call returns it will call ring-0
+     *        again and resume in setjmp style.  Not very efficient. */
+# if 0
+    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
+                             * callers not prepared for longjmp/blocking to
+                             * use PDMCritSectTryEnter. */
+    {
+        /*
+         * Leave HWACCM context while waiting if necessary.
+         */
+        int rc;
+        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
+        {
+            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock,    1000000);
+            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
+        }
+        else
+        {
+            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
+            PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
+            PVMCPU  pVCpu = VMMGetCpu(pVM);
+            HWACCMR0Leave(pVM, pVCpu);
+            RTThreadPreemptRestore(NIL_RTTHREAD, ????);
+
+            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
+
+            RTThreadPreemptDisable(NIL_RTTHREAD, ????);
+            HWACCMR0Enter(pVM, pVCpu);
+        }
+        return rc;
+    }
+# else
+    /*
+     * Preemption hasn't been disabled, so we can block here in ring-0.
+     */
+    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
+        && ASMIntAreEnabled())
+        return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
+# endif
+
     STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
     LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
     return rcBusy;
-#endif
+
+#else  /* IN_RC */
+    /*
+     * Return busy.
+     */
+    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
+    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
+    return rcBusy;
+#endif /* IN_RC */
 }
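For callers the contract of PDMCritSectEnter is unchanged: rcBusy is the status a ring-0/RC caller gets back when the section is contended and blocking is not possible, and after this change that fallback fires far less often in ring-0. A hypothetical ring-0 caller follows; the device state and the VINF_IOM_HC_MMIO_WRITE status are illustrative of the usual rcBusy convention, not part of this changeset:

    /* Try to enter the device critsect; defer to ring-3 if we cannot block. */
    int rc = PDMCritSectEnter(&pThis->CritSect, VINF_IOM_HC_MMIO_WRITE);
    if (rc == VINF_IOM_HC_MMIO_WRITE)
        return rc;                      /* rerun the access in ring-3 */
    AssertRC(rc);                       /* VINF_SUCCESS: we own the section */

    /* ... touch device state under the lock ... */

    PDMCritSectLeave(&pThis->CritSect);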