
Changeset 91808 in vbox for trunk


Timestamp: Oct 18, 2021, 9:20:07 AM
Author:    vboxsync
Message:   VMM/PDMCritSect: Don't preempt while on custom stack. bugref:10124
Location:  trunk/src/VBox/VMM
Files:     1 added, 2 edited

Legend: '+' marks added lines, '-' marks removed lines, unprefixed lines are unmodified context, and '…' marks elided lines.
  • trunk/src/VBox/VMM/Makefile.kmk (r91768 → r91808)

     VMMR0_SOURCES.x86 = \
             VMMR0/VMMR0JmpA-x86.asm
    +VMMR0_SOURCES.darwin.amd64 = \
    +        VMMR0/VMMR0StackBack-darwin.asm

     VMMR0_LIBS = \
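Going by its name and the StkBack_ wrappers added to PDMAllCritSect.cpp below, the newly registered VMMR0StackBack-darwin.asm is presumably the assembly side of this change: it would supply the ...OnKrnlStk entry points that switch from the custom ring-0 stack back to the kernel stack before calling the corresponding StkBack_ C++ bodies (see the sketch after the first wrapper below).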
  • trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp (r90910 → r91808)


+#if defined(VMM_R0_SWITCH_STACK) && defined(IN_RING0)
+/**
+ * We must be on kernel stack before disabling preemption, thus this wrapper.
+ */
+DECLASM(int) StkBack_pdmR0CritSectEnterContendedOnKrnlStk(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECT pCritSect,
+                                                          RTNATIVETHREAD hNativeSelf, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
+{
+    VMMR0EMTBLOCKCTX Ctx;
+    int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pCritSect, &Ctx);
+    if (rc == VINF_SUCCESS)
+    {
+        Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+        rc = pdmR3R0CritSectEnterContended(pVM, pVCpu, pCritSect, hNativeSelf, pSrcPos, rcBusy);
+
+        VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
+    }
+    else
+        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
+    return rc;
+}
+decltype(StkBack_pdmR0CritSectEnterContendedOnKrnlStk) pdmR0CritSectEnterContendedOnKrnlStk;
+#endif
+
+
 /**
  * Common worker for the debug and normal APIs.
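Note that the wrapper above ends with a decltype declaration rather than a definition: it declares pdmR0CritSectEnterContendedOnKrnlStk with exactly the same signature as StkBack_pdmR0CritSectEnterContendedOnKrnlStk, while the definition is expected to come from elsewhere (presumably the new VMMR0StackBack-darwin.asm, which would switch back to the kernel stack and then invoke the StkBack_ body). A minimal standalone C++ sketch of that declaration pattern, with hypothetical names:

    #include <cstdio>

    /* The real body; in the changeset this is the StkBack_ function that
       must run on the kernel stack. */
    int StkBack_doWork(int iValue)
    {
        return iValue + 1;
    }

    /* Declares doWork() with the exact signature of StkBack_doWork() by
       reusing its function type; the signature is never spelled out twice. */
    decltype(StkBack_doWork) doWork;

    /* Stand-in for the assembly thunk: the real one would switch stacks
       before forwarding to the StkBack_ body. */
    int doWork(int iValue)
    {
        return StkBack_doWork(iValue);
    }

    int main()
    {
        std::printf("%d\n", doWork(41));   /* prints 42 */
        return 0;
    }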
…
     if (pVCpu)
     {
+#  ifndef VMM_R0_SWITCH_STACK
         VMMR0EMTBLOCKCTX Ctx;
         int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pCritSect, &Ctx);
…
             STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
         return rc;
+#  else
+        return pdmR0CritSectEnterContendedOnKrnlStk(pVM, pVCpu, pCritSect, hNativeSelf, rcBusy, pSrcPos);
+#  endif
     }

…
 #endif /* IN_RING3 */

+
+#if defined(VMM_R0_SWITCH_STACK) && defined(IN_RING0)
+/**
+ * We must be on kernel stack before disabling preemption, thus this wrapper.
+ */
+DECLASM(int) StkBack_pdmR0CritSectLeaveSignallingOnKrnlStk(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECT pCritSect,
+                                                           int32_t const cLockers, SUPSEMEVENT const hEventToSignal)
+{
+    VMMR0EMTBLOCKCTX    Ctx;
+    bool                fLeaveCtx = false;
+    if (cLockers < 0)
+        AssertMsg(cLockers == -1, ("cLockers=%d\n", cLockers));
+    else
+    {
+        /* Someone is waiting, wake up one of them. */
+        Assert(cLockers < _8K);
+        SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
+        if (!RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
+        {
+            int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
+            VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
+            fLeaveCtx = true;
+        }
+        int rc = SUPSemEventSignal(pVM->pSession, hEvent);
+        AssertRC(rc);
+    }
+
+    /*
+     * Signal exit event.
+     */
+    if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
+    { /* likely */ }
+    else
+    {
+        if (!fLeaveCtx && pVCpu != NULL && !RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
+        {
+            int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
+            VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
+            fLeaveCtx = true;
+        }
+        Log8(("Signalling %#p\n", hEventToSignal));
+        int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
+        AssertRC(rc);
+    }
+
+    /*
+     * Restore HM context if needed.
+     */
+    if (!fLeaveCtx)
+    { /* contention should be unlikely */ }
+    else
+        VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
+
+# ifdef DEBUG_bird
+    VMMTrashVolatileXMMRegs();
+# endif
+    return VINF_SUCCESS;
+}
+decltype(StkBack_pdmR0CritSectLeaveSignallingOnKrnlStk) pdmR0CritSectLeaveSignallingOnKrnlStk;
+#endif

 /**
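This second wrapper follows the same bracket pattern as the first: when signalling the event semaphore may itself block or reschedule (RTSemEventIsSignalSafe() returning false), the EMT first enters a may-block context via VMMR0EmtPrepareToBlock, signals, and later resumes via VMMR0EmtResumeAfterBlocking. A generic, non-VirtualBox sketch of that shape, with placeholder names throughout:

    #include <cstdio>

    struct BlockCtx { bool fEntered; };

    /* Placeholder predicates/operations; the rough VirtualBox counterparts
       are RTSemEventIsSignalSafe(), VMMR0EmtPrepareToBlock(),
       SUPSemEventSignal() and VMMR0EmtResumeAfterBlocking(). */
    static bool semSignalIsSafe(void)               { return false; }
    static void prepareToBlock(BlockCtx *pCtx)      { pCtx->fEntered = true; }
    static void semSignal(void)                     { std::puts("waiter signalled"); }
    static void resumeAfterBlocking(BlockCtx *pCtx) { pCtx->fEntered = false; }

    void leaveSignalling(bool fHaveWaiter)
    {
        BlockCtx Ctx = { false };
        bool     fLeaveCtx = false;
        if (fHaveWaiter)
        {
            if (!semSignalIsSafe())   /* signalling could reschedule us */
            {
                prepareToBlock(&Ctx); /* hence the kernel-stack requirement */
                fLeaveCtx = true;
            }
            semSignal();
        }
        if (fLeaveCtx)
            resumeAfterBlocking(&Ctx);
    }

    int main()
    {
        leaveSignalling(true);
        return 0;
    }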
…
         if (!fQueueIt)
         {
+# ifndef VMM_R0_SWITCH_STACK
             VMMR0EMTBLOCKCTX    Ctx;
             bool                fLeaveCtx = false;
…
                 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);

-# ifdef DEBUG_bird
+#  ifdef DEBUG_bird
             VMMTrashVolatileXMMRegs();
-# endif
+#  endif
             return VINF_SUCCESS;
+# else
+            return pdmR0CritSectLeaveSignallingOnKrnlStk(pVM, pVCpu, pCritSect, cLockers, hEventToSignal);
+# endif
         }

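Both call sites thus compile to one of two bodies: without VMM_R0_SWITCH_STACK the original inline blocking path is kept, and with it the code defers to the kernel-stack wrapper. A compile-time dispatch sketch with illustrative names (none of them VirtualBox APIs):

    #include <cstdio>

    /* #define VMM_R0_SWITCH_STACK 1   -- set when ring-0 runs on a custom stack */

    static int blockInline(void)
    {
        std::puts("blocking directly; already on the kernel stack");
        return 0;
    }

    static int blockOnKrnlStk(void)
    {
        /* Stand-in for the thunk that first switches back to the kernel stack. */
        return blockInline();
    }

    int contendedPath(void)
    {
    #ifndef VMM_R0_SWITCH_STACK
        return blockInline();
    #else
        return blockOnKrnlStk();
    #endif
    }

    int main()
    {
        return contendedPath();
    }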