VirtualBox

Changeset 45293 in vbox for trunk/src


Ignore:
Timestamp:
Apr 2, 2013 6:42:50 PM (12 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
84691
Message:

PGMCritSectRw: Prep for ring-0 and raw-mode context operation.

Location:
trunk/src/VBox/VMM
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/Makefile.kmk

    r45276 r45293  
    421421        VMMAll/PDMAll.cpp \
    422422        VMMAll/PDMAllCritSect.cpp \
     423        VMMAll/PDMAllCritSectRw.cpp \
    423424        VMMAll/PDMAllCritSectBoth.cpp \
    424425        VMMAll/PDMAllQueue.cpp \
     
    529530        VMMAll/PDMAll.cpp \
    530531        VMMAll/PDMAllCritSect.cpp \
     532        VMMAll/PDMAllCritSectRw.cpp \
    531533        VMMAll/PDMAllCritSectBoth.cpp \
    532534        VMMAll/PDMAllQueue.cpp \
  • trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp

    r45189 r45293  
    207207                return VERR_SEM_BUSY;
    208208
    209             /* Add ourselves to the queue and wait for the direction to change. */
     209#if defined(IN_RING3)
     210            /*
     211             * Add ourselves to the queue and wait for the direction to change.
     212             */
    210213            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
    211214            c++;
     
    225228                {
    226229                    int rc;
    227 #if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
     230# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    228231                    rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
    229232                                                               RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
    230233                    if (RT_SUCCESS(rc))
    231 #else
     234# else
    232235                    RTTHREAD hThreadSelf = RTThreadSelf();
    233236                    RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
    234 #endif
     237# endif
    235238                    {
    236239                        do
     
    295298                }
    296299
    297 #if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
     300# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    298301                RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
    299 #endif
     302# endif
    300303                break;
    301304            }
     305
     306#else
     307            /*
     308             * We cannot call SUPSemEventMultiWaitNoResume in this context. Go
     309             * back to ring-3 and do it there or return rcBusy.
     310             */
     311            if (rcBusy == VINF_SUCCESS)
     312            {
     313                PVM     pVM   = pThis->s.CTX_SUFF(pVM);     AssertPtr(pVM);
     314                PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
     315                /** @todo Should actually do this via VMMR0.cpp instead of going all the way
     316                 *        back to ring-3. Goes for both kind of crit sects. */
     317                return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
     318            }
     319            return rcBusy;
     320#endif
    302321        }
    303322
     
    342361VMMDECL(int) PDMCritSectRwEnterShared(PPDMCRITSECTRW pThis, int rcBusy)
    343362{
    344 #if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
     363#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    345364    return pdmCritSectRwEnterShared(pThis, rcBusy, NULL, false /*fTryOnly*/);
    346365#else
     
    375394VMMDECL(int) PDMCritSectRwEnterSharedDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
    376395{
     396#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
     397    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, NULL, false /*fTryOnly*/);
     398#else
    377399    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    378400    return pdmCritSectRwEnterShared(pThis, rcBusy, &SrcPos, false /*fTryOnly*/);
     401#endif
    379402}
    380403
     
    401424VMMDECL(int) PDMCritSectRwTryEnterShared(PPDMCRITSECTRW pThis)
    402425{
    403 #if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
     426#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    404427    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, NULL, true /*fTryOnly*/);
    405428#else
     
    431454VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
    432455{
     456#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
     457    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, NULL, true /*fTryOnly*/);
     458#else
    433459    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    434460    return pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, &SrcPos, true /*fTryOnly*/);
    435 }
     461#endif
     462}
     463
     464
     465#ifdef IN_RING3
     466/**
     467 * Enters a PDM read/write critical section with shared (read) access.
     468 *
     469 * @returns VINF_SUCCESS if entered successfully.
     470 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
     471 *          during the operation.
     472 *
     473 * @param   pThis       Pointer to the read/write critical section.
     474 * @param   fCallRing3  Whether this is a VMMRZCallRing3()request.
     475 */
     476VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PPDMCRITSECTRW pThis, bool fCallRing3)
     477{
     478    int rc = pdmCritSectRwEnterShared(pThis, VERR_SEM_BUSY, NULL, false /*fTryAgain*/);
     479    if (    rc == VINF_SUCCESS
     480        &&  fCallRing3
     481        &&  pThis->s.Core.pValidatorRead)
     482    {
     483        Assert(pThis->s.Core.pValidatorWrite);
     484        if (pThis->s.Core.hNativeWriter == NIL_RTNATIVETHREAD)
     485            RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
     486        else
     487            RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
     488    }
     489    return rc;
     490}
     491#endif /* IN_RING3 */
    436492
    437493
     
    484540            else
    485541            {
    486                 /* Reverse the direction and signal the reader threads. */
     542#if defined(IN_RING3)
     543                /* Reverse the direction and signal the writer threads. */
    487544                u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
    488545                u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
     
    493550                    break;
    494551                }
     552#else
     553                /* Queue the exit request (ring-3). */
     554                PVM         pVM   = pThis->s.CTX_SUFF(pVM);         AssertPtr(pVM);
     555                PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
     556                uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
     557                LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3\n", i, pThis));
     558                AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves));
     559                pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = MMHyperCCToR3(pVM, pThis);
     560                VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
     561                VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
     562                STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
     563                STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
     564#endif
    495565            }
    496566
     
    614684     */
    615685    bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
     686#if defined(IN_RING3)
    616687              && (  ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
    617                   || fTryOnly);
     688                  || fTryOnly)
     689#endif
     690               ;
    618691    if (fDone)
    619692        ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
    620693    if (!fDone)
    621694    {
     695#if defined(IN_RING3)
    622696        /*
    623697         * Wait for our turn.
     
    626700        {
    627701            int rc;
    628 #if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
     702# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    629703            if (!fTryOnly)
    630704            {
     
    637711                rc = VINF_SUCCESS;
    638712            if (RT_SUCCESS(rc))
    639 #else
     713# else
    640714            RTTHREAD hThreadSelf = RTThreadSelf();
    641715            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
    642 #endif
     716# endif
    643717            {
    644718                do
     
    676750            AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
    677751        }
     752
     753#else
     754        /* We cannot call SUPSemEventWaitNoResume in this context. Go back to
     755           ring-3 and do it there or return rcBusy. */
     756        for (;;)
     757        {
     758            u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
     759            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
     760            c--;
     761            u64State &= ~RTCSRW_CNT_WR_MASK;
     762            u64State |= c << RTCSRW_CNT_WR_SHIFT;
     763            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
     764                break;
     765        }
     766
     767        if (rcBusy == VINF_SUCCESS)
     768        {
     769            PVM     pVM   = pThis->s.CTX_SUFF(pVM);     AssertPtr(pVM);
     770            PVMCPU  pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
     771            /** @todo Should actually do this via VMMR0.cpp instead of going all the way
     772             *        back to ring-3. Goes for both kind of crit sects. */
     773            return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
     774        }
     775        return rcBusy;
     776
     777#endif
    678778    }
    679779
     
    715815VMMDECL(int) PDMCritSectRwEnterExcl(PPDMCRITSECTRW pThis, int rcBusy)
    716816{
    717 #if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
     817#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    718818    return pdmCritSectRwEnterExcl(pThis, rcBusy, NULL, false /*fTryAgain*/);
    719819#else
     
    749849VMMDECL(int) PDMCritSectRwEnterExclDebug(PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
    750850{
     851#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
     852    return pdmCritSectRwEnterExcl(pThis, rcBusy, NULL, false /*fTryAgain*/);
     853#else
    751854    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    752855    return pdmCritSectRwEnterExcl(pThis, rcBusy, &SrcPos, false /*fTryAgain*/);
     856#endif
    753857}
    754858
     
    771875VMMDECL(int) PDMCritSectRwTryEnterExcl(PPDMCRITSECTRW pThis)
    772876{
    773 #if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
     877#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    774878    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, NULL, true /*fTryAgain*/);
    775879#else
     
    801905VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
    802906{
     907#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
     908    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, NULL, true /*fTryAgain*/);
     909#else
    803910    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    804911    return pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, &SrcPos, true /*fTryAgain*/);
    805 }
     912#endif
     913}
     914
     915
     916#ifdef IN_RING3
     917/**
     918 * Enters a PDM read/write critical section with exclusive (write) access.
     919 *
     920 * @returns VINF_SUCCESS if entered successfully.
     921 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
     922 *          during the operation.
     923 *
     924 * @param   pThis       Pointer to the read/write critical section.
     925 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
     926 */
     927VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PPDMCRITSECTRW pThis, bool fCallRing3)
     928{
     929    int rc = pdmCritSectRwEnterExcl(pThis, VERR_SEM_BUSY, NULL, false /*fTryAgain*/);
     930    if (    rc == VINF_SUCCESS
     931        &&  fCallRing3
     932        &&  pThis->s.Core.pValidatorWrite
     933        &&  pThis->s.Core.pValidatorWrite->hThread != NIL_RTTHREAD)
     934        RTLockValidatorRecExclReleaseOwnerUnchecked(pThis->s.Core.pValidatorWrite);
     935    return rc;
     936}
     937#endif /* IN_RING3 */
    806938
    807939
     
    842974         * Update the state.
    843975         */
     976#if defined(IN_RING3)
    844977        ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
     978        STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
    845979        ASMAtomicWriteHandle(&pThis->s.Core.hNativeWriter, NIL_RTNATIVETHREAD);
    846         STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
    847980
    848981        for (;;)
     
    8901023                return VERR_SEM_DESTROYED;
    8911024        }
     1025#else
     1026        /*
      1027         * We can call neither SUPSemEventSignal nor SUPSemEventMultiSignal,
     1028         * so queue the exit request (ring-3).
     1029         */
     1030        PVM         pVM   = pThis->s.CTX_SUFF(pVM);         AssertPtr(pVM);
     1031        PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
     1032        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
     1033        LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3\n", i, pThis));
     1034        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
     1035        pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = MMHyperCCToR3(pVM, pThis);
     1036        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
     1037        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
     1038        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
     1039        STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
     1040#endif
    8921041    }
    8931042    else
  • trunk/src/VBox/VMM/VMMR3/VMM.cpp

    r45276 r45293  
    8181#include <VBox/vmm/pdmqueue.h>
    8282#include <VBox/vmm/pdmcritsect.h>
     83#include <VBox/vmm/pdmcritsectrw.h>
    8384#include <VBox/vmm/pdmapi.h>
    8485#include <VBox/vmm/cpum.h>
     
    21162117
    21172118        /*
     2119         * Enter a r/w critical section exclusively.
     2120         */
     2121        case VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL:
     2122        {
     2123            pVCpu->vmm.s.rcCallRing3 = PDMR3CritSectRwEnterExclEx((PPDMCRITSECTRW)(uintptr_t)pVCpu->vmm.s.u64CallRing3Arg,
     2124                                                                    true /*fCallRing3*/);
     2125            break;
     2126        }
     2127
     2128        /*
     2129         * Enter a r/w critical section shared.
     2130         */
     2131        case VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED:
     2132        {
     2133            pVCpu->vmm.s.rcCallRing3 = PDMR3CritSectRwEnterSharedEx((PPDMCRITSECTRW)(uintptr_t)pVCpu->vmm.s.u64CallRing3Arg,
     2134                                                                    true /*fCallRing3*/);
     2135            break;
     2136        }
     2137
     2138        /*
    21182139         * Acquire the PDM lock.
    21192140         */
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette