VirtualBox

Changeset 90486 in vbox for trunk/src


Timestamp:
Aug 2, 2021 8:40:40 PM
Author:
vboxsync
Message:

VMM/PDM: Tighten read/write critical section code a bit. bugref:6695
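
The tightening visible in the diffs below largely consists of backing the debug-only Assert checks on the reader, writer and recursion counters with guards that stay active in release builds: the counter is bounds-checked, the update is undone when the check fails, and the call returns a status code such as VERR_PDM_CRITSECTRW_TOO_MANY_READERS or VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS (or an internal-processing error that also poisons the section's magic, see the PDMInternal.h hunk). The standalone sketch below illustrates the recursion-cap part of that pattern; the Demo* names and numeric error codes are invented for illustration only, and the real code uses IPRT's AssertReturn/AssertReturnStmt macros with atomic counter updates instead.

/*
 * Standalone sketch of the recursion-cap pattern (assumed simplification; the
 * Demo* names and error codes are not part of the VirtualBox sources).
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_RECURSIONS          (1024u * 1024u)   /* like _1M in the diff */
#define DEMO_ERR_TOO_MANY_RECURSIONS (-2)
#define DEMO_SUCCESS                 0

typedef struct DEMOCRITSECTRW
{
    uint32_t cWriteRecursions;   /* exclusive (write) recursion depth */
} DEMOCRITSECTRW;

/*
 * Recursive exclusive enter by the thread that already owns the section.
 * The depth must come out greater than 1 (we were already the owner) and no
 * larger than the cap; otherwise the increment is undone and the call fails,
 * instead of only tripping a debug assertion.  Single-threaded sketch; the
 * real code uses ASMAtomicIncU32/ASMAtomicDecU32.
 */
static int demoEnterExclRecursion(DEMOCRITSECTRW *pThis)
{
    uint32_t const cDepth = ++pThis->cWriteRecursions;
    if (cDepth <= 1 || cDepth > DEMO_MAX_RECURSIONS)
    {
        pThis->cWriteRecursions--;
        return DEMO_ERR_TOO_MANY_RECURSIONS;
    }
    return DEMO_SUCCESS;
}

int main(void)
{
    DEMOCRITSECTRW This = { 1 };  /* pretend the section is already owned once */
    printf("second enter: %d\n", demoEnterExclRecursion(&This));   /* 0  */
    This.cWriteRecursions = DEMO_MAX_RECURSIONS;
    printf("over the cap: %d\n", demoEnterExclRecursion(&This));   /* -2 */
    return 0;
}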

Location:
trunk/src/VBox/VMM
Files:
3 edited

Legend:

Unmodified: prefixed with both the old and the new line number
Added: prefixed with the new line number only
Removed: prefixed with the old line number only; removed lines are listed before the added lines that replace them
  • trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp

    r90472 r90486  
    881881        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
    882882        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
     883/** @todo This doesn't work any more for devices. */
    883884        pVCpu->pdm.s.apQueuedCritSectLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
    884885        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
  • trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp

    r90385 r90486  
    4646*********************************************************************************************************************************/
    4747/** The number loops to spin for shared access in ring-3. */
    48 #define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3       20
     48#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3        20
    4949/** The number loops to spin for shared access in ring-0. */
    50 #define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0       128
     50#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0        128
    5151/** The number loops to spin for shared access in the raw-mode context. */
    52 #define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC       128
     52#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC        128
    5353
    5454/** The number loops to spin for exclusive access in ring-3. */
    55 #define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3       20
     55#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3        20
    5656/** The number loops to spin for exclusive access in ring-0. */
    57 #define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0       256
     57#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0        256
    5858/** The number loops to spin for exclusive access in the raw-mode context. */
    59 #define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC       256
    60 
     59#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC        256
     60
     61/** Max number of write or write/read recursions. */
     62#define PDM_CRITSECTRW_MAX_RECURSIONS           _1M
    6163
    6264/* Undefine the automatic VBOX_STRICT API mappings. */
     
    9193
    9294
     95DECL_NO_INLINE(static, int) pdmCritSectRwCorrupted(PPDMCRITSECTRW pThis)
     96{
     97    ASMAtomicWriteU32(&pThis->s.Core.u32Magic, PDMCRITSECTRW_MAGIC_CORRUPT);
     98    return VERR_PDM_CRITSECTRW_IPE;
     99}
    93100
    94101
     
    198205            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
    199206            c++;
    200             Assert(c < RTCSRW_CNT_MASK / 2);
     207            Assert(c < RTCSRW_CNT_MASK / 4);
     208            AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
    201209            u64State &= ~RTCSRW_CNT_RD_MASK;
    202210            u64State |= c << RTCSRW_CNT_RD_SHIFT;
     
    228236        {
    229237            /* Is the writer perhaps doing a read recursion? */
    230             RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
    231238            RTNATIVETHREAD hNativeWriter;
    232239            ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    233             if (hNativeSelf == hNativeWriter)
     240            if (hNativeWriter != NIL_RTNATIVETHREAD)
    234241            {
     242                RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
     243                if (hNativeSelf == hNativeWriter)
     244                {
    235245#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    236                 if (!fNoVal)
    237                 {
    238                     int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
    239                     if (RT_FAILURE(rc9))
    240                         return rc9;
     246                    if (!fNoVal)
     247                    {
     248                        int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
     249                        if (RT_FAILURE(rc9))
     250                            return rc9;
     251                    }
     252#endif
     253                    uint32_t const cReads = ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
     254                    Assert(cReads < _16K);
     255                    AssertReturnStmt(cReads < PDM_CRITSECTRW_MAX_RECURSIONS, ASMAtomicDecU32(&pThis->s.Core.cWriterReads),
     256                                     VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
     257                    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
     258                    return VINF_SUCCESS; /* don't break! */
    241259                }
    242 #endif
    243                 Assert(pThis->s.Core.cWriterReads < UINT32_MAX / 2);
    244                 ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
    245                 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
    246                 return VINF_SUCCESS; /* don't break! */
    247260            }
    248261
     
    268281                c++;
    269282                Assert(c < RTCSRW_CNT_MASK / 2);
     283                AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
    270284
    271285                uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
     
    273287                Assert(cWait <= c);
    274288                Assert(cWait < RTCSRW_CNT_MASK / 2);
     289                AssertReturn(cWait < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
    275290
    276291                u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
     
    317332                            {
    318333                                u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
    319                                 c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; Assert(c > 0);
     334                                c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
     335                                AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis));
    320336                                c--;
    321                                 cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
     337                                cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
     338                                AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis));
    322339                                cWait--;
    323340                                u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
     
    342359
    343360                        cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
    344                         Assert(cWait > 0);
     361                        AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis));
    345362                        cWait--;
    346363                        u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
     
    633650                    LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
    634651                    AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves));
     652/** @todo This doesn't work any more for devices. */
    635653                    pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = MMHyperCCToR3(pVM, pThis);
    636654                    VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
     
    650668    else
    651669    {
     670        /*
     671         * Write direction. Check that it's the owner calling and that it has reads to undo.
     672         */
    652673        RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
     674        AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
     675
    653676        RTNATIVETHREAD hNativeWriter;
    654677        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
     
    663686        }
    664687#endif
    665         ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
     688        uint32_t cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
     689        AssertReturn(cDepth < PDM_CRITSECTRW_MAX_RECURSIONS, pdmCritSectRwCorrupted(pThis));
    666690    }
    667691
    668692    return VINF_SUCCESS;
    669693}
     694
    670695
    671696/**
     
    744769     * Check if we're already the owner and just recursing.
    745770     */
    746     RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
     771    RTNATIVETHREAD const hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
     772    AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
    747773    RTNATIVETHREAD hNativeWriter;
    748774    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
     
    758784        }
    759785#endif
    760         Assert(pThis->s.Core.cWriteRecursions < UINT32_MAX / 2);
    761786        STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
    762         ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
     787        uint32_t const cDepth = ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
     788        AssertReturnStmt(cDepth > 1 && cDepth <= PDM_CRITSECTRW_MAX_RECURSIONS,
     789                         ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions),
     790                         VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
    763791        return VINF_SUCCESS;
    764792    }
     
    777805            /* It flows in the right direction, try follow it before it changes. */
    778806            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
     807            AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
    779808            c++;
    780             Assert(c < RTCSRW_CNT_MASK / 2);
     809            Assert(c < RTCSRW_CNT_WR_MASK / 4);
    781810            u64State &= ~RTCSRW_CNT_WR_MASK;
    782811            u64State |= c << RTCSRW_CNT_WR_SHIFT;
     
    802831            /* Add ourselves to the write count and break out to do the wait. */
    803832            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
     833            AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
    804834            c++;
    805             Assert(c < RTCSRW_CNT_MASK / 2);
     835            Assert(c < RTCSRW_CNT_WR_MASK / 4);
    806836            u64State &= ~RTCSRW_CNT_WR_MASK;
    807837            u64State |= c << RTCSRW_CNT_WR_SHIFT;
     
    820850    /*
    821851     * If we're in write mode now try grab the ownership. Play fair if there
    822      * are threads already waiting.
     852     * are threads already waiting, unless we're in ring-0.
    823853     */
    824854    bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
     
    886916                    {
    887917                        u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
    888                         uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
     918                        uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
     919                        AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis));
    889920                        c--;
    890921                        u64State &= ~RTCSRW_CNT_WR_MASK;
     
    920951            {
    921952                u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
    922                 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
     953                uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
     954                AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis));
    923955                c--;
    924956                u64State &= ~RTCSRW_CNT_WR_MASK;
     
    11251157#endif
    11261158
     1159    /*
     1160     * Check ownership.
     1161     */
    11271162    RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
     1163    AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
     1164
    11281165    RTNATIVETHREAD hNativeWriter;
    11291166    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
     
    11651202
    11661203                uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
    1167                 Assert(c > 0);
     1204                AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis));
    11681205                c--;
    11691206
     
    12171254            uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
    12181255            LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3\n", i, pThis));
    1219             AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
     1256            AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves));
     1257/** @todo This doesn't work anymore for devices. */
    12201258            pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = MMHyperCCToR3(pVM, pThis);
    12211259            VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
     
    12311269         * Not the final recursion.
    12321270         */
    1233         Assert(pThis->s.Core.cWriteRecursions != 0);
    12341271#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    12351272        if (fNoVal)
     
    12421279        }
    12431280#endif
    1244         ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
     1281        uint32_t const cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
     1282        AssertReturn(cDepth != 0 && cDepth < UINT32_MAX, pdmCritSectRwCorrupted(pThis));
    12451283    }
    12461284
  • trunk/src/VBox/VMM/include/PDMInternal.h

    r90468 r90486  
    508508typedef PDMCRITSECTRWINT *PPDMCRITSECTRWINT;
    509509
     510/** Special magic value we set the structure has become corrupted. */
     511#define PDMCRITSECTRW_MAGIC_CORRUPT     UINT32_C(0x0bad0620)
    510512
    511513
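
The PDMCRITSECTRW_MAGIC_CORRUPT value added to PDMInternal.h pairs with the pdmCritSectRwCorrupted() helper added in PDMAllCritSectRw.cpp: whenever a counter that must be non-zero is found to be zero (or otherwise underflowed), the helper stamps the corrupt magic into the structure and returns VERR_PDM_CRITSECTRW_IPE, so subsequent calls that validate the magic reject the section instead of operating on corrupted state. The sketch below shows the shape of that pattern in a simplified, single-threaded form; apart from the 0x0bad0620 constant taken from the diff, the Demo* names, the healthy magic value and the error codes are invented for illustration.

/*
 * Sketch of the "poison the magic on internal error" pattern.  The corrupt
 * value is the one added to PDMInternal.h above; the healthy magic, the
 * Demo* names and the error codes are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAGIC          UINT32_C(0x20210802)   /* hypothetical valid magic */
#define DEMO_MAGIC_CORRUPT  UINT32_C(0x0bad0620)   /* value from the diff */
#define DEMO_ERR_IPE        (-1)                   /* stand-in for VERR_PDM_CRITSECTRW_IPE */
#define DEMO_ERR_INVALID    (-3)
#define DEMO_SUCCESS        0

typedef struct DEMOCRITSECTRW
{
    uint32_t u32Magic;
    uint32_t cReads;      /* shared (read) hold count */
} DEMOCRITSECTRW;

/* Mark the structure as corrupted and return an internal-processing-error
 * status; this mirrors the new pdmCritSectRwCorrupted() helper. */
static int demoCritSectRwCorrupted(DEMOCRITSECTRW *pThis)
{
    pThis->u32Magic = DEMO_MAGIC_CORRUPT;
    return DEMO_ERR_IPE;
}

/* Leave a shared section: a zero read count here means the state is corrupt,
 * so the magic gets poisoned and later calls are rejected up front. */
static int demoLeaveShared(DEMOCRITSECTRW *pThis)
{
    if (pThis->u32Magic != DEMO_MAGIC)
        return DEMO_ERR_INVALID;
    if (pThis->cReads == 0)
        return demoCritSectRwCorrupted(pThis);
    pThis->cReads--;
    return DEMO_SUCCESS;
}

int main(void)
{
    DEMOCRITSECTRW This = { DEMO_MAGIC, 0 };
    printf("leave with zero readers: %d\n", demoLeaveShared(&This));  /* -1, poisons magic */
    printf("any later call:          %d\n", demoLeaveShared(&This));  /* -3 */
    return 0;
}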