Changeset 90659 in vbox
- Timestamp: Aug 12, 2021 12:18:35 PM (4 years ago)
- svn:sync-xref-src-repo-rev: 146271
- File: 1 edited
Legend (diff below rendered in unified format):
- Unmodified: context lines, no +/- prefix
- Added: lines prefixed with +
- Removed: lines prefixed with -
trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp
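In brief, as the diff below shows: the ring-0 leave path no longer falls back to ring-3 whenever preemption or interrupts are disabled. It now asks whether signalling the event semaphores is safe in the current context (RTSemEventIsSignalSafe / RTSemEventMultiIsSignalSafe) and, where it is not, brackets the signal calls with VMMR0EmtPrepareToBlock / VMMR0EmtResumeAfterBlocking, returning rc directly instead of breaking out of the loop. The ring-0 exemption from the write-waiter fairness rule is dropped, and <iprt/semaphore.h> is now included in ring-0 builds as well. A condensed sketch of the new signalling pattern follows the diff.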
--- trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp (r90658)
+++ trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp (r90659)
@@ -35,7 +35,7 @@
 #ifdef IN_RING3
 # include <iprt/lockvalidator.h>
+#endif
+#if defined(IN_RING3) || defined(IN_RING0)
 # include <iprt/semaphore.h>
-#endif
-#if defined(IN_RING3) || defined(IN_RING0)
 # include <iprt/thread.h>
 #endif
@@ -1132,11 +1132,8 @@
     /*
      * If we're in write mode now try grab the ownership. Play fair if there
-     * are threads already waiting, unless we're in ring-0.
+     * are threads already waiting.
      */
     bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
-#if defined(IN_RING3)
               && (   ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
-                  || fTryOnly)
-#endif
-               ;
+                  || fTryOnly);
     if (fDone)
@@ -1514,5 +1511,8 @@
 
             ASMNopPause();
-            if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
+            if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
+            { /*likely*/ }
+            else
                 return VERR_SEM_DESTROYED;
+            ASMNopPause();
         }
@@ -1521,8 +1521,15 @@
 #elif defined(IN_RING0)
             /*
-             * Update the state.
-             */
-            if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
-                && ASMIntAreEnabled())
+             * Ring-0: Try leave for real, depends on host and context.
+             */
+            Assert(RTSemEventIsSignalSafe() == RTSemEventMultiIsSignalSafe());
+            PVMCPUCC pVCpu = VMMGetCpu(pVM);
+            if (   pVCpu == NULL /* non-EMT access, if we implement it must be able to block */
+                || VMMRZCallRing3IsEnabled(pVCpu)
+                || RTSemEventIsSignalSafe()
+                || (   VMMR0ThreadCtxHookIsEnabled(pVCpu)     /* Doesn't matter if Signal() blocks if we have hooks, ... */
+                    && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
+                    && ASMIntAreEnabled())                    /* ... and interrupts hasn't yet been disabled. Special pre-GC HM env. */
+               )
             {
 # ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
@@ -1546,20 +1553,35 @@
                     || (u64State & RTCSRW_CNT_RD_MASK) == 0)
                 {
-                    /* Don't change the direction, wake up the next writer if any. */
+                    /*
+                     * Don't change the direction, wake up the next writer if any.
+                     */
                     u64State &= ~RTCSRW_CNT_WR_MASK;
                     u64State |= c << RTCSRW_CNT_WR_SHIFT;
                     if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
                     {
-                        if (c > 0)
+                        int rc;
+                        if (c == 0)
+                            rc = VINF_SUCCESS;
+                        else if (RTSemEventIsSignalSafe() || pVCpu == NULL)
+                            rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
+                        else
                         {
-                            int rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
-                            AssertRC(rc);
+                            VMMR0EMTBLOCKCTX Ctx;
+                            rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
+                            VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
+
+                            rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
+
+                            VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
                         }
-                        break;
+                        AssertRC(rc);
+                        return rc;
                     }
                 }
                 else
                 {
-                    /* Reverse the direction and signal the reader threads. */
+                    /*
+                     * Reverse the direction and signal the reader threads.
+                     */
                     u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
                     u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
@@ -1568,14 +1590,31 @@
                     Assert(!pThis->s.Core.fNeedReset);
                     ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
-                    int rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
+
+                    int rc;
+                    if (RTSemEventMultiIsSignalSafe() || pVCpu == NULL)
+                        rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
+                    else
+                    {
+                        VMMR0EMTBLOCKCTX Ctx;
+                        rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
+                        VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
+
+                        rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
+
+                        VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
+                    }
                     AssertRC(rc);
-                    break;
+                    return rc;
                 }
             }
 
             ASMNopPause();
-            if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
+            if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
+            { /*likely*/ }
+            else
                 return VERR_SEM_DESTROYED;
+            ASMNopPause();
         }
+        /* not reached! */
     }
 #endif /* IN_RING0 */
@@ -1585,5 +1624,7 @@
      * Queue the requested exit for ring-3 execution.
      */
+# ifndef IN_RING0
     PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
+# endif
     uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
     LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3\n", i, pThis));