Changeset 90658 in vbox for trunk/src/VBox/VMM
- Timestamp: Aug 12, 2021 11:29:37 AM (4 years ago)
- svn:sync-xref-src-repo-rev: 146270
- File: 1 edited
Legend: unchanged lines are unmarked; added lines are prefixed with "+"; removed lines are prefixed with "-".
trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp
--- trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp	(r90654)
+++ trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp	(r90658)
@@ r90654: lines 1390-1557 → r90658: lines 1390-1606 @@
 
     /*
-     * Unwind one recursion. Is it the final one?
-     */
-    if (pThis->s.Core.cWriteRecursions == 1)
-    {
-        AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
-#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
-        if (fNoVal)
-            Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
-        else
-        {
-            int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
-            if (RT_FAILURE(rc9))
-                return rc9;
-        }
-#endif
-
-#ifdef RTASM_HAVE_CMP_WRITE_U128
-        /*
-         * See if we can get out w/o any signalling as this is a common case.
-         */
-        if (pdmCritSectRwIsCmpWriteU128Supported())
-        {
-            RTCRITSECTRWSTATE OldState;
-            OldState.s.u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
-            if (OldState.s.u64State == ((UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)))
-            {
-                OldState.s.hNativeWriter = hNativeSelf;
-                AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));
-
-                RTCRITSECTRWSTATE NewState;
-                NewState.s.u64State = RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
-                NewState.s.hNativeWriter = NIL_RTNATIVETHREAD;
-
-# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
-                pThis->s.Core.cWriteRecursions = 0;
-# else
-                ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
-# endif
-                STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
-
-                if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
-                    return VINF_SUCCESS;
-
-                /* bail out. */
-                pThis->s.Core.cWriteRecursions = 1;
-            }
-        }
-#endif
-
-        /*
-         * Update the state.
-         */
-#if defined(IN_RING3) || defined(IN_RING0)
-# ifdef IN_RING0
-        if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
-            && ASMIntAreEnabled())
-# endif
-        {
-# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
-            pThis->s.Core.cWriteRecursions = 0;
-# else
-            ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
-# endif
-            STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
-            ASMAtomicWriteHandle(&pThis->s.Core.u.s.hNativeWriter, NIL_RTNATIVETHREAD);
-
-            for (;;)
-            {
-                uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
-                uint64_t u64OldState = u64State;
-
-                uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
-                AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on leave"));
-                c--;
-
-                if (   c > 0
-                    || (u64State & RTCSRW_CNT_RD_MASK) == 0)
-                {
-                    /* Don't change the direction, wake up the next writer if any. */
-                    u64State &= ~RTCSRW_CNT_WR_MASK;
-                    u64State |= c << RTCSRW_CNT_WR_SHIFT;
-                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
-                    {
-                        if (c > 0)
-                        {
-                            int rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
-                            AssertRC(rc);
-                        }
-                        break;
-                    }
-                }
-                else
-                {
-                    /* Reverse the direction and signal the reader threads. */
-                    u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
-                    u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
-                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
-                    {
-                        Assert(!pThis->s.Core.fNeedReset);
-                        ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
-                        int rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
-                        AssertRC(rc);
-                        break;
-                    }
-                }
-
-                ASMNopPause();
-                if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
-                    return VERR_SEM_DESTROYED;
-            }
-        }
-#endif /* IN_RING3 || IN_RING0 */
-
-#ifndef IN_RING3
-# ifdef IN_RING0
-        else
-# endif
-        {
-            /*
-             * We cannot call neither SUPSemEventSignal nor SUPSemEventMultiSignal,
-             * so queue the exit request (ring-3).
-             */
-            PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
-            uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
-            LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3\n", i, pThis));
-            VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves),
-                                          ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
-            pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = pThis->s.pSelfR3;
-            VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
-                                          RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i])
-                                          &&    ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] & PAGE_OFFSET_MASK)
-                                             == ((uintptr_t)pThis & PAGE_OFFSET_MASK),
-                                          ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i], pThis),
-                                          pdmCritSectRwCorrupted(pThis, "Invalid self pointer on queue (excl)"));
-            VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
-            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
-            STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
-            STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
-        }
-#endif
-    }
-    else
-    {
-        /*
-         * Not the final recursion.
-         */
-#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
-        if (fNoVal)
-            Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
-        else
-        {
-            int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
-            if (RT_FAILURE(rc9))
-                return rc9;
-        }
-#endif
-#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
-        uint32_t const cDepth = --pThis->s.Core.cWriteRecursions;
-#else
-        uint32_t const cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
-#endif
-        AssertReturn(cDepth != 0 && cDepth < UINT32_MAX, pdmCritSectRwCorrupted(pThis, "Invalid write recursion value on leave"));
-    }
-
-    return VINF_SUCCESS;
+     * Unwind one recursion. Not the last?
+     */
+    if (pThis->s.Core.cWriteRecursions != 1)
+    {
+#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
+        if (fNoVal)
+            Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
+        else
+        {
+            int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
+            if (RT_FAILURE(rc9))
+                return rc9;
+        }
+#endif
+#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
+        uint32_t const cDepth = --pThis->s.Core.cWriteRecursions;
+#else
+        uint32_t const cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
+#endif
+        AssertReturn(cDepth != 0 && cDepth < UINT32_MAX, pdmCritSectRwCorrupted(pThis, "Invalid write recursion value on leave"));
+        return VINF_SUCCESS;
+    }
+
+    /*
+     * Final recursion.
+     */
+    AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
+#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
+    if (fNoVal)
+        Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
+    else
+    {
+        int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
+        if (RT_FAILURE(rc9))
+            return rc9;
+    }
+#endif
+
+#ifdef RTASM_HAVE_CMP_WRITE_U128
+    /*
+     * See if we can get out w/o any signalling as this is a common case.
+     */
+    if (pdmCritSectRwIsCmpWriteU128Supported())
+    {
+        RTCRITSECTRWSTATE OldState;
+        OldState.s.u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
+        if (OldState.s.u64State == ((UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)))
+        {
+            OldState.s.hNativeWriter = hNativeSelf;
+            AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));
+
+            RTCRITSECTRWSTATE NewState;
+            NewState.s.u64State = RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
+            NewState.s.hNativeWriter = NIL_RTNATIVETHREAD;
+
+# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
+            pThis->s.Core.cWriteRecursions = 0;
+# else
+            ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
+# endif
+            STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
+
+            if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
+                return VINF_SUCCESS;
+
+            /* bail out. */
+            pThis->s.Core.cWriteRecursions = 1;
+        }
+    }
+#endif
+
+#if defined(IN_RING3)
+    /*
+     * Ring-3: Straight forward, just update the state and if necessary signal waiters.
+     */
+# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
+    pThis->s.Core.cWriteRecursions = 0;
+# else
+    ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
+# endif
+    STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
+    ASMAtomicWriteHandle(&pThis->s.Core.u.s.hNativeWriter, NIL_RTNATIVETHREAD);
+
+    for (;;)
+    {
+        uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
+        uint64_t u64OldState = u64State;
+
+        uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
+        AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on leave"));
+        c--;
+
+        if (   c > 0
+            || (u64State & RTCSRW_CNT_RD_MASK) == 0)
+        {
+            /* Don't change the direction, wake up the next writer if any. */
+            u64State &= ~RTCSRW_CNT_WR_MASK;
+            u64State |= c << RTCSRW_CNT_WR_SHIFT;
+            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
+            {
+                if (c > 0)
+                {
+                    int rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
+                    AssertRC(rc);
+                }
+                return VINF_SUCCESS;
+            }
+        }
+        else
+        {
+            /* Reverse the direction and signal the reader threads. */
+            u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
+            u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
+            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
+            {
+                Assert(!pThis->s.Core.fNeedReset);
+                ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
+                int rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
+                AssertRC(rc);
+                return VINF_SUCCESS;
+            }
+        }
+
+        ASMNopPause();
+        if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
+            return VERR_SEM_DESTROYED;
+    }
+
+
+#elif defined(IN_RING0)
+    /*
+     * Update the state.
+     */
+    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
+        && ASMIntAreEnabled())
+    {
+# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
+        pThis->s.Core.cWriteRecursions = 0;
+# else
+        ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
+# endif
+        STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
+        ASMAtomicWriteHandle(&pThis->s.Core.u.s.hNativeWriter, NIL_RTNATIVETHREAD);
+
+        for (;;)
+        {
+            uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
+            uint64_t u64OldState = u64State;
+
+            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
+            AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on leave"));
+            c--;
+
+            if (   c > 0
+                || (u64State & RTCSRW_CNT_RD_MASK) == 0)
+            {
+                /* Don't change the direction, wake up the next writer if any. */
+                u64State &= ~RTCSRW_CNT_WR_MASK;
+                u64State |= c << RTCSRW_CNT_WR_SHIFT;
+                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
+                {
+                    if (c > 0)
+                    {
+                        int rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
+                        AssertRC(rc);
+                    }
+                    break;
+                }
+            }
+            else
+            {
+                /* Reverse the direction and signal the reader threads. */
+                u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
+                u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
+                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
+                {
+                    Assert(!pThis->s.Core.fNeedReset);
+                    ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
+                    int rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
+                    AssertRC(rc);
+                    break;
+                }
+            }
+
+            ASMNopPause();
+            if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
+                return VERR_SEM_DESTROYED;
+        }
+    }
+#endif /* IN_RING0 */
+
+#ifndef IN_RING3
+    /*
+     * Queue the requested exit for ring-3 execution.
+     */
+    PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
+    uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
+    LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3\n", i, pThis));
+    VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves),
+                                  ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
+    pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = pThis->s.pSelfR3;
+    VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
+                                  RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i])
+                                  &&    ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] & PAGE_OFFSET_MASK)
+                                     == ((uintptr_t)pThis & PAGE_OFFSET_MASK),
+                                  ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i], pThis),
+                                  pdmCritSectRwCorrupted(pThis, "Invalid self pointer on queue (excl)"));
+    VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
+    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
+    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
+    STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
+    return VINF_SUCCESS;
+#endif
 }
 
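The hunk is mostly mechanical: the early-out for nested write recursions moves to the top of the exclusive-leave path, the combined "IN_RING3 || IN_RING0" state-update section is split into separate ring-3 and ring-0 sections, and the ring-0 fallback that queues the leave for ring-3 loses its else-coupling to the state-update block. The state machine being shuffled around is compact: a single 64-bit word packs the writer count, reader count and current direction, so the final write release is a compare-and-swap loop that either keeps the direction and wakes the next waiting writer, or, when only readers are waiting, reverses the direction to READ and wakes them all. Below is a minimal standalone sketch of that loop, not VirtualBox code: the bit layout, the TOYRWSEM type and the WakeOneWriter/WakeAllReaders stand-ins are invented for illustration (the real masks and shifts live in the RTCRITSECTRW internals), and the ring-0 preemption checks and semaphore plumbing are deliberately omitted.

    /* Toy model of the write-release state machine sketched above.
       Assumption: invented bit layout, not the real RTCSRW_* values. */
    #include <stdatomic.h>
    #include <stdint.h>

    #define CNT_WR_SHIFT  0
    #define CNT_WR_MASK   UINT64_C(0x0000000000007fff)  /* waiting/owning writers */
    #define CNT_RD_SHIFT  16
    #define CNT_RD_MASK   UINT64_C(0x000000007fff0000)  /* waiting readers */
    #define DIR_SHIFT     31
    #define DIR_MASK      UINT64_C(0x0000000080000000)
    #define DIR_READ      UINT64_C(1)                   /* direction: write = 0 */

    typedef struct TOYRWSEM
    {
        _Atomic uint64_t u64State;          /* packed counts + direction */
        _Atomic uint32_t cWriteRecursions;  /* only touched by the owner */
    } TOYRWSEM;

    /* Stand-ins for the SUPSemEventSignal/SUPSemEventMultiSignal calls. */
    static void WakeOneWriter(TOYRWSEM *pSem)  { (void)pSem; }
    static void WakeAllReaders(TOYRWSEM *pSem) { (void)pSem; }

    /* Release one write recursion; mirrors the final-recursion CAS loop. */
    static int ToyRwLeaveExcl(TOYRWSEM *pSem)
    {
        /* Not the last recursion: just decrement and return (the early-out
           the changeset moves to the top of the function).  Only the owner
           thread touches this counter, so load-then-sub is race-free here. */
        if (atomic_load(&pSem->cWriteRecursions) > 1)
        {
            atomic_fetch_sub(&pSem->cWriteRecursions, 1);
            return 0;
        }
        atomic_store(&pSem->cWriteRecursions, 0);

        for (;;)
        {
            uint64_t u64OldState = atomic_load(&pSem->u64State);
            uint64_t u64State    = u64OldState;

            uint64_t c = (u64State & CNT_WR_MASK) >> CNT_WR_SHIFT;
            if (c == 0)
                return -1;              /* corrupted, cf. pdmCritSectRwCorrupted */
            c--;

            if (c > 0 || (u64State & CNT_RD_MASK) == 0)
            {
                /* Keep the direction, drop our write count, wake next writer. */
                u64State = (u64State & ~CNT_WR_MASK) | (c << CNT_WR_SHIFT);
                if (atomic_compare_exchange_strong(&pSem->u64State, &u64OldState, u64State))
                {
                    if (c > 0)
                        WakeOneWriter(pSem);
                    return 0;
                }
            }
            else
            {
                /* Only readers are waiting: reverse direction, wake them all. */
                u64State = (u64State & ~(CNT_WR_MASK | DIR_MASK)) | (DIR_READ << DIR_SHIFT);
                if (atomic_compare_exchange_strong(&pSem->u64State, &u64OldState, u64State))
                {
                    WakeAllReaders(pSem);
                    return 0;
                }
            }
            /* CAS raced with a concurrent enter/leave; reload and retry. */
        }
    }

The RTASM_HAVE_CMP_WRITE_U128 section adds a further shortcut on top of this: the 64-bit state word and the native writer handle sit next to each other, so the common uncontended release can compare and clear both with one 16-byte atomic compare-and-write and skip signalling entirely; on failure the code restores cWriteRecursions to 1 and drops into the slow path. A hedged sketch of the same idea using the GCC/Clang __atomic builtins (assumption: an x86-64 target with cmpxchg16b, e.g. compiled with -mcx16; the TOYRWSTATE layout is again invented):

    /* Toy 16-byte fast path; not the IPRT ASMAtomicCmpWriteU128U API. */
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct TOYRWSTATE
    {
        uint64_t u64State;       /* packed counts + direction (low half) */
        uint64_t hNativeWriter;  /* owning thread, 0 when unowned (high half) */
    } __attribute__((aligned(16))) TOYRWSTATE;

    static bool ToyTryFastLeave(TOYRWSTATE *pState, uint64_t hSelf)
    {
        /* Expected: direction = write (encoded as 0 here), write count = 1,
           no readers queued, and we are the registered writer. */
        TOYRWSTATE Old = { UINT64_C(1), hSelf };
        /* Desired: still write direction, zero writers, no owner. */
        TOYRWSTATE New = { UINT64_C(0), 0 };
        /* Compiles to cmpxchg16b (or a libatomic call); fails and leaves the
           lock untouched if any waiter changed either half in the meantime. */
        return __atomic_compare_exchange(pState, &Old, &New, false,
                                         __ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
    }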