- Timestamp: Aug 2, 2021 8:40:40 PM (4 years ago)
- Location: trunk/src/VBox/VMM
- Files: 3 edited

trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp
```diff
--- r90472
+++ r90486
@@ -881 +881 @@
             LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
             AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
+            /** @todo This doesn't work any more for devices. */
             pVCpu->pdm.s.apQueuedCritSectLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
             VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
```
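
The @todo added here flags the queued-leave mechanism: when the section cannot be left safely in the current context, the leave is recorded in a fixed-size per-VCPU array and VMCPU_FF_PDM_CRITSECT is set so ring-3 unwinds it later. A minimal, self-contained sketch of that pattern follows; all type and function names below are illustrative stand-ins, not the real PDM/VMM structures.

```c
/*
 * Sketch of the queued-leave pattern: when a lock cannot be released in
 * the current context, record it in a small fixed-size per-CPU queue and
 * set a flag so a later, safer context drains the queue.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_QUEUED_LEAVES 8

typedef struct FAKECRITSECT { const char *pszName; bool fHeld; } FAKECRITSECT;

typedef struct FAKEVCPU {
    FAKECRITSECT *apQueuedLeaves[MAX_QUEUED_LEAVES]; /* deferred unlocks */
    unsigned      cQueuedLeaves;
    bool          fLeaveActionPending;  /* stand-in for VMCPU_FF_PDM_CRITSECT */
} FAKEVCPU;

/* Queue a leave instead of unlocking right away (unsafe context). */
static void queueLeave(FAKEVCPU *pVCpu, FAKECRITSECT *pCritSect)
{
    unsigned i = pVCpu->cQueuedLeaves++;
    assert(i < MAX_QUEUED_LEAVES);      /* mirrors the AssertFatal above */
    pVCpu->apQueuedLeaves[i] = pCritSect;
    pVCpu->fLeaveActionPending = true;  /* tell the safe context there is work */
}

/* Later, in the safe context, drain the queue and really unlock. */
static void processQueuedLeaves(FAKEVCPU *pVCpu)
{
    for (unsigned i = 0; i < pVCpu->cQueuedLeaves; i++)
    {
        pVCpu->apQueuedLeaves[i]->fHeld = false;
        printf("released %s\n", pVCpu->apQueuedLeaves[i]->pszName);
    }
    pVCpu->cQueuedLeaves = 0;
    pVCpu->fLeaveActionPending = false;
}

int main(void)
{
    FAKEVCPU     VCpu = { { NULL }, 0, false };
    FAKECRITSECT CritSect = { "demo", true };
    queueLeave(&VCpu, &CritSect);
    if (VCpu.fLeaveActionPending)
        processQueuedLeaves(&VCpu);
    return 0;
}
```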
trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp
```diff
--- r90385
+++ r90486
@@ -46 +46 @@
 *********************************************************************************************************************************/
 /** The number loops to spin for shared access in ring-3. */
 #define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3    20
 /** The number loops to spin for shared access in ring-0. */
 #define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0    128
 /** The number loops to spin for shared access in the raw-mode context. */
 #define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC    128
 
 /** The number loops to spin for exclusive access in ring-3. */
 #define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3    20
 /** The number loops to spin for exclusive access in ring-0. */
 #define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0    256
 /** The number loops to spin for exclusive access in the raw-mode context. */
 #define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC    256
 
+/** Max number of write or write/read recursions. */
+#define PDM_CRITSECTRW_MAX_RECURSIONS       _1M
 
 /* Undefine the automatic VBOX_STRICT API mappings. */
@@ -91 +93 @@
 
 
+DECL_NO_INLINE(static, int) pdmCritSectRwCorrupted(PPDMCRITSECTRW pThis)
+{
+    ASMAtomicWriteU32(&pThis->s.Core.u32Magic, PDMCRITSECTRW_MAGIC_CORRUPT);
+    return VERR_PDM_CRITSECTRW_IPE;
+}
 
 
@@ -198 +205 @@
                 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
                 c++;
-                Assert(c < RTCSRW_CNT_MASK / 2);
+                Assert(c < RTCSRW_CNT_MASK / 4);
+                AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
                 u64State &= ~RTCSRW_CNT_RD_MASK;
                 u64State |= c << RTCSRW_CNT_RD_SHIFT;
@@ -228 +236 @@
         {
             /* Is the writer perhaps doing a read recursion? */
-            RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
             RTNATIVETHREAD hNativeWriter;
             ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
-            if (hNativeSelf == hNativeWriter)
+            if (hNativeWriter != NIL_RTNATIVETHREAD)
             {
+                RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
+                if (hNativeSelf == hNativeWriter)
+                {
 #if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
-                if (!fNoVal)
-                {
-                    int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
-                    if (RT_FAILURE(rc9))
-                        return rc9;
-                }
+                    if (!fNoVal)
+                    {
+                        int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
+                        if (RT_FAILURE(rc9))
+                            return rc9;
+                    }
 #endif
-                Assert(pThis->s.Core.cWriterReads < UINT32_MAX / 2);
-                ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
-                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
-                return VINF_SUCCESS; /* don't break! */
+                    uint32_t const cReads = ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
+                    Assert(cReads < _16K);
+                    AssertReturnStmt(cReads < PDM_CRITSECTRW_MAX_RECURSIONS, ASMAtomicDecU32(&pThis->s.Core.cWriterReads),
+                                     VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
+                    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
+                    return VINF_SUCCESS; /* don't break! */
+                }
             }
 
@@ -268 +281 @@
             c++;
             Assert(c < RTCSRW_CNT_MASK / 2);
+            AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
 
             uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
@@ -273 +287 @@
             Assert(cWait <= c);
             Assert(cWait < RTCSRW_CNT_MASK / 2);
+            AssertReturn(cWait < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
 
             u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
@@ -317 +332 @@
             {
                 u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
-                c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; Assert(c > 0);
+                c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
+                AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis));
                 c--;
-                cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
+                cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
+                AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis));
                 cWait--;
                 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
@@ -342 +359 @@
 
                 cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
-                Assert(cWait > 0);
+                AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis));
                 cWait--;
                 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
@@ -633 +650 @@
             LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
             AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves));
+            /** @todo This doesn't work any more for devices. */
             pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = MMHyperCCToR3(pVM, pThis);
             VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
@@ -650 +668 @@
     else
     {
+        /*
+         * Write direction. Check that it's the owner calling and that it has reads to undo.
+         */
         RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
+        AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
+
         RTNATIVETHREAD hNativeWriter;
         ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
@@ -663 +686 @@
         }
 #endif
-        ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
+        uint32_t cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
+        AssertReturn(cDepth < PDM_CRITSECTRW_MAX_RECURSIONS, pdmCritSectRwCorrupted(pThis));
     }
 
     return VINF_SUCCESS;
 }
+
@@ -744 +769 @@
      * Check if we're already the owner and just recursing.
      */
-    RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
+    RTNATIVETHREAD const hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
+    AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
     RTNATIVETHREAD hNativeWriter;
     ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
@@ -758 +784 @@
         }
 #endif
-        Assert(pThis->s.Core.cWriteRecursions < UINT32_MAX / 2);
         STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
-        ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
+        uint32_t const cDepth = ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
+        AssertReturnStmt(cDepth > 1 && cDepth <= PDM_CRITSECTRW_MAX_RECURSIONS,
+                         ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions),
+                         VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
         return VINF_SUCCESS;
     }
@@ -777 +805 @@
             /* It flows in the right direction, try follow it before it changes. */
             uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
+            AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
             c++;
-            Assert(c < RTCSRW_CNT_MASK / 2);
+            Assert(c < RTCSRW_CNT_WR_MASK / 4);
             u64State &= ~RTCSRW_CNT_WR_MASK;
             u64State |= c << RTCSRW_CNT_WR_SHIFT;
@@ -802 +831 @@
             /* Add ourselves to the write count and break out to do the wait. */
             uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
+            AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
             c++;
-            Assert(c < RTCSRW_CNT_MASK / 2);
+            Assert(c < RTCSRW_CNT_WR_MASK / 4);
             u64State &= ~RTCSRW_CNT_WR_MASK;
             u64State |= c << RTCSRW_CNT_WR_SHIFT;
@@ -820 +850 @@
     /*
      * If we're in write mode now try grab the ownership. Play fair if there
-     * are threads already waiting.
+     * are threads already waiting, unless we're in ring-0.
      */
     bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
@@ -886 +916 @@
         {
             u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
-            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
+            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
+            AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis));
             c--;
             u64State &= ~RTCSRW_CNT_WR_MASK;
@@ -920 +951 @@
         {
             u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
-            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
+            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
+            AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis));
             c--;
             u64State &= ~RTCSRW_CNT_WR_MASK;
@@ -1125 +1157 @@
 #endif
 
+    /*
+     * Check ownership.
+     */
     RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
+    AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
+
     RTNATIVETHREAD hNativeWriter;
     ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
@@ -1165 +1202 @@
 
         uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
-        Assert(c > 0);
+        AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis));
         c--;
 
@@ -1217 +1254 @@
                 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
                 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3\n", i, pThis));
-                AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves));
+                AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves));
+                /** @todo This doesn't work anymore for devices. */
                 pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = MMHyperCCToR3(pVM, pThis);
                 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
@@ -1231 +1269 @@
          * Not the final recursion.
          */
-        Assert(pThis->s.Core.cWriteRecursions != 0);
 #if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
         if (fNoVal)
@@ -1242 +1279 @@
         }
 #endif
-        ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
+        uint32_t const cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
+        AssertReturn(cDepth != 0 && cDepth < UINT32_MAX, pdmCritSectRwCorrupted(pThis));
     }
 
```
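
Most of the AssertReturn guards above protect counters packed into the atomic u64State word, which the RTCSRW_CNT_RD_MASK / RTCSRW_CNT_WR_SHIFT style masks and shifts carve into reader, waiting-reader, and writer fields. Below is a minimal sketch of that mask/shift/compare-and-swap pattern with an overflow guard, using C11 atomics and a made-up field layout; the real RTCSRW_* layout is not reproduced here.

```c
/*
 * Illustrative packed 64-bit state word: a counter lives in a masked
 * bit-field, is extracted with mask+shift, bounds-checked before use,
 * and the updated word is published with a compare-and-swap loop.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define CNT_RD_SHIFT  0
#define CNT_RD_MASK   UINT64_C(0x0000000000007fff)  /* made-up 15-bit reader count */

static _Atomic uint64_t g_u64State;

/* Returns 0 on success, -1 when the reader count would overflow. */
static int tryAddReader(void)
{
    uint64_t u64State = atomic_load(&g_u64State);
    for (;;)
    {
        uint64_t c = (u64State & CNT_RD_MASK) >> CNT_RD_SHIFT;
        c++;
        if (c >= (CNT_RD_MASK >> CNT_RD_SHIFT))  /* the AssertReturn-style guard */
            return -1;
        uint64_t u64New = (u64State & ~CNT_RD_MASK) | (c << CNT_RD_SHIFT);
        /* CAS publishes the new count; on failure u64State is reloaded. */
        if (atomic_compare_exchange_weak(&g_u64State, &u64State, u64New))
            return 0;
    }
}

int main(void)
{
    for (int i = 0; i < 3; i++)
        tryAddReader();
    printf("readers: %llu\n",
           (unsigned long long)((atomic_load(&g_u64State) & CNT_RD_MASK) >> CNT_RD_SHIFT));
    return 0;
}
```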
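The other recurring change is the bounded-recursion pattern: instead of a plain Assert on cWriterReads or cWriteRecursions, the new code takes the depth returned by the atomic increment, range-checks it, and rolls the increment back before returning an error (the AssertReturnStmt calls above). A sketch under the same assumptions, with stand-in names for PDM_CRITSECTRW_MAX_RECURSIONS and the VERR_* codes:

```c
/*
 * Bounded recursion: atomically bump a recursion counter, validate the
 * resulting depth, and undo the increment before failing if the bound
 * is exceeded.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_RECURSIONS (1024U * 1024U)   /* plays the role of _1M */

static _Atomic uint32_t g_cWriteRecursions = 1;  /* 1 == held once, no recursion */

/* Returns 0 on success, -1 if the recursion bound (or sanity) check fails. */
static int enterRecursive(void)
{
    /* atomic_fetch_add returns the *previous* value, so +1 gives the new depth
       (ASMAtomicIncU32 returns the new value directly). */
    uint32_t cDepth = atomic_fetch_add(&g_cWriteRecursions, 1) + 1;
    if (cDepth <= 1 || cDepth > MAX_RECURSIONS)
    {
        atomic_fetch_sub(&g_cWriteRecursions, 1);  /* undo before reporting failure */
        return -1;
    }
    return 0;
}

static void leaveRecursive(void)
{
    uint32_t cDepth = atomic_fetch_sub(&g_cWriteRecursions, 1) - 1;
    if (cDepth == UINT32_MAX)                      /* underflow: corruption */
        fprintf(stderr, "corrupted recursion counter\n");
}

int main(void)
{
    if (enterRecursive() == 0)
    {
        printf("depth now %u\n", (unsigned)atomic_load(&g_cWriteRecursions));
        leaveRecursive();
    }
    return 0;
}
```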
trunk/src/VBox/VMM/include/PDMInternal.h
```diff
--- r90468
+++ r90486
@@ -508 +508 @@
 typedef PDMCRITSECTRWINT *PPDMCRITSECTRWINT;
 
+/** Special magic value we set the structure has become corrupted. */
+#define PDMCRITSECTRW_MAGIC_CORRUPT     UINT32_C(0x0bad0620)
 
 
```
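
This define supplies the value that pdmCritSectRwCorrupted() (added in PDMAllCritSectRw.cpp above) writes into u32Magic, so every later API call that validates the magic rejects the poisoned section. A small sketch of that fail-fast marking, with illustrative names everywhere except the 0x0bad0620 constant itself:

```c
/*
 * Magic-based corruption marking: every object carries a magic; calls
 * validate it on entry; on an impossible state the magic is atomically
 * replaced with a distinct "corrupt" value so later calls fail fast.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAGIC         UINT32_C(0x19221120)  /* made-up "alive" magic */
#define DEMO_MAGIC_CORRUPT UINT32_C(0x0bad0620)  /* same value the changeset picks */

typedef struct DEMOSECT { _Atomic uint32_t u32Magic; } DEMOSECT;

/* Mirrors pdmCritSectRwCorrupted(): mark the object and return an error code. */
static int demoCorrupted(DEMOSECT *pThis)
{
    atomic_store(&pThis->u32Magic, DEMO_MAGIC_CORRUPT);
    return -1;   /* stand-in for VERR_PDM_CRITSECTRW_IPE */
}

static int demoEnter(DEMOSECT *pThis, uint32_t cReaders)
{
    if (atomic_load(&pThis->u32Magic) != DEMO_MAGIC)
        return -2;                     /* refuse to touch a corrupted section */
    if (cReaders == 0)                 /* "impossible" state detected */
        return demoCorrupted(pThis);   /* poison it for everyone else */
    return 0;
}

int main(void)
{
    DEMOSECT Sect = { DEMO_MAGIC };
    printf("first:  %d\n", demoEnter(&Sect, 0));  /* trips corruption: -1 */
    printf("second: %d\n", demoEnter(&Sect, 1));  /* now rejected: -2 */
    return 0;
}
```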