Changeset 90671 in vbox
- Timestamp:
- Aug 12, 2021 9:04:49 PM
- svn:sync-xref-src-repo-rev:
- 146283
- File:
  - 1 edited
Legend:
- ' ' unmodified
- '+' added
- '-' removed
trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp (r90670 → r90671)

@@ r90670 lines 224-227, r90671 lines 224-338 @@
     Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
     return VINF_SUCCESS;
+}
+
+
+/**
+ * Worker for pdmCritSectRwEnterShared that handles waiting for a contended CS.
+ * Caller has already added us to the read and read-wait counters.
+ */
+static int pdmCritSectRwEnterSharedContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis,
+                                             int rcBusy, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
+{
+    RT_NOREF(pVCpu, rcBusy);
+
+# if !defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
+    hThreadSelf = RTThreadSelf();
+# endif
+
+    uint64_t u64State;
+    uint64_t u64OldState;
+    for (uint32_t iLoop = 0; ; iLoop++)
+    {
+        int rc;
+# ifdef IN_RING3
+#  if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
+        rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
+                                                   RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
+        if (RT_SUCCESS(rc))
+#  else
+        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
+#  endif
+# endif
+        {
+            for (;;)
+            {
+                rc = SUPSemEventMultiWaitNoResume(pVM->pSession,
+                                                  (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead,
+                                                  RT_INDEFINITE_WAIT);
+                if (   rc != VERR_INTERRUPTED
+                    || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
+                    break;
+# ifdef IN_RING0
+                pdmR0CritSectRwYieldToRing3(pVM);
+# endif
+            }
+# ifdef IN_RING3
+            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
+# endif
+            if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
+                return VERR_SEM_DESTROYED;
+        }
+        if (RT_FAILURE(rc))
+        {
+            /* Decrement the counts and return the error. */
+            for (;;)
+            {
+                u64OldState = u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
+                uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
+                AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid read count on bailout"));
+                c--;
+                uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
+                AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count on bailout"));
+                cWait--;
+                u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
+                u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
+                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
+                    break;
+
+                ASMNopPause();
+                AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
+            }
+            return rc;
+        }
+
+        Assert(pThis->s.Core.fNeedReset);
+        u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
+        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
+            break;
+        AssertMsg(iLoop < 1, ("%u\n", iLoop));
+    }
+
+    /* Decrement the wait count and maybe reset the semaphore (if we're last). */
+    for (;;)
+    {
+        u64OldState = u64State;
+
+        uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
+        AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count"));
+        cWait--;
+        u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
+        u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
+
+        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
+        {
+            if (cWait == 0)
+            {
+                if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
+                {
+                    int rc = SUPSemEventMultiReset(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
+                    AssertRCReturn(rc, rc);
+                }
+            }
+            return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
+        }
+
+        ASMNopPause();
+        AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
+        ASMNopPause();
+
+        u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
+    }
+
+    /* not reached */
 }

@@ r90670 lines 360-460, r90671 lines 471-475 @@
         if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
         {
-# if !defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
-            hThreadSelf = RTThreadSelf();
-# endif
-
-            for (uint32_t iLoop = 0; ; iLoop++)
-            {
-                int rc;
-# ifdef IN_RING3
-#  if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
-                rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
-                                                           RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
-                if (RT_SUCCESS(rc))
-#  else
-                RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
-#  endif
-# endif
-                {
-                    for (;;)
-                    {
-                        rc = SUPSemEventMultiWaitNoResume(pVM->pSession,
-                                                          (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead,
-                                                          RT_INDEFINITE_WAIT);
-                        if (   rc != VERR_INTERRUPTED
-                            || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
-                            break;
-# ifdef IN_RING0
-                        pdmR0CritSectRwYieldToRing3(pVM);
-# endif
-                    }
-# ifdef IN_RING3
-                    RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
-# endif
-                    if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
-                        return VERR_SEM_DESTROYED;
-                }
-                if (RT_FAILURE(rc))
-                {
-                    /* Decrement the counts and return the error. */
-                    for (;;)
-                    {
-                        u64OldState = u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
-                        c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
-                        AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid read count on bailout"));
-                        c--;
-                        cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
-                        AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count on bailout"));
-                        cWait--;
-                        u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
-                        u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
-                        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
-                            break;
-
-                        ASMNopPause();
-                        AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
-                    }
-                    return rc;
-                }
-
-                Assert(pThis->s.Core.fNeedReset);
-                u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
-                if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
-                    break;
-                AssertMsg(iLoop < 1, ("%u\n", iLoop));
-            }
-
-            /* Decrement the wait count and maybe reset the semaphore (if we're last). */
-            for (;;)
-            {
-                u64OldState = u64State;
-
-                cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
-                AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count"));
-                cWait--;
-                u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
-                u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
-
-                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
-                {
-                    if (cWait == 0)
-                    {
-                        if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
-                        {
-                            int rc = SUPSemEventMultiReset(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
-                            AssertRCReturn(rc, rc);
-                        }
-                    }
-                    return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
-                }
-
-                ASMNopPause();
-                AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
-                ASMNopPause();
-
-                u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
-            }
-
-            /* not reached */
+            return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
         }
     }
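The blocking half of the new worker follows a common interruptible-wait shape: sleep on a multi-release event semaphore, go back to sleep on VERR_INTERRUPTED, and bail out if the section's magic value shows it was destroyed while the thread slept. Below is a minimal stand-alone sketch of just that shape; every name in it (wait_on_event, MY_MAGIC, the status codes) is hypothetical and merely stands in for the SUPSemEventMultiWaitNoResume / RTCRITSECTRW_MAGIC logic in the patch, it is not the VirtualBox API.

/* Hypothetical, simplified model of the interruptible wait loop in
 * pdmCritSectRwEnterSharedContended; not VirtualBox code. */
#include <stdint.h>

#define MY_MAGIC             UINT32_C(0x19221120)   /* placeholder "object is alive" marker */
#define MY_SUCCESS           0                      /* placeholder status codes */
#define MY_ERR_INTERRUPTED   (-104)
#define MY_ERR_SEM_DESTROYED (-134)

/* Stub standing in for the event-semaphore wait; a real implementation blocks
 * until the event is signalled or the wait is interrupted by a signal. */
static int wait_on_event(void *hEvt)
{
    (void)hEvt;
    return MY_SUCCESS;
}

int wait_until_signalled(void *hEvt, volatile uint32_t const *pu32Magic)
{
    for (;;)
    {
        int rc = wait_on_event(hEvt);

        /* The object can be torn down while we sleep; the magic check catches that. */
        if (*pu32Magic != MY_MAGIC)
            return MY_ERR_SEM_DESTROYED;

        /* An interrupted wait is not a wake-up: simply wait again. */
        if (rc != MY_ERR_INTERRUPTED)
            return rc;
    }
}

In the changeset itself this loop additionally yields to ring-3 on VERR_INTERRUPTED when built for ring-0 (pdmR0CritSectRwYieldToRing3) and notifies the lock validator that the thread is blocking/unblocked when built for ring-3.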
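Both the error bail-out and the final "got it" path in the worker update the packed 64-bit state word with a compare-and-swap retry loop: read the current value, recompute the counts, attempt the exchange, and retry on contention. The sketch below illustrates that pattern in isolation using C11 atomics; the mask, shift and function names are hypothetical, whereas the real code operates on pThis->s.Core.u.s.u64State with ASMAtomicCmpXchgU64 and the RTCSRW_* masks.

/* Hypothetical sketch of the packed-state CAS retry loop; not VirtualBox code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define WAIT_CNT_RD_MASK  UINT64_C(0x0000ffff00000000)  /* waiting-reader count field */
#define WAIT_CNT_RD_SHIFT 32

/* Drops one waiting reader from the packed state word and returns true when the
 * caller was the last waiter (mirroring the "decrement the wait count and maybe
 * reset the semaphore" loop, where only the last waiter resets the event). */
bool rw_dec_waiting_reader(_Atomic uint64_t *pState)
{
    uint64_t uOld = atomic_load(pState);
    for (;;)
    {
        uint64_t cWait = (uOld & WAIT_CNT_RD_MASK) >> WAIT_CNT_RD_SHIFT;
        /* The real code treats cWait == 0 here as structure corruption. */
        uint64_t uNew  = (uOld & ~WAIT_CNT_RD_MASK) | ((cWait - 1) << WAIT_CNT_RD_SHIFT);

        /* On failure uOld is refreshed with the current value and the loop retries,
         * like the ASMAtomicCmpXchgU64 + ASMNopPause loop in the patch. */
        if (atomic_compare_exchange_weak(pState, &uOld, uNew))
            return cWait == 1;   /* we were the last waiter */
    }
}

In the changeset, a successful exchange by the last waiter is followed by clearing fNeedReset and calling SUPSemEventMultiReset, and then by pdmCritSectRwEnterSharedGotIt to complete the ownership and validator bookkeeping.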
Note: See TracChangeset for help on using the changeset viewer.