Changeset 21593 in vbox for trunk/src/VBox/Runtime/r0drv/generic/semspinmutex-r0drv-generic.c
- Timestamp:
- Jul 14, 2009 10:32:38 PM (15 years ago)
- File:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/Runtime/r0drv/generic/semspinmutex-r0drv-generic.c
r21555 r21593 150 150 static int rtSemSpinMutexEnter(RTSEMSPINMUTEXSTATE *pState, RTSEMSPINMUTEXINTERNAL *pThis) 151 151 { 152 int rc = VINF_SUCCESS; 153 152 154 /** @todo Later #1: When entering in interrupt context and we're not able to 153 155 * wake up threads from it, we could try switch the lock into pure … … 187 189 { 188 190 if (!(pThis->fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE)) 189 r eturn VERR_SEM_BAD_CONTEXT;191 rc = VINF_SEM_BAD_CONTEXT; /* Try, but owner might be interrupted. */ 190 192 pState->fSpin = true; 191 193 } … … 212 214 pState->fSavedFlags = 0; 213 215 214 return VINF_SUCCESS;216 return rc; 215 217 } 216 218 … … 270 272 { 271 273 /* Busy, too bad. Check for attempts at nested access. */ 272 intrc = VERR_SEM_BUSY;274 rc = VERR_SEM_BUSY; 273 275 if (RT_UNLIKELY(pThis->hOwner == hSelf)) 274 276 { … … 329 331 330 332 /* 333 * Return if we're in interrupt context and the semaphore isn't 334 * configure to be interrupt safe. 335 */ 336 if (rc == VINF_SEM_BAD_CONTEXT) 337 { 338 rtSemSpinMutexLeave(&State); 339 return VERR_SEM_BAD_CONTEXT; 340 } 341 342 /* 331 343 * Ok, we have to wait. 332 344 */ 333 for (cSpins = 0;; cSpins++)345 if (State.fSpin) 334 346 { 335 ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc); 336 if (fRc) 337 break; 338 339 if (RT_UNLIKELY(pThis->u32Magic != RTSEMSPINMUTEX_MAGIC)) 347 for (cSpins = 0; ; cSpins++) 340 348 { 341 rtSemSpinMutexLeave(&State); 342 return VERR_SEM_DESTROYED; 349 ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc); 350 if (fRc) 351 break; 352 ASMNopPause(); 353 if (RT_UNLIKELY(pThis->u32Magic != RTSEMSPINMUTEX_MAGIC)) 354 { 355 rtSemSpinMutexLeave(&State); 356 return VERR_SEM_DESTROYED; 357 } 358 359 /* 360 * "Yield" once in a while. This may lower our IRQL/PIL which 361 * may preempting us, and it will certainly stop the hammering 362 * of hOwner for a little while. 
363 */ 364 if ((cSpins & 0x7f) == 0x1f) 365 { 366 rtSemSpinMutexLeave(&State); 367 rtSemSpinMutexEnter(&State, pThis); 368 Assert(State.fSpin); 369 } 343 370 } 344 345 if ( State.fSpin 346 || (cSpins & 15) != 15 /* spin a bit everytime we wake up. */) 371 } 372 else 373 { 374 for (cSpins = 0;; cSpins++) 375 { 376 ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc); 377 if (fRc) 378 break; 347 379 ASMNopPause(); 348 else 349 { 350 rtSemSpinMutexLeave(&State); 351 352 rc = RTSemEventWait(pThis->hEventSem, RT_INDEFINITE_WAIT); 353 ASMCompilerBarrier(); 354 if (RT_SUCCESS(rc)) 355 AssertReturn(pThis->u32Magic == RTSEMSPINMUTEX_MAGIC, VERR_SEM_DESTROYED); 356 else if (rc == VERR_INTERRUPTED) 357 AssertRC(rc); /* shouldn't happen */ 358 else 380 if (RT_UNLIKELY(pThis->u32Magic != RTSEMSPINMUTEX_MAGIC)) 359 381 { 360 AssertRC(rc);361 return rc;382 rtSemSpinMutexLeave(&State); 383 return VERR_SEM_DESTROYED; 362 384 } 363 385 364 rc = rtSemSpinMutexEnter(&State, pThis); 365 AssertRCReturn(rc, rc); 386 if ((cSpins & 15) == 15) /* spin a bit before going sleep (again). */ 387 { 388 rtSemSpinMutexLeave(&State); 389 390 rc = RTSemEventWait(pThis->hEventSem, RT_INDEFINITE_WAIT); 391 ASMCompilerBarrier(); 392 if (RT_SUCCESS(rc)) 393 AssertReturn(pThis->u32Magic == RTSEMSPINMUTEX_MAGIC, VERR_SEM_DESTROYED); 394 else if (rc == VERR_INTERRUPTED) 395 AssertRC(rc); /* shouldn't happen */ 396 else 397 { 398 AssertRC(rc); 399 return rc; 400 } 401 402 rc = rtSemSpinMutexEnter(&State, pThis); 403 AssertRCReturn(rc, rc); 404 Assert(!State.fSpin); 405 } 366 406 } 367 407 }
Note:
See TracChangeset
for help on using the changeset viewer.