- Timestamp: Jun 9, 2008 6:19:26 PM
- File: 1 edited
Legend:
- Unmodified: context lines (leading space)
- Added: lines prefixed with +
- Removed: lines prefixed with -
trunk/src/VBox/Runtime/r0drv/solaris/timer-r0drv-solaris.c
r9444 → r9557

 
 /*
- * Copyright (C) 2006-2007 Sun Microsystems, Inc.
+ * Copyright (C) 2006-2008 Sun Microsystems, Inc.
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
…
 #include <iprt/timer.h>
 #include <iprt/time.h>
+#include <iprt/mp.h>
 #include <iprt/spinlock.h>
 #include <iprt/err.h>
…
 *******************************************************************************/
 /**
+ * This is used to track sub-timer data.
+ */
+typedef struct RTTIMERSOLSUBTIMER
+{
+    /** The current timer tick. */
+    uint64_t            iTick;
+    /** Pointer to the parent timer. */
+    PRTTIMER            pParent;
+} RTTIMERSOLSUBTIMER;
+/** Pointer to a Solaris sub-timer. */
+typedef RTTIMERSOLSUBTIMER *PRTTIMERSOLSUBTIMER;
+
+/**
  * The internal representation of a Solaris timer handle.
  */
…
  * is destroyed to indicate clearly that thread should exit. */
     uint32_t volatile   u32Magic;
-    /** Flag indicating the the timer is suspended. */
-    uint8_t volatile    fSuspended;
+    /** The cyclic timer id.
+     * This is CYCLIC_NONE if the timer hasn't been started. */
+    cyclic_id_t volatile CyclicId;
+    /** Flag used by rtTimerSolarisOmniOnlineCallback to see whether we're inside the cyclic_add_omni call or not. */
+    bool volatile       fStarting;
     /** Whether the timer must run on a specific CPU or not. */
-    uint8_t             fSpecificCpu;
+    bool                fSpecificCpu;
+    /** Set if we're using an omni cyclic. */
+    bool                fOmni;
     /** The CPU it must run on if fSpecificCpu is set. */
-    uint8_t             iCpu;
-    /** The current timer tick (since last timer start). */
-    uint64_t            iTick;
-    /** The Solaris cyclic structure. */
-    cyc_handler_t       CyclicInfo;
-    /** The Solaris cyclic handle. */
-    cyclic_id_t         CyclicID;
+    RTCPUID             idCpu;
     /** Callback. */
     PFNRTTIMER          pfnTimer;
…
     /** The timer interval. 0 for one-shot timer */
     uint64_t            u64NanoInterval;
+    /** The timer spec (for omni timers mostly). */
+    cyc_time_t          TimeSpecs;
+    /** The number of sub-timers. */
+    RTCPUID             cSubTimers;
+    /** Sub-timer data.
+     * When fOmni is set, this will be an array indexed by CPU id.
+     * When fOmni is clear, the array will only have one member. */
+    RTTIMERSOLSUBTIMER  aSubTimers[1];
 } RTTIMER;
…
 *******************************************************************************/
 static void rtTimerSolarisCallback(void *pvTimer);
-static void rtTimerSolarisStop(PRTTIMER pTimer);
+static void rtTimerSolarisOmniCallback(void *pvSubTimer);
+static void rtTimerSolarisOmniDummyCallback(void *pvIgnored);
+static void rtTimerSolarisOmniOnlineCallback(void *pvTimer, cpu_t *pCpu, cyc_handler_t *pCyclicInfo, cyc_time_t *pTimeSpecs);
+static void rtTimerSolarisOmniOfflineCallback(void *pvTimer, cpu_t *pCpu, void *pvTick);
+static bool rtTimerSolarisStop(PRTTIMER pTimer);
+
+
+AssertCompileSize(cyclic_id_t, sizeof(void *));
+
+/** Atomic read of RTTIMER::CyclicId. */
+DECLINLINE(cyclic_id_t) rtTimerSolarisGetCyclicId(PRTTIMER pTimer)
+{
+    return (cyclic_id_t)ASMAtomicUoReadPtr((void * volatile *)&pTimer->CyclicId);
+}
+
+
+/** Atomic write of RTTIMER::CyclicId. */
+DECLINLINE(cyclic_id_t) rtTimerSolarisSetCyclicId(PRTTIMER pTimer, cyclic_id_t CyclicIdNew)
+{
+    ASMAtomicWritePtr((void * volatile *)&pTimer->CyclicId, (void *)CyclicIdNew);
+}
+
+
+/** Atomic compare and exchange of RTTIMER::CyclicId. */
+DECLINLINE(bool) rtTimerSolarisCmpXchgCyclicId(PRTTIMER pTimer, cyclic_id_t CyclicIdNew, cyclic_id_t CyclicIdOld)
+{
+    return ASMAtomicCmpXchgPtr((void * volatile *)&pTimer->CyclicId, (void *)CyclicIdNew, (void *)CyclicIdOld);
+}
 
 
 RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, unsigned fFlags, PFNRTTIMER pfnTimer, void *pvUser)
 {
+    RTCPUID i;
     *ppTimer = NULL;
 
…
         return VERR_INVALID_PARAMETER;
     if (    (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
-        /** @todo implement && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL*/)
-        return VERR_NOT_SUPPORTED;
+        &&  (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
+        &&  !RTMpIsCpuPossible((fFlags & RTTIMER_FLAGS_CPU_MASK)))
+        return VERR_CPU_NOT_FOUND;
 
     /*
      * Allocate and initialize the timer handle.
      */
-    PRTTIMER pTimer = (PRTTIMER)RTMemAlloc(sizeof(*pTimer));
+    size_t cCpus = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL
+                 ? RTMpGetMaxCpuId() + 1 /* ASSUMES small max value, no pointers. */
+                 : 1;
+    PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ(RT_OFFSETOF(RTTIMER, aSubTimers[cCpus]));
     if (!pTimer)
         return VERR_NO_MEMORY;
 
     pTimer->u32Magic = RTTIMER_MAGIC;
-    pTimer->fSuspended = true;
-    pTimer->fSpecificCpu = !!(fFlags & RTTIMER_FLAGS_CPU_SPECIFIC);
-    pTimer->iCpu = fFlags & RTTIMER_FLAGS_CPU_MASK;
-    pTimer->iTick = 0;
-    pTimer->CyclicInfo.cyh_func = rtTimerSolarisCallback;
-    pTimer->CyclicInfo.cyh_level = CY_LOCK_LEVEL;
-    pTimer->CyclicInfo.cyh_arg = pTimer;
-    pTimer->CyclicID = CYCLIC_NONE;
-    pTimer->u64NanoInterval = u64NanoInterval;
+    pTimer->CyclicId = CYCLIC_NONE;
+    pTimer->fStarting = false;
+    if (    (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
+        &&  (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL)
+    {
+        pTimer->fSpecificCpu = true;
+        pTimer->fOmni = false;
+        pTimer->idCpu = fFlags & RTTIMER_FLAGS_CPU_MASK;
+    }
+    else
+    {
+        pTimer->fSpecificCpu = false;
+        pTimer->fOmni = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
+        pTimer->idCpu = NIL_RTCPUID;
+    }
     pTimer->pfnTimer = pfnTimer;
     pTimer->pvUser = pvUser;
+    pTimer->u64NanoInterval = u64NanoInterval;
+    pTimer->cSubTimers = cCpus;
+
+    for (i = 0; i < cCpus; i++)
+    {
+        pTimer->aSubTimers[i].iTick = 0;
+        pTimer->aSubTimers[i].pParent = pTimer;
+    }
 
     *ppTimer = pTimer;
…
 
 
-/**
- * Validates the timer handle.
- *
- * @returns true if valid, false if invalid.
- * @param   pTimer  The handle.
- */
-DECLINLINE(bool) rtTimerIsValid(PRTTIMER pTimer)
-{
-    AssertReturn(VALID_PTR(pTimer), false);
-    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, false);
-    return true;
-}
-
-
 RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
 {
+    /*
+     * Validate.
+     */
     if (pTimer == NULL)
         return VINF_SUCCESS;
-    if (!rtTimerIsValid(pTimer))
-        return VERR_INVALID_HANDLE;
-
-    /*
-     * Free the associated resources.
-     */
-    pTimer->u32Magic++;
+    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
+    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
+
+    /*
+     * Invalid the timer, stop it, and free the associated resources.
+     */
+    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
     rtTimerSolarisStop(pTimer);
     RTMemFree(pTimer);
+
     return VINF_SUCCESS;
 }
…
 RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
 {
-    cyc_time_t timerSpec;
-
-    if (!rtTimerIsValid(pTimer))
-        return VERR_INVALID_HANDLE;
-    if (!pTimer->fSuspended)
-        return VERR_TIMER_ACTIVE;
-
-    /*
-     * Calc when it should start fireing.
-     */
-    u64First += RTTimeNanoTS();
-
-    pTimer->fSuspended = false;
-    pTimer->iTick = 0;
-    timerSpec.cyt_when = u64First;
-    timerSpec.cyt_interval = pTimer->u64NanoInterval == 0 ? u64First : pTimer->u64NanoInterval;
-
+    RTCPUID             i;
+    cyclic_id_t         CyclicId;
+    cyc_handler_t       CyclicInfo;
+    cyc_omni_handler_t  CyclicOmniInfo;
+    int                 rc = VINF_SUCCESS;
+
+    /*
+     * Validate.
+     */
+    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
+    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
+    if (rtTimerSolarisGetCyclicId(pTimer) != CYCLIC_NONE)
+    {
+        /*
+         * If it's a one-shot we might end up here because it didn't stop after
+         * the first firing. There are two reasons for this depending on the
+         * kind type of timer. (1) Non-omni timers are (potentially) racing our
+         * RTTimerStart in setting RTTIMER::CyclicId. (2) Omni timers are stopped
+         * on the 2nd firing because we have to make sure all cpus gets called, and
+         * we're using the 2nd round that comes 1 sec after the first because this
+         * is the easier way out.
+         */
+        if (pTimer->u64NanoInterval)
+            return VERR_TIMER_ACTIVE;
+
+        for (i = 0; i < pTimer->cSubTimers; i++)
+            if (pTimer->aSubTimers[i].iTick)
+                break; /* has fired */
+        if (i >= pTimer->cSubTimers)
+            return VERR_TIMER_ACTIVE;
+
+        rtTimerSolarisStop(pTimer);
+    }
+
+    /*
+     * Do the setup bits that doesn't need the lock.
+     * We'll setup both omni and non-omni stuff here because it shorter than if'ing it.
+     */
+    CyclicInfo.cyh_func  = rtTimerSolarisCallback;
+    CyclicInfo.cyh_arg   = pTimer;
+    CyclicInfo.cyh_level = CY_LOCK_LEVEL;
+
+    CyclicOmniInfo.cyo_online  = rtTimerSolarisOmniOnlineCallback;
+    CyclicOmniInfo.cyo_offline = rtTimerSolarisOmniOfflineCallback;
+    CyclicOmniInfo.cyo_arg     = pTimer;
+
+    for (i = 0; i > pTimer->cSubTimers; i++)
+        pTimer->aSubTimers[i].iTick = 0;
+
+    if (pTimer->fSpecificCpu && u64First < 10000)
+        u64First = RTTimeNanoTS() + 10000; /* Try make sure it doesn't fire before we re-bind it. */
+    else
+        u64First += RTTimeNanoTS(); /* ASSUMES it is implemented via gethrtime() */
+
+    pTimer->TimeSpecs.cyt_when     = u64First;
+    pTimer->TimeSpecs.cyt_interval = !pTimer->u64NanoInterval
+                                   ? 1000000000 /* 1 sec */
+                                   : pTimer->u64NanoInterval;
+
+    /*
+     * Acquire the cpu lock and call cyclic_add/cyclic_add_omni.
+     */
     mutex_enter(&cpu_lock);
-    pTimer->CyclicID = cyclic_add(&pTimer->CyclicInfo, &timerSpec);
+
+    ASMAtomicWriteBool(&pTimer->fStarting, true);
+    if (pTimer->fOmni)
+        CyclicId = cyclic_add_omni(&CyclicOmniInfo);
+    else if (pTimer->fSpecificCpu)
+    {
+        cpu_t *pCpu = cpu_get(pTimer->idCpu);
+        CyclicId = CYCLIC_NONE;
+        if (pCpu)
+        {
+            if (cpu_is_online(pCpu))
+            {
+                CyclicId = cyclic_add(&CyclicInfo, &pTimer->TimeSpecs);
+                if (CyclicId != CYCLIC_NONE)
+                    cyclic_bind(CyclicId, pCpu, NULL);
+            }
+            else
+                rc = VERR_CPU_OFFLINE;
+        }
+        else
+            rc = VERR_CPU_NOT_FOUND;
+    }
+    else
+        CyclicId = cyclic_add(&CyclicInfo, &pTimer->TimeSpecs);
+
+    rtTimerSolarisSetCyclicId(pTimer, CyclicId);
+    ASMAtomicWriteBool(&pTimer->fStarting, false);
+
     mutex_exit(&cpu_lock);
 
+    /*
+     * Just some sanity checks should the cylic code start returning errors.
+     */
+    Assert(RT_SUCCESS(rc) || CyclicId == CYCLIC_NONE);
+    if (CyclicId == CYCLIC_NONE && rc == VINF_SUCCESS)
+        rc = VERR_GENERAL_FAILURE;
+    return rc;
+}
+
+
+RTDECL(int) RTTimerStop(PRTTIMER pTimer)
+{
+    /*
+     * Validate.
+     */
+    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
+    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
+
+    /*
+     * Stop the timer.
+     */
+    if (!rtTimerSolarisStop(pTimer))
+        return VERR_TIMER_SUSPENDED;
     return VINF_SUCCESS;
 }
 
 
-RTDECL(int) RTTimerStop(PRTTIMER pTimer)
-{
-    if (!rtTimerIsValid(pTimer))
-        return VERR_INVALID_HANDLE;
-    if (pTimer->fSuspended)
-        return VERR_TIMER_SUSPENDED;
-
-    /*
-     * Suspend the timer.
-     */
-    pTimer->fSuspended = true;
-    rtTimerSolarisStop(pTimer);
-
-    return VINF_SUCCESS;
-}
-
-
+/**
+ * Timer callback function for non-omni timers.
+ *
+ * @param   pvTimer     Pointer to the timer.
+ */
 static void rtTimerSolarisCallback(void *pvTimer)
 {
     PRTTIMER pTimer = (PRTTIMER)pvTimer;
 
-    /* If this is a one shot timer, call pfnTimer and suspend
-     * as Solaris does not support 0 interval timers implictly
+    /* Check for destruction. */
+    if (pTimer->u32Magic != RTTIMER_MAGIC)
+        return;
+
+    /*
+     * If this is a one shot timer, suspend the timer here as Solaris
+     * does not support single-shot timers implicitly.
      */
     if (!pTimer->u64NanoInterval)
     {
-        pTimer->fSuspended = true;
         rtTimerSolarisStop(pTimer);
-    }
-
-    /* Callback user defined callback function */
-    pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pTimer->iTick);
-}
-
-
-static void rtTimerSolarisStop(PRTTIMER pTimer)
-{
-    /* Important we check for invalid cyclic object */
-    if (pTimer->CyclicID != CYCLIC_NONE)
+        if (!pTimer->aSubTimers[0].iTick)
+        {
+            ASMAtomicWriteU64(&pTimer->aSubTimers[0].iTick, 1); /* paranoia */
+            pTimer->pfnTimer(pTimer, pTimer->pvUser, 1);
+        }
+    }
+    else
+        /* recurring */
+        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pTimer->aSubTimers[0].iTick);
+}
+
+
+/**
+ * Timer callback function for omni timers.
+ *
+ * @param   pvSubTimer  Pointer to the sub-timer.
+ */
+static void rtTimerSolarisOmniCallback(void *pvSubTimer)
+{
+    PRTTIMERSOLSUBTIMER pSubTimer = (PRTTIMERSOLSUBTIMER)pvSubTimer;
+    PRTTIMER            pTimer    = pSubTimer->pParent;
+
+    /* Check for destruction. */
+    if (    !VALID_PTR(pTimer)
+        ||  pTimer->u32Magic != RTTIMER_MAGIC)
+        return;
+
+    /*
+     * If this is a one-shot timer, suspend it here the 2nd time around.
+     * We cannot do it the first time like for the non-omni timers since
+     * we don't know if it has fired on all the cpus yet.
+     */
+    if (!pTimer->u64NanoInterval)
+    {
+        if (!pSubTimer->iTick)
+        {
+            ASMAtomicWriteU64(&pSubTimer->iTick, 1); /* paranoia */
+            pTimer->pfnTimer(pTimer, pTimer->pvUser, 1);
+        }
+        else
+            rtTimerSolarisStop(pTimer);
+    }
+    else
+        /* recurring */
+        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
+}
+
+
+/**
+ * This is a dummy callback use for the cases where we get cpus which id we
+ * cannot handle because of broken RTMpGetMaxCpuId(), or if we're racing
+ * RTTimerDestroy().
+ *
+ * This shouldn't happen of course, but if it does we wish to handle
+ * gracefully instead of crashing.
+ *
+ * @param   pvIgnored   Ignored
+ */
+static void rtTimerSolarisOmniDummyCallback(void *pvIgnored)
+{
+    NOREF(pvIgnored);
+}
+
+
+/**
+ * Omni-timer callback that sets up the timer for a cpu during cyclic_add_omni
+ * or at later when a CPU comes online.
+ *
+ * @param   pvTimer     Pointer to the timer.
+ * @param   pCpu        The cpu that has come online.
+ * @param   pCyclicInfo Where to store the cyclic handler info.
+ * @param   pTimeSpecs  Where to store the timer firing specs.
+ */
+static void rtTimerSolarisOmniOnlineCallback(void *pvTimer, cpu_t *pCpu, cyc_handler_t *pCyclicInfo, cyc_time_t *pTimeSpecs)
+{
+    PRTTIMER pTimer = (PRTTIMER)pvTimer;
+    RTCPUID  idCpu  = pCpu->cpu_id;
+    AssertMsg(idCpu < pTimer->cSubTimers, ("%d < %d\n", (int)idCpu, (int)pTimer->cSubTimers));
+    if (    idCpu < pTimer->cSubTimers
+        &&  pTimer->u32Magic == RTTIMER_MAGIC)
+    {
+        PRTTIMERSOLSUBTIMER pSubTimer = &pTimer->aSubTimers[idCpu];
+
+        pCyclicInfo->cyh_func  = rtTimerSolarisOmniCallback;
+        pCyclicInfo->cyh_arg   = pSubTimer;
+        pCyclicInfo->cyh_level = CY_LOCK_LEVEL;
+
+        if (pTimer->fStarting)
+        {
+            /*
+             * Called during cyclic_add_omni, just spread the 2nd tick
+             * for the one-shots to avoid unnecessary lock contention.
+             */
+            *pTimeSpecs = pTimer->TimeSpecs;
+            if (!pTimer->u64NanoInterval)
+                pTimeSpecs->cyt_interval += idCpu * (unsigned)nsec_per_tick * 2U;
+        }
+        else
+        {
+            /*
+             * Called at run-time, have to make sure cyt_when isn't in the past.
+             */
+            ASMAtomicWriteU64(&pSubTimer->iTick, 0); /* paranoia */
+
+            uint64_t u64Now = RTTimeNanoTS(); /* ASSUMES it's implemented using gethrtime(). */
+            if (pTimer->TimeSpecs.cyt_when > u64Now)
+                *pTimeSpecs = pTimer->TimeSpecs;
+            else
+            {
+                if (!pTimer->u64NanoInterval)
+                {
+                    /* one-shot: Just schedule a 1 sec timeout and set the tick to 1. */
+                    pTimeSpecs->cyt_when     = u64Now + 1000000000;
+                    pTimeSpecs->cyt_interval = 1000000000;
+                    ASMAtomicWriteU64(&pSubTimer->iTick, 1);
+                }
+                else
+                {
+#if 1 /* This might be made into a RTTIMER_FLAGS_something later, for now ASAP is what we need. */
+                    /* recurring: ASAP. */
+                    pTimeSpecs->cyt_when = u64Now;
+#else
+                    /* recurring: Adjust it to the next tick. */
+                    uint64_t cTicks = (u64Now - pTimer->TimeSpecs.cyt_when) / pTimer->TimeSpecs.cyt_interval;
+                    pTimeSpecs->cyt_when = (cTicks + 1) * pTimer->TimeSpecs.cyt_interval;
+#endif
+                    pTimeSpecs->cyt_interval = pTimer->TimeSpecs.cyt_interval;
+                }
+            }
+        }
+    }
+    else
+    {
+        /*
+         * Invalid cpu id or destruction race.
+         */
+        pCyclicInfo->cyh_func  = rtTimerSolarisOmniDummyCallback;
+        pCyclicInfo->cyh_arg   = NULL;
+        pCyclicInfo->cyh_level = CY_LOCK_LEVEL;
+
+        pTimeSpecs->cyt_when     = RTTimeNanoTS() + 1000000000;
+        pTimeSpecs->cyt_interval = 1000000000;
+    }
+}
+
+
+/**
+ * Callback for when a CPU goes offline.
+ *
+ * Currently, we don't need to perform any tasks here.
+ *
+ * @param   pvTimer     Pointer to the timer.
+ * @param   pCpu        Pointer to the cpu.
+ * @param   pvSubTimer  Pointer to the sub timer. This may be NULL.
+ */
+static void rtTimerSolarisOmniOfflineCallback(void *pvTimer, cpu_t *pCpu, void *pvSubTimer)
+{
+    /*PRTTIMER pTimer = (PRTTIMER)pvTimer;*/
+    NOREF(pvTimer);
+    NOREF(pCpu);
+    NOREF(pvSubTimer);
+}
+
+
+/**
+ * Worker function used to stop the timer.
+ *
+ * This is used from within the callback functions (one-shot scenarious) and
+ * from RTTimerStop, RTTimerDestroy and RTTimerStart. We use atomic cmpxchg
+ * here to avoid some unnecessary cpu_lock contention and to avoid
+ * potential (?) deadlocks between the callback and the APIs. There is a
+ * slight chance of a race between active callbacks and the APIs, but this
+ * is preferred to a
+ *
+ * @returns true if we stopped it, false if it was already stopped.
+ * @param   pTimer      The timer to stop.
+ */
+static bool rtTimerSolarisStop(PRTTIMER pTimer)
+{
+    /*
+     * This is a bit problematic. I'm a bit unsure whether cyclic_remove might
+     * or may not deadlock with a callback trying to aquire the cpu_lock. So,
+     * in order to avoid this issue I'm making sure that we don't take the lock
+     * unless we know we're gonna call cyclic_remove. However, the downside of
+     * this is that it's possible races between RTTimerStart/RTTimerDestroy and
+     * currently active callbacks, which may cause iTick to have a bad value or
+     * in the worst case, memory to accessed after cleanup.
+     */
+    cyclic_id_t CyclicId = rtTimerSolarisGetCyclicId(pTimer);
+    if (    CyclicId != CYCLIC_NONE
+        &&  rtTimerSolarisCmpXchgCyclicId(pTimer, CYCLIC_NONE, CyclicId))
     {
         mutex_enter(&cpu_lock);
-        cyclic_remove(pTimer->CyclicID);
+        cyclic_remove(CyclicId);
         mutex_exit(&cpu_lock);
-        pTimer->CyclicID = CYCLIC_NONE;
-    }
+        return true;
+    }
+    return false;
 }
 
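For context, here is a minimal sketch of how ring-0 code might consume the reworked API this changeset enables, i.e. an "omni" timer that fires on every online CPU via RTTIMER_FLAGS_CPU_ALL. The RTTimer* calls, the flag and the callback signature are taken from the diff above and iprt/timer.h; the names myTimerCallback and myStartOmniTimer and the 10ms interval are illustrative assumptions, not part of the changeset.

    #include <iprt/timer.h>
    #include <iprt/err.h>

    /* Illustrative per-CPU handler; iTick is the firing CPU's sub-timer tick
       (see rtTimerSolarisOmniCallback above). Runs at CY_LOCK_LEVEL on Solaris. */
    static DECLCALLBACK(void) myTimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
    {
        NOREF(pTimer); NOREF(pvUser); NOREF(iTick);
        /* per-CPU work goes here */
    }

    /* Hypothetical helper: create and start a 10ms interval timer on all CPUs. */
    static int myStartOmniTimer(PRTTIMER *ppTimer)
    {
        int rc = RTTimerCreateEx(ppTimer, UINT64_C(10000000) /* ns */,
                                 RTTIMER_FLAGS_CPU_ALL, myTimerCallback, NULL /* pvUser */);
        if (RT_SUCCESS(rc))
        {
            rc = RTTimerStart(*ppTimer, 0 /* u64First: fire as soon as possible */);
            if (RT_FAILURE(rc))
            {
                RTTimerDestroy(*ppTimer);
                *ppTimer = NULL;
            }
        }
        return rc;
    }

Note that for a one-shot omni timer (u64NanoInterval == 0) the new code only removes the cyclic on the second firing, roughly one second after the first, so that every CPU is guaranteed to see its callback once.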