Timestamp:
    Dec 10, 2021 12:43:03 AM (3 years ago)
File:
    1 edited
Legend:
    ' '  Unmodified
    '+'  Added
    '-'  Removed
trunk/src/VBox/Runtime/r0drv/nt/timer-r0drv-nt.cpp (r92825 → r92857)
@@ -102 +102 @@
     /** User argument. */
     void                   *pvUser;
+
+    /** @name Periodic scheduling / RTTimerChangeInterval.
+     * @{ */
+    /** Spinlock protecting the u64NanoInterval, iMasterTick, uNtStartTime,
+     * uNtDueTime and (at least for updating) fSuspended. */
+    KSPIN_LOCK              Spinlock;
     /** The timer interval. 0 if one-shot. */
-    uint64_t                u64NanoInterval;
+    uint64_t volatile       u64NanoInterval;
+    /** The the current master tick.  This does not necessarily follow that of
+     * the subtimer, as RTTimerChangeInterval may cause it to reset. */
+    uint64_t volatile       iMasterTick;
 #ifdef RTR0TIMER_NT_MANUAL_RE_ARM
     /** The desired NT time of the first tick. */
-    uint64_t                uNtStartTime;
-#endif
+    uint64_t volatile       uNtStartTime;
+    /** The current due time (absolute interrupt time). */
+    uint64_t volatile       uNtDueTime;
+#endif
+    /** @} */
+
     /** The NT timer object. */
     KTIMER                  NtTimer;

@@ -125 +138 @@

 #ifdef RTR0TIMER_NT_MANUAL_RE_ARM
+
 /**
  * Get current NT interrupt time.

@@ -147 +161 @@
 # endif
 }
+
+/**
+ * Get current NT interrupt time, high resolution variant.
+ * @return High resolution NT interrupt time
+ */
+static uint64_t rtTimerNtQueryInterruptTimeHighRes(void)
+{
+    if (g_pfnrtKeQueryInterruptTimePrecise)
+    {
+        ULONG64 uQpcIgnored;
+        return g_pfnrtKeQueryInterruptTimePrecise(&uQpcIgnored);
+    }
+    return rtTimerNtQueryInterruptTime();
+}
+
 #endif /* RTR0TIMER_NT_MANUAL_RE_ARM */


 /**
+ * Worker for rtTimerNtRearmInternval that calculates the next due time.
+ *
+ * @returns The next due time (relative, so always negative).
+ * @param   uNtNow                  The current time.
+ * @param   uNtStartTime            The start time of the timer.
+ * @param   iTick                   The next tick number (zero being @a uNtStartTime).
+ * @param   cNtInterval             The timer interval in NT ticks.
+ * @param   cNtNegDueSaftyMargin    The due time safety margin in negative NT
+ *                                  ticks.
+ * @param   cNtMinNegInterval       The minium interval to use when in catchup
+ *                                  mode, also negative NT ticks.
+ */
+DECLINLINE(int64_t) rtTimerNtCalcNextDueTime(uint64_t uNtNow, uint64_t uNtStartTime, uint64_t iTick, uint64_t cNtInterval,
+                                             int32_t const cNtNegDueSaftyMargin, int32_t const cNtMinNegInterval)
+{
+    /* Calculate the actual time elapsed since timer start: */
+    int64_t iDueTime = uNtNow - uNtStartTime;
+    if (iDueTime < 0)
+        iDueTime = 0;
+
+    /* Now calculate the nominal time since timer start for the next tick: */
+    uint64_t const uNtNextRelStart = iTick * cNtInterval;
+
+    /* Calulate now much time we have to the next tick: */
+    iDueTime -= uNtNextRelStart;
+
+    /* If we haven't already overshot the due time, including some safety margin, we're good: */
+    if (iDueTime < cNtNegDueSaftyMargin)
+        return iDueTime;
+
+    /* Okay, we've overshot it and are in catchup mode: */
+    if (iDueTime < (int64_t)cNtInterval)
+        iDueTime = -(int64_t)(cNtInterval / 2);     /* double time */
+    else if (iDueTime < (int64_t)(cNtInterval * 4))
+        iDueTime = -(int64_t)(cNtInterval / 4);     /* quadruple time */
+    else
+        return cNtMinNegInterval;
+
+    /* Make sure we don't try intervals smaller than the minimum specified by the caller: */
+    if (iDueTime > cNtMinNegInterval)
+        iDueTime = cNtMinNegInterval;
+    return iDueTime;
+}
+
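The catch-up behaviour of the new due-time worker is easiest to see with concrete numbers. Below is a user-mode model of rtTimerNtCalcNextDueTime, the same arithmetic with kernel types replaced by <cstdint>; the function name and sample values are illustrative, not taken from the changeset:

    #include <cstdint>
    #include <cstdio>

    /* User-mode replica of rtTimerNtCalcNextDueTime; NT ticks are 100ns units. */
    static int64_t calcNextDueTime(uint64_t uNtNow, uint64_t uNtStartTime, uint64_t iTick, uint64_t cNtInterval,
                                   int32_t cNtNegDueSaftyMargin, int32_t cNtMinNegInterval)
    {
        int64_t iDueTime = uNtNow - uNtStartTime;           /* actual time since timer start */
        if (iDueTime < 0)
            iDueTime = 0;
        uint64_t const uNtNextRelStart = iTick * cNtInterval; /* nominal time of the next tick */
        iDueTime -= uNtNextRelStart;                        /* negative = time still left */
        if (iDueTime < cNtNegDueSaftyMargin)                /* still ahead of schedule */
            return iDueTime;
        if (iDueTime < (int64_t)cNtInterval)                /* slightly behind: double rate */
            iDueTime = -(int64_t)(cNtInterval / 2);
        else if (iDueTime < (int64_t)(cNtInterval * 4))     /* well behind: quadruple rate */
            iDueTime = -(int64_t)(cNtInterval / 4);
        else                                                /* hopelessly behind: minimum */
            return cNtMinNegInterval;
        return iDueTime > cNtMinNegInterval ? cNtMinNegInterval : iDueTime;
    }

    int main()
    {
        /* A 1ms timer (10000 NT ticks) started at 0; the 3rd tick is due at 30000. */
        printf("%lld\n", (long long)calcNextDueTime(20100, 0, 3, 10000, -100, -2000)); /* -9900: on schedule   */
        printf("%lld\n", (long long)calcNextDueTime(32000, 0, 3, 10000, -100, -2000)); /* -5000: half interval */
        printf("%lld\n", (long long)calcNextDueTime(75000, 0, 3, 10000, -100, -2000)); /* -2000: clamped       */
        return 0;
    }

The returned value is exactly what the re-arm code hands to KeSetTimerEx or ExSetTimer as a relative (negative) due time.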
@@ -153 +226 @@
+/**
  * Manually re-arms an internval timer.
  *
  * Turns out NT doesn't necessarily do a very good job at re-arming timers
- * accurately.
+ * accurately, this is in part due to KeSetTimerEx API taking the interval in
+ * milliseconds.
  *
  * @param   pTimer      The timer.
- * @param   iTick       The current timer tick.
- */
-DECLINLINE(void) rtTimerNtRearmInternval(PRTTIMER pTimer, uint64_t iTick)
+ * @param   pMasterDpc  The master timer DPC for passing to KeSetTimerEx
+ *                      in low-resolution mode.  Ignored for high-res.
+ */
+static void rtTimerNtRearmInternval(PRTTIMER pTimer, PKDPC pMasterDpc)
 {
 #ifdef RTR0TIMER_NT_MANUAL_RE_ARM
     Assert(pTimer->u64NanoInterval);

-    uint64_t uNtNext = (iTick * pTimer->u64NanoInterval) / 100 - 10; /* 1us fudge */
-    LARGE_INTEGER DueTime;
-    DueTime.QuadPart = rtTimerNtQueryInterruptTime() - pTimer->uNtStartTime;
-    if (DueTime.QuadPart < 0)
-        DueTime.QuadPart = 0;
-    if ((uint64_t)DueTime.QuadPart < uNtNext)
-        DueTime.QuadPart -= uNtNext;
-    else
-        DueTime.QuadPart = -2500; /* 0.25ms */
-
+    /*
+     * For simplicity we acquire the spinlock for the whole operation.
+     * This should be perfectly fine as it doesn't change the IRQL.
+     */
+    Assert(KeGetCurrentIrql() >= DISPATCH_LEVEL);
+    KeAcquireSpinLockAtDpcLevel(&pTimer->Spinlock);
+
+    /*
+     * Make sure it wasn't suspended
+     */
+    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
+    {
+        uint64_t const cNtInterval  = ASMAtomicUoReadU64(&pTimer->u64NanoInterval) / 100;
+        uint64_t const uNtStartTime = ASMAtomicUoReadU64(&pTimer->uNtStartTime);
+        uint64_t const iTick        = ++pTimer->iMasterTick;
+
+        /*
+         * Calculate the deadline for the next timer tick and arm the timer.
+         * We always use a relative tick, i.e. negative DueTime value.  This is
+         * crucial for the the high resolution API as it will bugcheck otherwise.
+         */
+        int64_t  iDueTime;
+        uint64_t uNtNow;
 # ifdef RTR0TIMER_NT_HIGH_RES
-    if (pTimer->pHighResTimer)
-        g_pfnrtExSetTimer(pTimer->pHighResTimer, DueTime.QuadPart, 0, NULL);
-    else
+        if (pTimer->pHighResTimer)
+        {
+            /* Must use highres time here. */
+            uNtNow   = rtTimerNtQueryInterruptTimeHighRes();
+            iDueTime = rtTimerNtCalcNextDueTime(uNtNow, uNtStartTime, iTick, cNtInterval,
+                                                -100 /* 10us safety */, -2000 /* 200us min interval*/);
+            g_pfnrtExSetTimer(pTimer->pHighResTimer, iDueTime, 0, NULL);
+        }
+        else
 # endif
-    KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, &pTimer->aSubTimers[0].NtDpc);
+        {
+            /* Expect interrupt time and timers to expire at the same time, so
+               don't use high res time api here. */
+            uNtNow   = rtTimerNtQueryInterruptTime();
+            iDueTime = rtTimerNtCalcNextDueTime(uNtNow, uNtStartTime, iTick, cNtInterval,
+                                                -100 /* 10us safety */, -2500 /* 250us min interval*/); /** @todo use max interval here */
+            LARGE_INTEGER DueTime;
+            DueTime.QuadPart = iDueTime;
+            KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
+        }
+
+        pTimer->uNtDueTime = uNtNow + -iDueTime;
+    }
+
+    KeReleaseSpinLockFromDpcLevel(&pTimer->Spinlock);
 #else
-    RT_NOREF(pTimer, iTick);
+    RT_NOREF(pTimer, iTick, pMasterDpc);
 #endif
 }
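As the rewritten function notes, both arming paths must pass a relative due time, which NT encodes as a negative count of 100ns units; the -100, -2000 and -2500 literals in the hunk above are 10µs, 200µs and 250µs respectively. A minimal user-mode sketch of that encoding, with a hypothetical helper name:

    #include <cstdint>
    #include <cstdio>

    /* Hypothetical helper: encode a nanosecond delay the way KeSetTimerEx and
       ExSetTimer expect a relative DueTime: negative, in 100ns NT ticks. */
    static int64_t relativeNtDueTime(uint64_t cNanoSecs)
    {
        return -(int64_t)(cNanoSecs / 100);
    }

    int main()
    {
        printf("%lld\n", (long long)relativeNtDueTime(10000));  /* -100:  the 10us safety margin      */
        printf("%lld\n", (long long)relativeNtDueTime(200000)); /* -2000: high-res minimum interval   */
        printf("%lld\n", (long long)relativeNtDueTime(250000)); /* -2500: low-res minimum interval    */
        return 0;
    }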
@@ -205 +314 @@
     ASMAtomicWriteBool(&pTimer->fSuspended, true);
     uint64_t iTick = ++pTimer->aSubTimers[0].iTick;
-    if (pTimer->u64NanoInterval)
-        rtTimerNtRearmInternval(pTimer, iTick);
+
     pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
+
+    /* We re-arm the timer after calling pfnTimer, as it may stop the timer
+       or change the interval, which would mean doing extra work. */
+    if (!pTimer->fSuspended && pTimer->u64NanoInterval)
+        rtTimerNtRearmInternval(pTimer, &pTimer->aSubTimers[0].NtDpc);

     ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, NIL_RTNATIVETHREAD);

@@ -322 +435 @@
 static void rtTimerNtOmniMasterCallbackWorker(PRTTIMER pTimer, PRTTIMERNTSUBTIMER pSubTimer, int iCpuSelf)
 {
-#ifdef RT_STRICT
-    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
-        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
-    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
-        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
-#endif
-
     /*
      * Check that we haven't been suspended before scheduling the other DPCs
      * and doing the callout.
      */
-    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
-        &&  pTimer->u32Magic == RTTIMER_MAGIC)
-    {
-        RTCPUSET    OnlineSet;
+    if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
+        && pTimer->u32Magic == RTTIMER_MAGIC)
+    {
+        RTCPUSET OnlineSet;
         RTMpGetOnlineSet(&OnlineSet);

@@ -347 +453 @@
          */
         for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
-            if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
-                &&  iCpuSelf != iCpu)
+            if (   RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
+                && iCpuSelf != iCpu)
                 KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0);

-        uint64_t iTick = ++pSubTimer->iTick;
-        rtTimerNtRearmInternval(pTimer, iTick);
-        pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
+        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
+
+        /* We re-arm the timer after calling pfnTimer, as it may stop the timer
+           or change the interval, which would mean doing extra work. */
+        if (!pTimer->fSuspended && pTimer->u64NanoInterval)
+            rtTimerNtRearmInternval(pTimer, &pSubTimer->NtDpc);
     }
     else

@@ -367 +476 @@

         for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
-            if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
-                &&  iCpuSelf != iCpu)
+            if (   RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
+                && iCpuSelf != iCpu)
                 if (!KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0))
                     ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown); /* already queued and counted. */
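Both callback workers now invoke the user callback first and re-arm afterwards, so a pfnTimer that calls RTTimerStop or RTTimerChangeInterval is honoured without first arming a stale deadline. A user-mode model of that ordering, with std::atomic standing in for the ASMAtomic* accessors (all names are illustrative):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    struct ModelTimer
    {
        std::atomic<bool>     fSuspended{false};
        std::atomic<uint64_t> u64NanoInterval{1000000};
        void                (*pfnTimer)(ModelTimer *) = nullptr;
    };

    static void rearm(ModelTimer *pTimer)
    {
        printf("re-armed for %llu ns\n", (unsigned long long)pTimer->u64NanoInterval.load());
    }

    static void timerTick(ModelTimer *pTimer)
    {
        pTimer->pfnTimer(pTimer); /* may suspend the timer or change the interval */

        /* Mirror of the new ordering: only re-arm if still active and periodic. */
        if (!pTimer->fSuspended.load() && pTimer->u64NanoInterval.load())
            rearm(pTimer);
    }

    int main()
    {
        ModelTimer Timer;
        Timer.pfnTimer = [](ModelTimer *) { };                                  /* benign callback */
        timerTick(&Timer);                                                      /* prints "re-armed ..." */
        Timer.pfnTimer = [](ModelTimer *pTimer) { pTimer->fSuspended = true; }; /* callback stops the timer */
        timerTick(&Timer);                                                      /* prints nothing: no stale re-arm */
        return 0;
    }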
@@ -395 +504 @@
     PRTTIMERNTSUBTIMER const pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
     PRTTIMER const           pTimer    = pSubTimer->pParent;
-    int const                iCpuSelf  = RTMpCpuIdToSetIndex(RTMpCpuId());
+    RTCPUID                  idCpu     = RTMpCpuId();
+    int const                iCpuSelf  = RTMpCpuIdToSetIndex(idCpu);

     AssertPtr(pTimer);

@@ -401 +511 @@
     if (KeGetCurrentIrql() < DISPATCH_LEVEL)
         RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
+    /* We must be called on the master CPU or the tick variable goes south. */
     if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
         RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
+    if (pTimer->idCpu != idCpu)
+        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: pTimer->idCpu=%d vs idCpu=%d\n", pTimer->idCpu, idCpu);
 #endif

@@ -446 +559 @@
     AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

-    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
+    /*
+     * The operation is protected by the spinlock.
+     */
+    KIRQL bSavedIrql;
+    KeAcquireSpinLock(&pTimer->Spinlock, &bSavedIrql);
+
+    /*
+     * Check the state.
+     */
+    if (ASMAtomicUoReadBool(&pTimer->fSuspended))
+    { /* likely */ }
+    else
+    {
+        KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
         return VERR_TIMER_ACTIVE;
-    if (   pTimer->fSpecificCpu
-        && !RTMpIsCpuOnline(pTimer->idCpu))
+    }
+    if (   !pTimer->fSpecificCpu
+        || RTMpIsCpuOnline(pTimer->idCpu))
+    { /* likely */ }
+    else
+    {
+        KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
         return VERR_CPU_OFFLINE;
-
-    /*
-     * Start the timer.
-     */
-    PKDPC pMasterDpc = pTimer->fOmniTimer
-                     ? &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc
-                     : &pTimer->aSubTimers[0].NtDpc;
-
+    }
+
+    /*
+     * Do the starting.
+     */
 #ifndef RTR0TIMER_NT_MANUAL_RE_ARM
+    /* Calculate the interval time: */
     uint64_t u64Interval = pTimer->u64NanoInterval / 1000000; /* This is ms, believe it or not. */
     ULONG ulInterval = (ULONG)u64Interval;

@@ -468 +597 @@
 #endif

+    /* Translate u64First to a DueTime: */
     LARGE_INTEGER DueTime;
     DueTime.QuadPart = -(int64_t)(u64First / 100); /* Relative, NT time. */
     if (!DueTime.QuadPart)
-        DueTime.QuadPart = -1;
+        DueTime.QuadPart = -10; /* 1us */

+    /* Reset tick counters: */
     unsigned cSubTimers = pTimer->fOmniTimer ? pTimer->cSubTimers : 1;
     for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
         pTimer->aSubTimers[iCpu].iTick = 0;
+    pTimer->iMasterTick = 0;
+
+    /* Update timer state: */
 #ifdef RTR0TIMER_NT_MANUAL_RE_ARM
-    pTimer->uNtStartTime = rtTimerNtQueryInterruptTime() + u64First / 100;
+    pTimer->uNtStartTime = rtTimerNtQueryInterruptTime() + -DueTime.QuadPart;
 #endif
     ASMAtomicWriteS32(&pTimer->cOmniSuspendCountDown, 0);
     ASMAtomicWriteBool(&pTimer->fSuspended, false);
+
+    /*
+     * Finally start the NT timer.
+     *
+     * We do this without holding the spinlock to err on the side of
+     * caution in case ExSetTimer or KeSetTimerEx ever should have the idea
+     * of running the callback before returning.
+     */
+    KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);

 #ifdef RTR0TIMER_NT_HIGH_RES

@@ -494 +637 @@
 #endif
     {
+        PKDPC const pMasterDpc = &pTimer->aSubTimers[pTimer->fOmniTimer ? RTMpCpuIdToSetIndex(pTimer->idCpu) : 0].NtDpc;
 #ifdef RTR0TIMER_NT_MANUAL_RE_ARM
         KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
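Two small but related fixes in the RTTimerStart hunks above: a zero first-tick offset is now clamped to -10 (1µs) rather than -1, and uNtStartTime is derived from the clamped DueTime instead of the raw u64First, so the manual re-arm deadlines line up with what was actually armed. A user-mode model of that bookkeeping, with illustrative values:

    #include <cstdint>
    #include <cstdio>

    /* Models the patched RTTimerStart bookkeeping; times are in 100ns NT ticks. */
    int main()
    {
        uint64_t const uNtNow   = 500000; /* pretend current interrupt time */
        uint64_t const u64First = 0;      /* caller wants the first tick "now" (nanoseconds) */

        int64_t iDueTime = -(int64_t)(u64First / 100); /* relative NT time */
        if (!iDueTime)
            iDueTime = -10;                            /* 1us: zero would not be relative */

        /* r92825 computed uNtNow + u64First / 100  -> 500000 (10 ticks early)   */
        /* r92857 computes uNtNow + -iDueTime       -> 500010 (matches the arming) */
        uint64_t const uNtStartTime = uNtNow + -iDueTime;
        printf("uNtStartTime=%llu\n", (unsigned long long)uNtStartTime);
        return 0;
    }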
@@ -511 +655 @@
  * @param   pTimer      The active timer.
  */
-static void rtTimerNtStopWorker(PRTTIMER pTimer)
-{
-    /*
-     * Just cancel the timer, dequeue the DPCs and flush them (if this is supported).
-     */
-    ASMAtomicWriteBool(&pTimer->fSuspended, true);
-
+static int rtTimerNtStopWorker(PRTTIMER pTimer)
+{
+    /*
+     * Update the state from with the spinlock context.
+     */
+    KIRQL bSavedIrql;
+    KeAcquireSpinLock(&pTimer->Spinlock, &bSavedIrql);
+
+    bool const fWasSuspended = ASMAtomicXchgBool(&pTimer->fSuspended, true);
+
+    KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
+    if (!fWasSuspended)
+    {
+        /*
+         * We should cacnel the timer and dequeue DPCs.
+         */
 #ifdef RTR0TIMER_NT_HIGH_RES
-    if (pTimer->pHighResTimer)
-        g_pfnrtExCancelTimer(pTimer->pHighResTimer, NULL);
-    else
-#endif
-        KeCancelTimer(&pTimer->NtTimer);
-
-    for (RTCPUID iCpu = 0; iCpu < pTimer->cSubTimers; iCpu++)
-        KeRemoveQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc);
+        if (pTimer->pHighResTimer)
+            g_pfnrtExCancelTimer(pTimer->pHighResTimer, NULL);
+        else
+#endif
+            KeCancelTimer(&pTimer->NtTimer);
+
+        for (RTCPUID iCpu = 0; iCpu < pTimer->cSubTimers; iCpu++)
+            KeRemoveQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc);
+        return VINF_SUCCESS;
+    }
+    return VERR_TIMER_SUSPENDED;
 }

@@ -538 +694 @@
     AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

-    if (ASMAtomicUoReadBool(&pTimer->fSuspended))
-        return VERR_TIMER_SUSPENDED;
-
     /*
      * Call the worker we share with RTTimerDestroy.
      */
-    rtTimerNtStopWorker(pTimer);
-    return VINF_SUCCESS;
+    return rtTimerNtStopWorker(pTimer);
 }
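rtTimerNtStopWorker now reports whether it actually performed the stop, using an atomic exchange under the spinlock, so RTTimerStop and RTTimerDestroy can both call it unconditionally and only one caller does the cancellation work. A user-mode model of that idempotent-stop pattern, with std::atomic standing in for ASMAtomicXchgBool and IPRT status codes reduced to plain ints:

    #include <atomic>
    #include <cstdio>

    static std::atomic<bool> g_fSuspended{false};

    /* Models rtTimerNtStopWorker: only the caller that flips fSuspended
       false->true performs the cancellation work. */
    static int stopWorker(void)
    {
        bool const fWasSuspended = g_fSuspended.exchange(true);
        if (!fWasSuspended)
        {
            /* ... cancel the NT timer and dequeue the DPCs here ... */
            return 0;  /* VINF_SUCCESS */
        }
        return -1;     /* VERR_TIMER_SUSPENDED */
    }

    int main()
    {
        printf("first stop:  %d\n", stopWorker()); /* 0:  did the work */
        printf("second stop: %d\n", stopWorker()); /* -1: already suspended */
        return 0;
    }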
744 */ 745 uint64_t cNtToNext = pTimer->uNtDueTime - uNtNow; 746 if ( cNtToNext >= RT_NS_1MS / 2 / 100 /* 0.5 ms */ 747 && cNtToNext * 100 > u64NanoInterval + u64NanoInterval / 4) 748 { 749 pTimer->uNtStartTime = pTimer->uNtDueTime = uNtNow + u64NanoInterval / 100; 750 # ifdef RTR0TIMER_NT_HIGH_RES 751 if (pTimer->pHighResTimer) 752 g_pfnrtExSetTimer(pTimer->pHighResTimer, -(int64_t)u64NanoInterval / 100, 0, NULL); 753 else 754 # endif 755 { 756 LARGE_INTEGER DueTime; 757 DueTime.QuadPart = -(int64_t)u64NanoInterval / 100; 758 KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, 759 &pTimer->aSubTimers[pTimer->fOmniTimer ? RTMpCpuIdToSetIndex(pTimer->idCpu) : 0].NtDpc); 760 } 761 } 762 } 763 #else 764 rc = VERR_NOT_SUPPORTED; 765 #endif 766 } 767 768 KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql); 769 770 return rc; 558 771 } 559 772 … … 574 787 575 788 /* 576 * Invalidate the timer, stop it if it's running and finally 577 * free up the memory. 789 * Invalidate the timer, stop it if it's running and finally free up the memory. 578 790 */ 579 791 ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC); 580 if (!ASMAtomicUoReadBool(&pTimer->fSuspended)) 581 rtTimerNtStopWorker(pTimer); 792 rtTimerNtStopWorker(pTimer); 582 793 583 794 #ifdef RTR0TIMER_NT_HIGH_RES … … 650 861 pTimer->pfnTimer = pfnTimer; 651 862 pTimer->pvUser = pvUser; 863 KeInitializeSpinLock(&pTimer->Spinlock); 652 864 pTimer->u64NanoInterval = u64NanoInterval; 653 865 … … 684 896 * called on. 685 897 */ 686 pTimer->idCpu = NIL_RTCPUID; 898 pTimer->iMasterTick = 0; 899 pTimer->idCpu = NIL_RTCPUID; 687 900 for (unsigned iCpu = 0; iCpu < cSubTimers && RT_SUCCESS(rc); iCpu++) 688 901 { 689 pTimer->aSubTimers[iCpu].iTick = 0;902 pTimer->aSubTimers[iCpu].iTick = 0; 690 903 pTimer->aSubTimers[iCpu].pParent = pTimer; 691 904 … … 715 928 * if requested to do so. 716 929 */ 717 pTimer->aSubTimers[0].iTick = 0; 930 pTimer->iMasterTick = 0; 931 pTimer->aSubTimers[0].iTick = 0; 718 932 pTimer->aSubTimers[0].pParent = pTimer; 719 933