VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/timer-r0drv-nt.cpp@100357

Last change on this file since 100357 was 99758, checked in by vboxsync, 19 months ago

IPRT: Make doxygen 1.9.6 happy. Mostly removing duplicate docs (iprt is documented in the header files). bugref:10442

/* $Id: timer-r0drv-nt.cpp 99758 2023-05-11 21:37:59Z vboxsync $ */
/** @file
 * IPRT - Timers, Ring-0 Driver, NT.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
 * in the VirtualBox distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "the-nt-kernel.h"

#include <iprt/timer.h>
#include <iprt/mp.h>
#include <iprt/cpuset.h>
#include <iprt/err.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/mem.h>
#include <iprt/thread.h>

#include "internal-r0drv-nt.h"
#include "internal/magics.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** This seems to provide better accuracy. */
#define RTR0TIMER_NT_MANUAL_RE_ARM  1

#if !defined(IN_GUEST) || defined(DOXYGEN_RUNNING)
/** Use the high-resolution timer API introduced with Windows 8.1. */
# define RTR0TIMER_NT_HIGH_RES      1
#endif


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * A sub timer structure.
 *
 * This is used for keeping the per-cpu tick and DPC object.
 */
typedef struct RTTIMERNTSUBTIMER
{
    /** The tick counter. */
    uint64_t                iTick;
    /** Pointer to the parent timer. */
    PRTTIMER                pParent;
    /** Thread active executing the worker function, NIL if inactive. */
    RTNATIVETHREAD volatile hActiveThread;
    /** The NT DPC object. */
    KDPC                    NtDpc;
    /** Whether we failed to set the target CPU for the DPC and that this needs
     * to be done at RTTimerStart (simple timers) or during timer callback (omni). */
    bool                    fDpcNeedTargetCpuSet;
} RTTIMERNTSUBTIMER;
/** Pointer to an NT sub-timer structure. */
typedef RTTIMERNTSUBTIMER *PRTTIMERNTSUBTIMER;

/**
 * The internal representation of an NT timer handle.
 */
typedef struct RTTIMER
{
    /** Magic.
     * This is RTTIMER_MAGIC, but changes to something else before the timer
     * is destroyed to indicate clearly that the timer is being destroyed. */
    uint32_t volatile       u32Magic;
    /** Suspend count down for single-shot omni timers. */
    int32_t volatile        cOmniSuspendCountDown;
    /** Flag indicating the timer is suspended. */
    bool volatile           fSuspended;
    /** Whether the timer must run on one specific CPU or not. */
    bool                    fSpecificCpu;
    /** Whether the timer must run on all CPUs or not. */
    bool                    fOmniTimer;
    /** The CPU it must run on if fSpecificCpu is set.
     * The master CPU for an omni-timer. */
    RTCPUID                 idCpu;
    /** Callback. */
    PFNRTTIMER              pfnTimer;
    /** User argument. */
    void                   *pvUser;

    /** @name Periodic scheduling / RTTimerChangeInterval.
     * @{ */
    /** Spinlock protecting the u64NanoInterval, iMasterTick, uNtStartTime,
     * uNtDueTime and (at least for updating) fSuspended. */
    KSPIN_LOCK              Spinlock;
    /** The timer interval.  0 if one-shot. */
    uint64_t volatile       u64NanoInterval;
    /** The current master tick.  This does not necessarily follow that of
     * the subtimer, as RTTimerChangeInterval may cause it to reset. */
    uint64_t volatile       iMasterTick;
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    /** The desired NT time of the first tick.
     * This is not set for one-shot timers, only periodic ones. */
    uint64_t volatile       uNtStartTime;
    /** The current due time (absolute interrupt time).
     * This is not set for one-shot timers, only periodic ones. */
    uint64_t volatile       uNtDueTime;
#endif
    /** @} */

    /** The NT timer object. */
    KTIMER                  NtTimer;
#ifdef RTR0TIMER_NT_HIGH_RES
    /** High resolution timer.  If not NULL, this must be used instead of NtTimer. */
    PEX_TIMER               pHighResTimer;
#endif
    /** The number of sub-timers. */
    RTCPUID                 cSubTimers;
    /** Sub-timers.
     * Normally there is just one, but for RTTIMER_FLAGS_CPU_ALL this will contain
     * an entry for all possible cpus.  In that case the index will be the same as
     * for the RTCpuSet. */
    RTTIMERNTSUBTIMER       aSubTimers[1];
} RTTIMER;


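/*
 * Illustrative note (not part of the original file): RTTIMER uses the classic
 * variable-length-struct pattern, where aSubTimers[1] is really the first
 * element of a cSubTimers-sized array.  A minimal sketch of how such a
 * structure is sized and allocated, mirroring what RTTimerCreateEx below does
 * with RT_UOFFSETOF_DYN (the CPU count here is a made-up example value):
 *
 * @code
 *      RTCPUID  cSubTimers = 4;    // hypothetical CPU count
 *      size_t   cbNeeded   = RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[cSubTimers]);
 *      PRTTIMER pTimer     = (PRTTIMER)RTMemAllocZ(cbNeeded);
 *      // pTimer->aSubTimers[0] .. pTimer->aSubTimers[cSubTimers - 1] are now valid.
 * @endcode
 */

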
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM

/**
 * Get current NT interrupt time.
 * @return  NT interrupt time
 */
static uint64_t rtTimerNtQueryInterruptTime(void)
{
# ifdef RT_ARCH_AMD64
    return KeQueryInterruptTime(); /* macro */
# else
    if (g_pfnrtKeQueryInterruptTime)
        return g_pfnrtKeQueryInterruptTime();

    /* NT4 */
    ULARGE_INTEGER InterruptTime;
    do
    {
        InterruptTime.HighPart = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High1Time;
        InterruptTime.LowPart  = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.LowPart;
    } while (((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High2Time != (LONG)InterruptTime.HighPart);
    return InterruptTime.QuadPart;
# endif
}

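/*
 * Illustrative note (not part of the original file): the NT4 fallback above is
 * a lock-free sequence read.  The kernel updates the two high parts of the
 * shared InterruptTime on either side of the low part, so a snapshot is
 * consistent exactly when High1Time and High2Time compare equal.  The same
 * pattern works for any 64-bit value a 32-bit reader must sample without a
 * lock; a minimal sketch with hypothetical names (SEQTIME64,
 * rtExampleReadSeq64):
 *
 * @code
 *      typedef struct SEQTIME64
 *      {
 *          ULONG volatile LowPart;
 *          LONG volatile  High1Time;
 *          LONG volatile  High2Time;
 *      } SEQTIME64;
 *
 *      static uint64_t rtExampleReadSeq64(SEQTIME64 volatile *pTime)
 *      {
 *          ULARGE_INTEGER uValue;
 *          do
 *          {
 *              uValue.HighPart = pTime->High1Time;   // read one high part,
 *              uValue.LowPart  = pTime->LowPart;     // then the low part,
 *          } while (pTime->High2Time != (LONG)uValue.HighPart); // retry on a torn read
 *          return uValue.QuadPart;
 *      }
 * @endcode
 */
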
/**
 * Get current NT interrupt time, high resolution variant.
 * @return  High resolution NT interrupt time
 */
static uint64_t rtTimerNtQueryInterruptTimeHighRes(void)
{
    if (g_pfnrtKeQueryInterruptTimePrecise)
    {
        ULONG64 uQpcIgnored;
        return g_pfnrtKeQueryInterruptTimePrecise(&uQpcIgnored);
    }
    return rtTimerNtQueryInterruptTime();
}

#endif /* RTR0TIMER_NT_MANUAL_RE_ARM */


/**
 * Worker for rtTimerNtRearmInternval that calculates the next due time.
 *
 * @returns The next due time (relative, so always negative).
 * @param   uNtNow                  The current time.
 * @param   uNtStartTime            The start time of the timer.
 * @param   iTick                   The next tick number (zero being @a uNtStartTime).
 * @param   cNtInterval             The timer interval in NT ticks.
 * @param   cNtNegDueSaftyMargin    The due time safety margin in negative NT
 *                                  ticks.
 * @param   cNtMinNegInterval       The minimum interval to use when in catchup
 *                                  mode, also negative NT ticks.
 */
DECLINLINE(int64_t) rtTimerNtCalcNextDueTime(uint64_t uNtNow, uint64_t uNtStartTime, uint64_t iTick, uint64_t cNtInterval,
                                             int32_t const cNtNegDueSaftyMargin, int32_t const cNtMinNegInterval)
{
    /* Calculate the actual time elapsed since timer start: */
    int64_t iDueTime = uNtNow - uNtStartTime;
    if (iDueTime < 0)
        iDueTime = 0;

    /* Now calculate the nominal time since timer start for the next tick: */
    uint64_t const uNtNextRelStart = iTick * cNtInterval;

    /* Calculate how much time we have to the next tick: */
    iDueTime -= uNtNextRelStart;

    /* If we haven't already overshot the due time, including some safety margin, we're good: */
    if (iDueTime < cNtNegDueSaftyMargin)
        return iDueTime;

    /* Okay, we've overshot it and are in catchup mode: */
    if (iDueTime < (int64_t)cNtInterval)
        iDueTime = -(int64_t)(cNtInterval / 2);     /* double time */
    else if (iDueTime < (int64_t)(cNtInterval * 4))
        iDueTime = -(int64_t)(cNtInterval / 4);     /* quadruple time */
    else
        return cNtMinNegInterval;

    /* Make sure we don't try intervals smaller than the minimum specified by the caller: */
    if (iDueTime > cNtMinNegInterval)
        iDueTime = cNtMinNegInterval;
    return iDueTime;
}

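/*
 * Worked example (illustrative, not part of the original file) for
 * rtTimerNtCalcNextDueTime with a 1ms interval (cNtInterval = 10000 NT ticks
 * of 100ns each) and the -100 tick (10us) safety margin used below:
 *
 *  - On schedule: uNtNow - uNtStartTime = 48000 and iTick = 5, so the nominal
 *    start of tick 5 is 50000 and iDueTime = 48000 - 50000 = -2000, i.e.
 *    re-arm the timer 200us ahead.  Since -2000 < -100 it is returned as is.
 *
 *  - Overshot by 0.3ms: elapsed = 53000 and iTick = 5 gives iDueTime = +3000,
 *    which fails the safety margin check.  As 3000 < 10000 (one interval),
 *    the function returns -(10000 / 2) = -5000, ticking at double rate until
 *    the timer has caught up with its nominal schedule.
 */
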
/**
 * Manually re-arms an interval timer.
 *
 * Turns out NT doesn't necessarily do a very good job at re-arming timers
 * accurately; this is in part because the KeSetTimerEx API takes the interval
 * in milliseconds.
 *
 * @param   pTimer      The timer.
 * @param   pMasterDpc  The master timer DPC for passing to KeSetTimerEx
 *                      in low-resolution mode.  Ignored for high-res.
 */
static void rtTimerNtRearmInternval(PRTTIMER pTimer, PKDPC pMasterDpc)
{
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    Assert(pTimer->u64NanoInterval);

    /*
     * For simplicity we acquire the spinlock for the whole operation.
     * This should be perfectly fine as it doesn't change the IRQL.
     */
    Assert(KeGetCurrentIrql() >= DISPATCH_LEVEL);
    KeAcquireSpinLockAtDpcLevel(&pTimer->Spinlock);

    /*
     * Make sure it wasn't suspended
     */
    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
    {
        uint64_t const cNtInterval  = ASMAtomicUoReadU64(&pTimer->u64NanoInterval) / 100;
        uint64_t const uNtStartTime = ASMAtomicUoReadU64(&pTimer->uNtStartTime);
        uint64_t const iTick        = ++pTimer->iMasterTick;

        /*
         * Calculate the deadline for the next timer tick and arm the timer.
         * We always use a relative tick, i.e. negative DueTime value.  This is
         * crucial for the high resolution API as it will bugcheck otherwise.
         */
        int64_t  iDueTime;
        uint64_t uNtNow;
# ifdef RTR0TIMER_NT_HIGH_RES
        if (pTimer->pHighResTimer)
        {
            /* Must use highres time here. */
            uNtNow   = rtTimerNtQueryInterruptTimeHighRes();
            iDueTime = rtTimerNtCalcNextDueTime(uNtNow, uNtStartTime, iTick, cNtInterval,
                                                -100 /* 10us safety */, -2000 /* 200us min interval */);
            g_pfnrtExSetTimer(pTimer->pHighResTimer, iDueTime, 0, NULL);
        }
        else
# endif
        {
            /* Expect interrupt time and timers to expire at the same time, so
               don't use high res time api here. */
            uNtNow   = rtTimerNtQueryInterruptTime();
            iDueTime = rtTimerNtCalcNextDueTime(uNtNow, uNtStartTime, iTick, cNtInterval,
                                                -100 /* 10us safety */, -2500 /* 250us min interval */); /** @todo use max interval here */
            LARGE_INTEGER DueTime;
            DueTime.QuadPart = iDueTime;
            KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
        }

        pTimer->uNtDueTime = uNtNow + -iDueTime;
    }

    KeReleaseSpinLockFromDpcLevel(&pTimer->Spinlock);
#else
    RT_NOREF(pTimer, pMasterDpc);
#endif
}


/**
 * Common timer callback worker for the non-omni timers.
 *
 * @param   pTimer      The timer.
 */
static void rtTimerNtSimpleCallbackWorker(PRTTIMER pTimer)
{
    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
        && pTimer->u32Magic == RTTIMER_MAGIC)
    {
        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, RTThreadNativeSelf());

        if (!pTimer->u64NanoInterval)
            ASMAtomicWriteBool(&pTimer->fSuspended, true);
        uint64_t iTick = ++pTimer->aSubTimers[0].iTick;

        pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);

        /* We re-arm the timer after calling pfnTimer, as it may stop the timer
           or change the interval, which would mean doing extra work. */
        if (!pTimer->fSuspended && pTimer->u64NanoInterval)
            rtTimerNtRearmInternval(pTimer, &pTimer->aSubTimers[0].NtDpc);

        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, NIL_RTNATIVETHREAD);
    }
}


/**
 * Timer callback function for the low-resolution non-omni timers.
 *
 * @param   pDpc                Pointer to the DPC.
 * @param   pvUser              Pointer to our internal timer structure.
 * @param   SystemArgument1     Some system argument.
 * @param   SystemArgument2     Some system argument.
 */
static void _stdcall rtTimerNtSimpleCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMER pTimer = (PRTTIMER)pvUser;
    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtSimpleCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
#endif

    rtTimerNtSimpleCallbackWorker(pTimer);

    RT_NOREF(pDpc, SystemArgument1, SystemArgument2);
}


#ifdef RTR0TIMER_NT_HIGH_RES
/**
 * Timer callback function for the high-resolution non-omni timers.
 *
 * @param   pExTimer    The windows timer.
 * @param   pvUser      Pointer to our internal timer structure.
 */
static void _stdcall rtTimerNtHighResSimpleCallback(PEX_TIMER pExTimer, void *pvUser)
{
    PRTTIMER pTimer = (PRTTIMER)pvUser;
    AssertPtr(pTimer);
    Assert(pTimer->pHighResTimer == pExTimer);
# ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtHighResSimpleCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
# endif

    /* If we're not on the desired CPU, trigger the DPC.  That will rearm the
       timer and such. */
    if (   !pTimer->fSpecificCpu
        || pTimer->idCpu == RTMpCpuId())
        rtTimerNtSimpleCallbackWorker(pTimer);
    else
        KeInsertQueueDpc(&pTimer->aSubTimers[0].NtDpc, 0, 0);

    RT_NOREF(pExTimer);
}
#endif /* RTR0TIMER_NT_HIGH_RES */


/**
 * The slave DPC callback for an omni timer.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniSlaveCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER           pTimer    = pSubTimer->pParent;

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
        && pTimer->u32Magic == RTTIMER_MAGIC)
    {
        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());

        if (!pTimer->u64NanoInterval)
            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}


/**
 * Called when we have an incomplete DPC object, i.e. one whose target CPU
 * has not been set yet.
 *
 * @returns KeInsertQueueDpc return value.
 * @param   pSubTimer   The sub-timer to queue a DPC for.
 * @param   iCpu        The CPU set index corresponding to that sub-timer.
 */
DECL_NO_INLINE(static, BOOLEAN) rtTimerNtOmniQueueDpcSlow(PRTTIMERNTSUBTIMER pSubTimer, int iCpu)
{
    int rc = rtMpNtSetTargetProcessorDpc(&pSubTimer->NtDpc, RTMpCpuIdFromSetIndex(iCpu));
    if (RT_SUCCESS(rc))
    {
        pSubTimer->fDpcNeedTargetCpuSet = false;
        return KeInsertQueueDpc(&pSubTimer->NtDpc, 0, 0);
    }
    return FALSE;
}


/**
 * Wrapper around KeInsertQueueDpc that makes sure the target CPU has been set.
 *
 * This is for handling deferred rtMpNtSetTargetProcessorDpc failures during
 * creation.  These errors happen for offline CPUs which probably won't ever
 * come online, as very few systems do CPU hotplugging.
 *
 * @returns KeInsertQueueDpc return value.
 * @param   pSubTimer   The sub-timer to queue a DPC for.
 * @param   iCpu        The CPU set index corresponding to that sub-timer.
 */
DECLINLINE(BOOLEAN) rtTimerNtOmniQueueDpc(PRTTIMERNTSUBTIMER pSubTimer, int iCpu)
{
    if (RT_LIKELY(!pSubTimer->fDpcNeedTargetCpuSet))
        return KeInsertQueueDpc(&pSubTimer->NtDpc, 0, 0);
    return rtTimerNtOmniQueueDpcSlow(pSubTimer, iCpu);
}


/**
 * Common timer callback worker for omni-timers.
 *
 * This is responsible for queueing the DPCs for the other CPUs and
 * performing the callback on the CPU on which it is called.
 *
 * @param   pTimer      The timer.
 * @param   pSubTimer   The sub-timer of the calling CPU.
 * @param   iCpuSelf    The set index of the CPU we're running on.
 */
static void rtTimerNtOmniMasterCallbackWorker(PRTTIMER pTimer, PRTTIMERNTSUBTIMER pSubTimer, int iCpuSelf)
{
    /*
     * Check that we haven't been suspended before scheduling the other DPCs
     * and doing the callout.
     */
    if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
        && pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTCPUSET OnlineSet;
        RTMpGetOnlineSet(&OnlineSet);

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());

        if (pTimer->u64NanoInterval)
        {
            /*
             * Recurring timer.
             */
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (   RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    && iCpuSelf != iCpu)
                    rtTimerNtOmniQueueDpc(&pTimer->aSubTimers[iCpu], iCpu);

            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);

            /* We re-arm the timer after calling pfnTimer, as it may stop the timer
               or change the interval, which would mean doing extra work. */
            if (!pTimer->fSuspended && pTimer->u64NanoInterval)
                rtTimerNtRearmInternval(pTimer, &pSubTimer->NtDpc);
        }
        else
        {
            /*
             * Single-shot timers get complicated wrt fSuspended maintenance.
             */
            uint32_t cCpus = 0;
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
                    cCpus++;
            ASMAtomicAddS32(&pTimer->cOmniSuspendCountDown, cCpus); /** @todo this is bogus bogus bogus. The counter is only used here. */

            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (   RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    && iCpuSelf != iCpu)
                    if (!rtTimerNtOmniQueueDpc(&pTimer->aSubTimers[iCpu], iCpu))
                        ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown); /* already queued and counted. */

            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        }

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
    }
}


/**
 * The timer callback for an omni-timer, low-resolution.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniMasterCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER const pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER const           pTimer    = pSubTimer->pParent;
    RTCPUID                  idCpu     = RTMpCpuId();
    int const                iCpuSelf  = RTMpCpuIdToSetIndex(idCpu);

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    /* We must be called on the master CPU or the tick variable goes south. */
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
    if (pTimer->idCpu != idCpu)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: pTimer->idCpu=%d vs idCpu=%d\n", pTimer->idCpu, idCpu);
#endif

    rtTimerNtOmniMasterCallbackWorker(pTimer, pSubTimer, iCpuSelf);

    RT_NOREF(pDpc, SystemArgument1, SystemArgument2);
}


#ifdef RTR0TIMER_NT_HIGH_RES
/**
 * The timer callback for a high-resolution omni-timer.
 *
 * @param   pExTimer    The windows timer.
 * @param   pvUser      Pointer to our internal timer structure.
 */
static void __stdcall rtTimerNtHighResOmniCallback(PEX_TIMER pExTimer, void *pvUser)
{
    PRTTIMER const           pTimer    = (PRTTIMER)pvUser;
    int const                iCpuSelf  = RTMpCpuIdToSetIndex(RTMpCpuId());
    PRTTIMERNTSUBTIMER const pSubTimer = &pTimer->aSubTimers[iCpuSelf];

    AssertPtr(pTimer);
    Assert(pTimer->pHighResTimer == pExTimer);
# ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtHighResOmniCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
# endif

    rtTimerNtOmniMasterCallbackWorker(pTimer, pSubTimer, iCpuSelf);

    RT_NOREF(pExTimer);
}
#endif /* RTR0TIMER_NT_HIGH_RES */


RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    /*
     * Validate.
     */
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    /*
     * The operation is protected by the spinlock.
     */
    KIRQL bSavedIrql;
    KeAcquireSpinLock(&pTimer->Spinlock, &bSavedIrql);

    /*
     * Check the state.
     */
    if (ASMAtomicUoReadBool(&pTimer->fSuspended))
    { /* likely */ }
    else
    {
        KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
        return VERR_TIMER_ACTIVE;
    }
    if (   !pTimer->fSpecificCpu
        || RTMpIsCpuOnline(pTimer->idCpu))
    { /* likely */ }
    else
    {
        KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
        return VERR_CPU_OFFLINE;
    }

    /*
     * Lazily set the DPC target CPU if needed.
     */
    if (   !pTimer->fSpecificCpu
        || !pTimer->aSubTimers[0].fDpcNeedTargetCpuSet)
    { /* likely */ }
    else
    {
        int rc = rtMpNtSetTargetProcessorDpc(&pTimer->aSubTimers[0].NtDpc, pTimer->idCpu);
        if (RT_FAILURE(rc))
        {
            KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
            return rc;
        }
    }

    /*
     * Do the starting.
     */
#ifndef RTR0TIMER_NT_MANUAL_RE_ARM
    /* Calculate the interval time: */
    uint64_t u64Interval = pTimer->u64NanoInterval / 1000000; /* This is ms, believe it or not. */
    ULONG ulInterval = (ULONG)u64Interval;
    if (ulInterval != u64Interval)
        ulInterval = MAXLONG;
    else if (!ulInterval && pTimer->u64NanoInterval)
        ulInterval = 1;
#endif

    /* Translate u64First to a DueTime: */
    LARGE_INTEGER DueTime;
    DueTime.QuadPart = -(int64_t)(u64First / 100); /* Relative, NT time. */
    if (!DueTime.QuadPart)
        DueTime.QuadPart = -10; /* 1us */

    /* Reset tick counters: */
    unsigned cSubTimers = pTimer->fOmniTimer ? pTimer->cSubTimers : 1;
    for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
        pTimer->aSubTimers[iCpu].iTick = 0;
    pTimer->iMasterTick = 0;

    /* Update timer state: */
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    if (pTimer->u64NanoInterval > 0)
    {
# ifdef RTR0TIMER_NT_HIGH_RES
        uint64_t const uNtNow = pTimer->pHighResTimer ? rtTimerNtQueryInterruptTimeHighRes() : rtTimerNtQueryInterruptTime();
# else
        uint64_t const uNtNow = rtTimerNtQueryInterruptTime();
# endif
        pTimer->uNtStartTime = uNtNow + -DueTime.QuadPart;
        pTimer->uNtDueTime   = pTimer->uNtStartTime;
    }
#endif
    pTimer->cOmniSuspendCountDown = 0;
    ASMAtomicWriteBool(&pTimer->fSuspended, false);

    /*
     * Finally start the NT timer.
     *
     * We do this without holding the spinlock to err on the side of
     * caution in case ExSetTimer or KeSetTimerEx ever should have the idea
     * of running the callback before returning.
     */
    KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);

#ifdef RTR0TIMER_NT_HIGH_RES
    if (pTimer->pHighResTimer)
    {
# ifdef RTR0TIMER_NT_MANUAL_RE_ARM
        g_pfnrtExSetTimer(pTimer->pHighResTimer, DueTime.QuadPart, 0, NULL);
# else
        g_pfnrtExSetTimer(pTimer->pHighResTimer, DueTime.QuadPart, RT_MIN(pTimer->u64NanoInterval / 100, MAXLONG), NULL);
# endif
    }
    else
#endif
    {
        PKDPC const pMasterDpc = &pTimer->aSubTimers[pTimer->fOmniTimer ? RTMpCpuIdToSetIndex(pTimer->idCpu) : 0].NtDpc;
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
        KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
#else
        KeSetTimerEx(&pTimer->NtTimer, DueTime, ulInterval, pMasterDpc);
#endif
    }
    return VINF_SUCCESS;
}


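/*
 * Illustrative note (not part of the original file): NT relative due times are
 * negative and expressed in 100ns units, which is what the u64First
 * translation in RTTimerStart above produces.  For example:
 *
 * @code
 *      uint64_t      u64First = RT_NS_1MS;             // first tick in 1ms
 *      LARGE_INTEGER DueTime;
 *      DueTime.QuadPart = -(int64_t)(u64First / 100);  // == -10000, i.e. 1ms relative
 * @endcode
 *
 * A zero u64First is nudged to -10 (1us) so the value stays negative, i.e.
 * relative; as noted in rtTimerNtRearmInternval, the high-resolution API
 * insists on relative due times.
 */

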
/**
 * Worker function that stops an active timer.
 *
 * Shared by RTTimerStop and RTTimerDestroy.
 *
 * @returns IPRT status code.
 * @param   pTimer      The active timer.
 */
static int rtTimerNtStopWorker(PRTTIMER pTimer)
{
    /*
     * Update the state from within the spinlock context.
     */
    KIRQL bSavedIrql;
    KeAcquireSpinLock(&pTimer->Spinlock, &bSavedIrql);

    bool const fWasSuspended = ASMAtomicXchgBool(&pTimer->fSuspended, true);

    KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
    if (!fWasSuspended)
    {
        /*
         * Cancel the timer and dequeue DPCs.
         */
#ifdef RTR0TIMER_NT_HIGH_RES
        if (pTimer->pHighResTimer)
        {
            g_pfnrtExCancelTimer(pTimer->pHighResTimer, NULL);

            /* We can skip the DPC stuff, unless this is an omni timer or for a specific CPU. */
            if (!pTimer->fSpecificCpu && !pTimer->fOmniTimer)
                return VINF_SUCCESS;
        }
        else
#endif
            KeCancelTimer(&pTimer->NtTimer);

        for (RTCPUID iCpu = 0; iCpu < pTimer->cSubTimers; iCpu++)
            KeRemoveQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc);
        return VINF_SUCCESS;
    }
    return VERR_TIMER_SUSPENDED;
}


RTDECL(int) RTTimerStop(PRTTIMER pTimer)
{
    /*
     * Validate.
     */
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Call the worker we share with RTTimerDestroy.
     */
    return rtTimerNtStopWorker(pTimer);
}


RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
{
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    /*
     * We do all the state changes while holding the spinlock.
     */
    int rc = VINF_SUCCESS;
    KIRQL bSavedIrql;
    KeAcquireSpinLock(&pTimer->Spinlock, &bSavedIrql);

    /*
     * When the timer isn't running, this is a simple job:
     */
    if (ASMAtomicUoReadBool(&pTimer->fSuspended))
        pTimer->u64NanoInterval = u64NanoInterval;
    else
    {
        /*
         * We only implement changing the interval in RTR0TIMER_NT_MANUAL_RE_ARM
         * mode right now.  We typically let the new interval take effect after
         * the next timer callback, unless that's too far ahead.
         */
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
        pTimer->u64NanoInterval = u64NanoInterval;
        pTimer->iMasterTick     = 0;
# ifdef RTR0TIMER_NT_HIGH_RES
        uint64_t const uNtNow = pTimer->pHighResTimer ? rtTimerNtQueryInterruptTimeHighRes() : rtTimerNtQueryInterruptTime();
# else
        uint64_t const uNtNow = rtTimerNtQueryInterruptTime();
# endif
        if (uNtNow >= pTimer->uNtDueTime)
            pTimer->uNtStartTime = uNtNow;
        else
        {
            pTimer->uNtStartTime = pTimer->uNtDueTime;

            /*
             * Re-arm the timer if the next DueTime is both more than 1.25 new
             * intervals and at least 0.5 ms ahead.
             */
            uint64_t cNtToNext = pTimer->uNtDueTime - uNtNow;
            if (   cNtToNext >= RT_NS_1MS / 2 / 100 /* 0.5 ms */
                && cNtToNext * 100 > u64NanoInterval + u64NanoInterval / 4)
            {
                pTimer->uNtStartTime = pTimer->uNtDueTime = uNtNow + u64NanoInterval / 100;
# ifdef RTR0TIMER_NT_HIGH_RES
                if (pTimer->pHighResTimer)
                    g_pfnrtExSetTimer(pTimer->pHighResTimer, -(int64_t)u64NanoInterval / 100, 0, NULL);
                else
# endif
                {
                    LARGE_INTEGER DueTime;
                    DueTime.QuadPart = -(int64_t)u64NanoInterval / 100;
                    KeSetTimerEx(&pTimer->NtTimer, DueTime, 0,
                                 &pTimer->aSubTimers[pTimer->fOmniTimer ? RTMpCpuIdToSetIndex(pTimer->idCpu) : 0].NtDpc);
                }
            }
        }
#else
        rc = VERR_NOT_SUPPORTED;
#endif
    }

    KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);

    return rc;
}

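/*
 * Worked example (illustrative, not part of the original file) for the re-arm
 * condition in RTTimerChangeInterval above: with a new interval of
 * u64NanoInterval = 1ms, the timer is re-armed early only if the old due time
 * is both at least 0.5ms away (cNtToNext >= 5000 NT ticks) and more than 1.25
 * new intervals away (cNtToNext * 100 > 1250000ns, i.e. cNtToNext > 12500
 * ticks).  So an old due time 2ms out gets replaced by a fresh 1ms one, while
 * an old due time 1.1ms out is simply left to expire, the new interval taking
 * effect from that tick onwards.
 */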

RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
{
    /* It's ok to pass NULL pointer. */
    if (pTimer == /*NIL_RTTIMER*/ NULL)
        return VINF_SUCCESS;
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    /*
     * We do not support destroying a timer from the callback because it is
     * not 101% safe since we cannot flush DPCs.  Solaris has the same restriction.
     */
    AssertReturn(KeGetCurrentIrql() == PASSIVE_LEVEL, VERR_INVALID_CONTEXT);

    /*
     * Invalidate the timer, stop it if it's running and finally free up the memory.
     */
    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
    rtTimerNtStopWorker(pTimer);

#ifdef RTR0TIMER_NT_HIGH_RES
    /*
     * Destroy the high-resolution timer before flushing DPCs.
     */
    if (pTimer->pHighResTimer)
    {
        g_pfnrtExDeleteTimer(pTimer->pHighResTimer, TRUE /*fCancel*/, TRUE /*fWait*/, NULL);
        pTimer->pHighResTimer = NULL;
    }
#endif

    /*
     * Flush DPCs to be on the safe side.
     */
    if (g_pfnrtNtKeFlushQueuedDpcs)
        g_pfnrtNtKeFlushQueuedDpcs();

    RTMemFree(pTimer);

    return VINF_SUCCESS;
}


RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
{
    *ppTimer = NULL;

    /*
     * Validate flags.
     */
    if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
        return VERR_INVALID_FLAGS;
    if (   (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
        && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
        && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
        return VERR_CPU_NOT_FOUND;

    /*
     * Allocate the timer handler.
     */
    RTCPUID cSubTimers = 1;
    if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
    {
        cSubTimers = RTMpGetMaxCpuId() + 1;
        Assert(cSubTimers <= RTCPUSET_MAX_CPUS); /* On Windows we have a 1:1 relationship between cpuid and set index. */
    }

    PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ(RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[cSubTimers]));
    if (!pTimer)
        return VERR_NO_MEMORY;

    /*
     * Initialize it.
     *
     * Note!  The difference between a SynchronizationTimer and a NotificationTimer
     *        (KeInitializeTimer) is, as far as I can gather, only that the former
     *        will wake up exactly one waiting thread and the latter will wake up
     *        everyone.  Since we don't do any waiting on the NtTimer, that is not
     *        relevant to us.
     */
    pTimer->u32Magic = RTTIMER_MAGIC;
    pTimer->cOmniSuspendCountDown = 0;
    pTimer->fSuspended = true;
    pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
    pTimer->fOmniTimer = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
    pTimer->idCpu = pTimer->fSpecificCpu ? RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK) : NIL_RTCPUID;
    pTimer->cSubTimers = cSubTimers;
    pTimer->pfnTimer = pfnTimer;
    pTimer->pvUser = pvUser;
    KeInitializeSpinLock(&pTimer->Spinlock);
    pTimer->u64NanoInterval = u64NanoInterval;

    int rc = VINF_SUCCESS;
#ifdef RTR0TIMER_NT_HIGH_RES
    if (   (fFlags & RTTIMER_FLAGS_HIGH_RES)
        && RTTimerCanDoHighResolution())
    {
        pTimer->pHighResTimer = g_pfnrtExAllocateTimer(pTimer->fOmniTimer ? rtTimerNtHighResOmniCallback
                                                       : rtTimerNtHighResSimpleCallback, pTimer,
                                                       EX_TIMER_HIGH_RESOLUTION | EX_TIMER_NOTIFICATION);
        if (!pTimer->pHighResTimer)
            rc = VERR_OUT_OF_RESOURCES;
    }
    else
#endif
    {
        if (g_pfnrtKeInitializeTimerEx) /** @todo just call KeInitializeTimer. */
            g_pfnrtKeInitializeTimerEx(&pTimer->NtTimer, SynchronizationTimer);
        else
            KeInitializeTimer(&pTimer->NtTimer);
    }
    if (RT_SUCCESS(rc))
    {
        RTCPUSET OnlineSet;
        RTMpGetOnlineSet(&OnlineSet);

        if (pTimer->fOmniTimer)
        {
            /*
             * Initialize the per-cpu "sub-timers", select the first online cpu to be
             * the master.  This ASSUMES that no cpus will ever go offline.
             *
             * Note! For the high-resolution scenario, all DPC callbacks are slaves as
             *       we have a dedicated timer callback, set above during allocation,
             *       and don't control which CPU it (rtTimerNtHighResOmniCallback) is
             *       called on.
             */
            pTimer->iMasterTick = 0;
            pTimer->idCpu       = NIL_RTCPUID;
            for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
            {
                pTimer->aSubTimers[iCpu].iTick   = 0;
                pTimer->aSubTimers[iCpu].pParent = pTimer;

                if (   pTimer->idCpu == NIL_RTCPUID
                    && RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
                {
                    pTimer->idCpu = RTMpCpuIdFromSetIndex(iCpu);
#ifdef RTR0TIMER_NT_HIGH_RES
                    if (pTimer->pHighResTimer)
                        KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniSlaveCallback, &pTimer->aSubTimers[iCpu]);
                    else
#endif
                        KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniMasterCallback, &pTimer->aSubTimers[iCpu]);
                }
                else
                    KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniSlaveCallback, &pTimer->aSubTimers[iCpu]);
                if (g_pfnrtKeSetImportanceDpc)
                    g_pfnrtKeSetImportanceDpc(&pTimer->aSubTimers[iCpu].NtDpc, HighImportance);

                /* This does not necessarily work for offline CPUs that could potentially be onlined
                   at runtime, so postpone it. (See troubles on testboxmem1 after r148799.) */
                int rc2 = rtMpNtSetTargetProcessorDpc(&pTimer->aSubTimers[iCpu].NtDpc, iCpu);
                if (RT_SUCCESS(rc2))
                    pTimer->aSubTimers[iCpu].fDpcNeedTargetCpuSet = false;
                else if (!RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
                    pTimer->aSubTimers[iCpu].fDpcNeedTargetCpuSet = true;
                else
                {
                    rc = rc2;
                    break;
                }
            }
            Assert(pTimer->idCpu != NIL_RTCPUID);
        }
        else
        {
            /*
             * Initialize the first "sub-timer", target the DPC on a specific processor
             * if requested to do so.
             */
            pTimer->iMasterTick           = 0;
            pTimer->aSubTimers[0].iTick   = 0;
            pTimer->aSubTimers[0].pParent = pTimer;

            KeInitializeDpc(&pTimer->aSubTimers[0].NtDpc, rtTimerNtSimpleCallback, pTimer);
            if (g_pfnrtKeSetImportanceDpc)
                g_pfnrtKeSetImportanceDpc(&pTimer->aSubTimers[0].NtDpc, HighImportance);
            if (pTimer->fSpecificCpu)
            {
                /* This does not necessarily work for offline CPUs that could potentially be onlined
                   at runtime, so postpone it. (See troubles on testboxmem1 after r148799.) */
                int rc2 = rtMpNtSetTargetProcessorDpc(&pTimer->aSubTimers[0].NtDpc, pTimer->idCpu);
                if (RT_SUCCESS(rc2))
                    pTimer->aSubTimers[0].fDpcNeedTargetCpuSet = false;
                else if (!RTCpuSetIsMember(&OnlineSet, pTimer->idCpu))
                    pTimer->aSubTimers[0].fDpcNeedTargetCpuSet = true;
                else
                    rc = rc2;
            }
        }
        if (RT_SUCCESS(rc))
        {
            *ppTimer = pTimer;
            return VINF_SUCCESS;
        }

#ifdef RTR0TIMER_NT_HIGH_RES
        if (pTimer->pHighResTimer)
        {
            g_pfnrtExDeleteTimer(pTimer->pHighResTimer, FALSE, FALSE, NULL);
            pTimer->pHighResTimer = NULL;
        }
#endif
    }

    RTMemFree(pTimer);
    return rc;
}

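/*
 * Illustrative usage sketch (ring-0 IPRT client code, not part of this file):
 * create a 1ms high-resolution timer, run it for a while and tear it down
 * again.  Error handling is abbreviated and rtExampleTimerCallback is a
 * hypothetical callback name.
 *
 * @code
 *      static DECLCALLBACK(void) rtExampleTimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
 *      {
 *          RT_NOREF(pTimer, pvUser, iTick);
 *          // Runs at DISPATCH_LEVEL (DPC context) - keep it short and non-blocking.
 *      }
 *
 *      PRTTIMER pTimer;
 *      int rc = RTTimerCreateEx(&pTimer, RT_NS_1MS, RTTIMER_FLAGS_HIGH_RES, rtExampleTimerCallback, NULL);
 *      if (RT_SUCCESS(rc))
 *      {
 *          rc = RTTimerStart(pTimer, 0);   // u64First = 0: first tick ASAP
 *          // ... periodic callbacks fire every millisecond ...
 *          RTTimerStop(pTimer);
 *          RTTimerDestroy(pTimer);         // must be called at PASSIVE_LEVEL
 *      }
 * @endcode
 */
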

RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
{
    if (!g_pfnrtNtExSetTimerResolution)
        return VERR_NOT_SUPPORTED;

    ULONG ulGranted = g_pfnrtNtExSetTimerResolution(u32Request / 100, TRUE);
    if (pu32Granted)
        *pu32Granted = ulGranted * 100; /* NT -> ns */
    return VINF_SUCCESS;
}


RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
{
    if (!g_pfnrtNtExSetTimerResolution)
        return VERR_NOT_SUPPORTED;

    g_pfnrtNtExSetTimerResolution(0 /* ignored */, FALSE);
    NOREF(u32Granted);
    return VINF_SUCCESS;
}

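/*
 * Illustrative usage sketch (not part of this file): the two granularity calls
 * above pair up, with the request expressed in nanoseconds and rounded to NT's
 * 100ns units internally.
 *
 * @code
 *      uint32_t u32Granted = 0;
 *      if (RT_SUCCESS(RTTimerRequestSystemGranularity(RT_NS_1MS, &u32Granted)))
 *      {
 *          // ... timing-sensitive work with roughly u32Granted ns tick resolution ...
 *          RTTimerReleaseSystemGranularity(u32Granted);
 *      }
 * @endcode
 */
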

RTDECL(bool) RTTimerCanDoHighResolution(void)
{
#ifdef RTR0TIMER_NT_HIGH_RES
    return g_pfnrtExAllocateTimer != NULL
        && g_pfnrtExDeleteTimer   != NULL
        && g_pfnrtExSetTimer      != NULL
        && g_pfnrtExCancelTimer   != NULL;
#else
    return false;
#endif
}