VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/timer-r0drv-nt.cpp@ 53517

Last change on this file since 53517 was 53457, checked in by vboxsync, 10 years ago

RTTimer/r0drv/nt: Rearm the interval timers ourselves to avoid rounding errors caused by millisecond interval resolution vs. sub-millisecond clock tick (e.g. 0.9766ms). Also fixed single-shot omni timers.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 18.8 KB
Line 
1/* $Id: timer-r0drv-nt.cpp 53457 2014-12-05 12:54:16Z vboxsync $ */
2/** @file
3 * IPRT - Timers, Ring-0 Driver, NT.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27/*******************************************************************************
28* Header Files *
29*******************************************************************************/
30#include "the-nt-kernel.h"
31
32#include <iprt/timer.h>
33#include <iprt/mp.h>
34#include <iprt/cpuset.h>
35#include <iprt/err.h>
36#include <iprt/asm.h>
37#include <iprt/assert.h>
38#include <iprt/alloc.h>
39
40#include "internal-r0drv-nt.h"
41#include "internal/magics.h"
42
43/** This seems to provide better accuracy. */
44#define RTR0TIMER_NT_MANUAL_RE_ARM 1
45
46
47/*******************************************************************************
48* Structures and Typedefs *
49*******************************************************************************/
/**
 * A sub timer structure.
 *
 * This is used for keeping the per-cpu tick and DPC object.
 */
typedef struct RTTIMERNTSUBTIMER
{
    /** The tick counter (number of times the callback has fired on this CPU). */
    uint64_t                iTick;
    /** Pointer to the parent timer. */
    PRTTIMER                pParent;
    /** The NT DPC object used to run the callback on the right CPU. */
    KDPC                    NtDpc;
} RTTIMERNTSUBTIMER;
/** Pointer to a NT sub-timer structure. */
typedef RTTIMERNTSUBTIMER *PRTTIMERNTSUBTIMER;
66
/**
 * The internal representation of an NT timer handle.
 */
typedef struct RTTIMER
{
    /** Magic.
     * This is RTTIMER_MAGIC, but changes to something else before the timer
     * is destroyed to indicate clearly that thread should exit. */
    uint32_t volatile       u32Magic;
    /** Suspend count down for single shot omni timers.
     * Counts the remaining per-CPU callouts; the last one to complete marks
     * the timer as suspended. */
    int32_t volatile        cOmniSuspendCountDown;
    /** Flag indicating the timer is suspended. */
    bool volatile           fSuspended;
    /** Whether the timer must run on one specific CPU or not. */
    bool                    fSpecificCpu;
    /** Whether the timer must run on all CPUs or not. */
    bool                    fOmniTimer;
    /** The CPU it must run on if fSpecificCpu is set.
     * The master CPU for an omni-timer. */
    RTCPUID                 idCpu;
    /** Callback. */
    PFNRTTIMER              pfnTimer;
    /** User argument. */
    void                   *pvUser;
    /** The timer interval. 0 if one-shot. */
    uint64_t                u64NanoInterval;
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    /** The NT interrupt time (100ns units) at which the timer was started.
     * Base for computing the next due time when manually re-arming. */
    uint64_t                uNtStartTime;
#endif
    /** The NT timer object. */
    KTIMER                  NtTimer;
    /** The number of sub-timers. */
    RTCPUID                 cSubTimers;
    /** Sub-timers.
     * Normally there is just one, but for RTTIMER_FLAGS_CPU_ALL this will contain
     * an entry for all possible cpus. In that case the index will be the same as
     * for the RTCpuSet. */
    RTTIMERNTSUBTIMER       aSubTimers[1];
} RTTIMER;
107
108
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
/**
 * Get current NT interrupt time (100ns units since boot).
 * @return NT interrupt time
 */
static uint64_t rtTimerNtQueryInterruptTime(void)
{
# ifdef RT_ARCH_AMD64
    return KeQueryInterruptTime(); /* macro */
# else
    if (g_pfnrtKeQueryInterruptTime)
        return g_pfnrtKeQueryInterruptTime();

    /* NT4 fallback: read the 64-bit interrupt time directly from the shared
       user data page.  High1Time is published before LowPart and High2Time
       after it, so re-read until both high parts agree to avoid a torn
       32-bit read (standard KUSER_SHARED_DATA idiom). */
    ULARGE_INTEGER InterruptTime;
    do
    {
        InterruptTime.HighPart = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High1Time;
        InterruptTime.LowPart  = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.LowPart;
    } while (((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High2Time != InterruptTime.HighPart);
    return InterruptTime.QuadPart;
# endif
}
#endif /* RTR0TIMER_NT_MANUAL_RE_ARM */
133
134
135/**
136 * Manually re-arms an internval timer.
137 *
138 * Turns out NT doesn't necessarily do a very good job at re-arming timers
139 * accurately.
140 *
141 * @param pTimer The timer.
142 * @param iTick The current timer tick.
143 * @param pMasterDpc The master DPC.
144 */
145DECLINLINE(void) rtTimerNtRearmInternval(PRTTIMER pTimer, uint64_t iTick, PKDPC pMasterDpc)
146{
147#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
148 Assert(pTimer->u64NanoInterval);
149
150 uint64_t uNtNext = (iTick * pTimer->u64NanoInterval) / 100 - 10; /* 1us fudge */
151 LARGE_INTEGER DueTime;
152 DueTime.QuadPart = rtTimerNtQueryInterruptTime() - pTimer->uNtStartTime;
153 if (DueTime.QuadPart < 0)
154 DueTime.QuadPart = 0;
155 if ((uint64_t)DueTime.QuadPart < uNtNext)
156 DueTime.QuadPart -= uNtNext;
157 else
158 DueTime.QuadPart = -2500; /* 0.25ms */
159
160 KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, &pTimer->aSubTimers[0].NtDpc);
161#endif
162}
163
164
/**
 * Timer callback function for the non-omni timers.
 *
 * Fires the user callback once per tick, re-arming the NT timer manually
 * for interval timers.
 *
 * @param   pDpc                Pointer to the DPC.
 * @param   pvUser              Pointer to our internal timer structure.
 * @param   SystemArgument1     Some system argument.
 * @param   SystemArgument2     Some system argument.
 */
static void _stdcall rtTimerNtSimpleCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMER pTimer = (PRTTIMER)pvUser;
    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtSimpleCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
#endif

    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        /* One-shot: flag suspended before the callout so the callback may
           legally restart the timer. */
        if (!pTimer->u64NanoInterval)
            ASMAtomicWriteBool(&pTimer->fSuspended, true);
        uint64_t iTick = ++pTimer->aSubTimers[0].iTick;
        /* Interval timer: re-arm before the callout for best accuracy. */
        if (pTimer->u64NanoInterval)
            rtTimerNtRearmInternval(pTimer, iTick, &pTimer->aSubTimers[0].NtDpc);
        pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
199
200
/**
 * The slave DPC callback for an omni timer.
 *
 * Queued by the master callback on every other online CPU; performs the
 * per-CPU callout only.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniSlaveCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        /* Single-shot: the countdown was primed by the master callback; the
           last CPU to finish its callout marks the timer suspended. */
        if (!pTimer->u64NanoInterval)
            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
238
239
/**
 * The timer callback for an omni-timer.
 *
 * This is responsible for queueing the DPCs for the other CPUs and
 * perform the callback on the CPU on which it is called.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniMasterCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before scheduling the other DPCs
     * and doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTCPUSET OnlineSet;
        RTMpGetOnlineSet(&OnlineSet);

        if (pTimer->u64NanoInterval)
        {
            /*
             * Recurring timer: fan out DPCs to all other online CPUs, re-arm,
             * then do our own callout.
             */
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    &&  iCpuSelf != iCpu)
                    KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0);

            uint64_t iTick = ++pSubTimer->iTick;
            rtTimerNtRearmInternval(pTimer, iTick, &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc);
            pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
        }
        else
        {
            /*
             * Single shot timers gets complicated wrt to fSuspended maintance.
             *
             * Prime the countdown with the number of online CPUs, queue the
             * slave DPCs (un-counting those already queued), then decrement
             * for ourselves; whoever reaches zero flags the timer suspended.
             */
            uint32_t cCpus = 0;
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
                    cCpus++;
            ASMAtomicAddS32(&pTimer->cOmniSuspendCountDown, cCpus);

            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    &&  iCpuSelf != iCpu)
                    if (!KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0))
                        ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown); /* already queued and counted. */

            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        }
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
315
316
317
/**
 * Starts a previously created and currently suspended timer.
 *
 * @returns IPRT status code (VERR_TIMER_ACTIVE if already running,
 *          VERR_CPU_OFFLINE if the required CPU is offline).
 * @param   pTimer      The timer handle.
 * @param   u64First    First expiration in nanoseconds (relative); 0 means ASAP.
 */
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    /*
     * Validate.
     */
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
        return VERR_TIMER_ACTIVE;
    if (    pTimer->fSpecificCpu
        &&  !RTMpIsCpuOnline(pTimer->idCpu))
        return VERR_CPU_OFFLINE;

    /*
     * Start the timer.  The DPC that fires first is the master's for omni
     * timers, otherwise the (only) sub-timer's.
     */
    PKDPC pMasterDpc = pTimer->fOmniTimer
                     ? &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc
                     : &pTimer->aSubTimers[0].NtDpc;

#ifndef RTR0TIMER_NT_MANUAL_RE_ARM
    /* KeSetTimerEx takes the recurring period in milliseconds; clamp to
       [1, MAXLONG] for non-zero nano intervals. */
    uint64_t u64Interval = pTimer->u64NanoInterval / 1000000; /* This is ms, believe it or not. */
    ULONG ulInterval = (ULONG)u64Interval;
    if (ulInterval != u64Interval)
        ulInterval = MAXLONG;
    else if (!ulInterval && pTimer->u64NanoInterval)
        ulInterval = 1;
#endif

    LARGE_INTEGER DueTime;
    DueTime.QuadPart = -(int64_t)(u64First / 100); /* Relative, NT time. */
    if (!DueTime.QuadPart)
        DueTime.QuadPart = -1;

    /* Reset all tick counters and the suspend state before arming. */
    unsigned cSubTimers = pTimer->fOmniTimer ? pTimer->cSubTimers : 1;
    for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
        pTimer->aSubTimers[iCpu].iTick = 0;
    ASMAtomicWriteS32(&pTimer->cOmniSuspendCountDown, 0);
    ASMAtomicWriteBool(&pTimer->fSuspended, false);
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    /* One-shot arming; interval timers re-arm themselves in the callbacks. */
    pTimer->uNtStartTime = rtTimerNtQueryInterruptTime();
    KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
#else
    KeSetTimerEx(&pTimer->NtTimer, DueTime, ulInterval, pMasterDpc);
#endif
    return VINF_SUCCESS;
}
366
367
368/**
369 * Worker function that stops an active timer.
370 *
371 * Shared by RTTimerStop and RTTimerDestroy.
372 *
373 * @param pTimer The active timer.
374 */
375static void rtTimerNtStopWorker(PRTTIMER pTimer)
376{
377 /*
378 * Just cancel the timer, dequeue the DPCs and flush them (if this is supported).
379 */
380 ASMAtomicWriteBool(&pTimer->fSuspended, true);
381 KeCancelTimer(&pTimer->NtTimer);
382
383 for (RTCPUID iCpu = 0; iCpu < pTimer->cSubTimers; iCpu++)
384 KeRemoveQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc);
385
386 /*
387 * I'm a bit uncertain whether this should be done during RTTimerStop
388 * or only in RTTimerDestroy()... Linux and Solaris will wait AFAIK,
389 * which is why I'm keeping this here for now.
390 */
391 if (g_pfnrtNtKeFlushQueuedDpcs)
392 g_pfnrtNtKeFlushQueuedDpcs();
393}
394
395
396RTDECL(int) RTTimerStop(PRTTIMER pTimer)
397{
398 /*
399 * Validate.
400 */
401 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
402 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
403
404 if (ASMAtomicUoReadBool(&pTimer->fSuspended))
405 return VERR_TIMER_SUSPENDED;
406
407 /*
408 * Call the worker we share with RTTimerDestroy.
409 */
410 rtTimerNtStopWorker(pTimer);
411 return VINF_SUCCESS;
412}
413
414
415RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
416{
417 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
418 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
419
420 return VERR_NOT_SUPPORTED;
421}
422
423
424RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
425{
426 /* It's ok to pass NULL pointer. */
427 if (pTimer == /*NIL_RTTIMER*/ NULL)
428 return VINF_SUCCESS;
429 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
430 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
431
432 /*
433 * Invalidate the timer, stop it if it's running and finally
434 * free up the memory.
435 */
436 ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
437 if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
438 rtTimerNtStopWorker(pTimer);
439 RTMemFree(pTimer);
440
441 return VINF_SUCCESS;
442}
443
444
/**
 * Creates a timer.
 *
 * @returns IPRT status code.
 * @param   ppTimer         Where to store the timer handle on success.
 * @param   u64NanoInterval The interval in nanoseconds; 0 for a one-shot timer.
 * @param   fFlags          RTTIMER_FLAGS_XXX.
 * @param   pfnTimer        The callback to invoke on each tick.
 * @param   pvUser          User argument passed to the callback.
 */
RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
{
    *ppTimer = NULL;

    /*
     * Validate flags.
     */
    if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
        return VERR_INVALID_PARAMETER;
    if (    (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
        &&  (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
        &&  !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
        return VERR_CPU_NOT_FOUND;

    /*
     * Allocate the timer handler.  Omni timers get one sub-timer per
     * possible CPU; everything else gets exactly one.
     */
    RTCPUID cSubTimers = 1;
    if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
    {
        cSubTimers = RTMpGetMaxCpuId() + 1;
        Assert(cSubTimers <= RTCPUSET_MAX_CPUS); /* On Windows we have a 1:1 relationship between cpuid and set index. */
    }

    PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ(RT_OFFSETOF(RTTIMER, aSubTimers[cSubTimers]));
    if (!pTimer)
        return VERR_NO_MEMORY;

    /*
     * Initialize it.
     */
    pTimer->u32Magic = RTTIMER_MAGIC;
    pTimer->cOmniSuspendCountDown = 0;
    pTimer->fSuspended = true;                  /* created suspended; RTTimerStart arms it */
    pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
    pTimer->fOmniTimer = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
    pTimer->idCpu = pTimer->fSpecificCpu ? RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK) : NIL_RTCPUID;
    pTimer->cSubTimers = cSubTimers;
    pTimer->pfnTimer = pfnTimer;
    pTimer->pvUser = pvUser;
    pTimer->u64NanoInterval = u64NanoInterval;
    KeInitializeTimerEx(&pTimer->NtTimer, SynchronizationTimer);
    if (pTimer->fOmniTimer)
    {
        /*
         * Initialize the per-cpu "sub-timers", select the first online cpu
         * to be the master.
         * ASSUMES that no cpus will ever go offline.
         */
        pTimer->idCpu = NIL_RTCPUID;
        for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
        {
            pTimer->aSubTimers[iCpu].iTick = 0;
            pTimer->aSubTimers[iCpu].pParent = pTimer;

            /* The first online CPU becomes the master and gets the master
               callback; all other CPUs get the slave callback. */
            if (    pTimer->idCpu == NIL_RTCPUID
                &&  RTMpIsCpuOnline(RTMpCpuIdFromSetIndex(iCpu)))
            {
                pTimer->idCpu = RTMpCpuIdFromSetIndex(iCpu);
                KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniMasterCallback, &pTimer->aSubTimers[iCpu]);
            }
            else
                KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniSlaveCallback, &pTimer->aSubTimers[iCpu]);
            KeSetImportanceDpc(&pTimer->aSubTimers[iCpu].NtDpc, HighImportance);
            KeSetTargetProcessorDpc(&pTimer->aSubTimers[iCpu].NtDpc, (int)RTMpCpuIdFromSetIndex(iCpu));
        }
        Assert(pTimer->idCpu != NIL_RTCPUID);
    }
    else
    {
        /*
         * Initialize the first "sub-timer", target the DPC on a specific processor
         * if requested to do so.
         */
        pTimer->aSubTimers[0].iTick = 0;
        pTimer->aSubTimers[0].pParent = pTimer;

        KeInitializeDpc(&pTimer->aSubTimers[0].NtDpc, rtTimerNtSimpleCallback, pTimer);
        KeSetImportanceDpc(&pTimer->aSubTimers[0].NtDpc, HighImportance);
        if (pTimer->fSpecificCpu)
            KeSetTargetProcessorDpc(&pTimer->aSubTimers[0].NtDpc, (int)pTimer->idCpu);
    }

    *ppTimer = pTimer;
    return VINF_SUCCESS;
}
531
532
533RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
534{
535 if (!g_pfnrtNtExSetTimerResolution)
536 return VERR_NOT_SUPPORTED;
537
538 ULONG ulGranted = g_pfnrtNtExSetTimerResolution(u32Request / 100, TRUE);
539 if (pu32Granted)
540 *pu32Granted = ulGranted * 100; /* NT -> ns */
541 return VINF_SUCCESS;
542}
543
544
545RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
546{
547 if (!g_pfnrtNtExSetTimerResolution)
548 return VERR_NOT_SUPPORTED;
549
550 g_pfnrtNtExSetTimerResolution(0 /* ignored */, FALSE);
551 NOREF(u32Granted);
552 return VINF_SUCCESS;
553}
554
555
556RTDECL(bool) RTTimerCanDoHighResolution(void)
557{
558 return false;
559}
560
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette