VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/timer-r0drv-nt.cpp@ 61888

Last change on this file since 61888 was 57358, checked in by vboxsync, 9 years ago

*: scm cleanup run.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.9 KB
Line 
1/* $Id: timer-r0drv-nt.cpp 57358 2015-08-14 15:16:38Z vboxsync $ */
2/** @file
3 * IPRT - Timers, Ring-0 Driver, NT.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-nt-kernel.h"
32
33#include <iprt/timer.h>
34#include <iprt/mp.h>
35#include <iprt/cpuset.h>
36#include <iprt/err.h>
37#include <iprt/asm.h>
38#include <iprt/assert.h>
39#include <iprt/mem.h>
40#include <iprt/thread.h>
41
42#include "internal-r0drv-nt.h"
43#include "internal/magics.h"
44
45/** This seems to provide better accuracy. */
46#define RTR0TIMER_NT_MANUAL_RE_ARM 1
47
48
49/*********************************************************************************************************************************
50* Structures and Typedefs *
51*********************************************************************************************************************************/
/**
 * A sub timer structure.
 *
 * This is used for keeping the per-cpu tick and DPC object.
 */
typedef struct RTTIMERNTSUBTIMER
{
    /** The tick counter (incremented before each callout on this CPU). */
    uint64_t                iTick;
    /** Pointer to the parent timer. */
    PRTTIMER                pParent;
    /** Thread active executing the worker function, NIL if inactive.
     * Written around the callout so other code can tell a callback is running. */
    RTNATIVETHREAD volatile hActiveThread;
    /** The NT DPC object (one per CPU for omni timers). */
    KDPC                    NtDpc;
} RTTIMERNTSUBTIMER;
/** Pointer to a NT sub-timer structure. */
typedef RTTIMERNTSUBTIMER *PRTTIMERNTSUBTIMER;
70
/**
 * The internal representation of an NT timer handle.
 */
typedef struct RTTIMER
{
    /** Magic.
     * This is RTTIMER_MAGIC, but changes to something else before the timer
     * is destroyed to indicate clearly that thread should exit. */
    uint32_t volatile       u32Magic;
    /** Suspend count down for single shot omni timers.
     * Primed with the online CPU count by the master callback; the last CPU
     * to decrement it to zero sets fSuspended. */
    int32_t volatile        cOmniSuspendCountDown;
    /** Flag indicating the timer is suspended. */
    bool volatile           fSuspended;
    /** Whether the timer must run on one specific CPU or not. */
    bool                    fSpecificCpu;
    /** Whether the timer must run on all CPUs or not. */
    bool                    fOmniTimer;
    /** The CPU it must run on if fSpecificCpu is set.
     * The master CPU for an omni-timer. */
    RTCPUID                 idCpu;
    /** Callback. */
    PFNRTTIMER              pfnTimer;
    /** User argument. */
    void                   *pvUser;
    /** The timer interval. 0 if one-shot. */
    uint64_t                u64NanoInterval;
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    /** The desired NT time of the first tick.
     * Used as the base for computing each manually re-armed due time. */
    uint64_t                uNtStartTime;
#endif
    /** The Nt timer object. */
    KTIMER                  NtTimer;
    /** The number of sub-timers. */
    RTCPUID                 cSubTimers;
    /** Sub-timers.
     * Normally there is just one, but for RTTIMER_FLAGS_CPU_ALL this will contain
     * an entry for all possible cpus. In that case the index will be the same as
     * for the RTCpuSet. */
    RTTIMERNTSUBTIMER       aSubTimers[1];
} RTTIMER;
111
112
113#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
114/**
115 * Get current NT interrupt time.
116 * @return NT interrupt time
117 */
118static uint64_t rtTimerNtQueryInterruptTime(void)
119{
120# ifdef RT_ARCH_AMD64
121 return KeQueryInterruptTime(); /* macro */
122# else
123 if (g_pfnrtKeQueryInterruptTime)
124 return g_pfnrtKeQueryInterruptTime();
125
126 /* NT4 */
127 ULARGE_INTEGER InterruptTime;
128 do
129 {
130 InterruptTime.HighPart = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High1Time;
131 InterruptTime.LowPart = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.LowPart;
132 } while (((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High2Time != InterruptTime.HighPart);
133 return InterruptTime.QuadPart;
134# endif
135}
136#endif /* RTR0TIMER_NT_MANUAL_RE_ARM */
137
138
139/**
140 * Manually re-arms an internval timer.
141 *
142 * Turns out NT doesn't necessarily do a very good job at re-arming timers
143 * accurately.
144 *
145 * @param pTimer The timer.
146 * @param iTick The current timer tick.
147 * @param pMasterDpc The master DPC.
148 */
149DECLINLINE(void) rtTimerNtRearmInternval(PRTTIMER pTimer, uint64_t iTick, PKDPC pMasterDpc)
150{
151#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
152 Assert(pTimer->u64NanoInterval);
153
154 uint64_t uNtNext = (iTick * pTimer->u64NanoInterval) / 100 - 10; /* 1us fudge */
155 LARGE_INTEGER DueTime;
156 DueTime.QuadPart = rtTimerNtQueryInterruptTime() - pTimer->uNtStartTime;
157 if (DueTime.QuadPart < 0)
158 DueTime.QuadPart = 0;
159 if ((uint64_t)DueTime.QuadPart < uNtNext)
160 DueTime.QuadPart -= uNtNext;
161 else
162 DueTime.QuadPart = -2500; /* 0.25ms */
163
164 KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, &pTimer->aSubTimers[0].NtDpc);
165#endif
166}
167
168
/**
 * Timer callback function for the non-omni timers.
 *
 * Performs the tick accounting, optional manual re-arm, and user callout
 * for a one-shot or interval timer bound to zero or one specific CPU.
 *
 * @param   pDpc                Pointer to the DPC.
 * @param   pvUser              Pointer to our internal timer structure.
 * @param   SystemArgument1     Some system argument.
 * @param   SystemArgument2     Some system argument.
 */
static void _stdcall rtTimerNtSimpleCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMER pTimer = (PRTTIMER)pvUser;
    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtSimpleCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
#endif

    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        /* Publish which thread is in the callback (see RTTimerStop/Destroy users). */
        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, RTThreadNativeSelf());

        /* One-shot timers are flagged suspended *before* the callout so the
           user may restart the timer from inside pfnTimer. */
        if (!pTimer->u64NanoInterval)
            ASMAtomicWriteBool(&pTimer->fSuspended, true);
        uint64_t iTick = ++pTimer->aSubTimers[0].iTick;
        /* Re-arm before the callout so callback run time doesn't skew the interval. */
        if (pTimer->u64NanoInterval)
            rtTimerNtRearmInternval(pTimer, iTick, &pTimer->aSubTimers[0].NtDpc);
        pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);

        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, NIL_RTNATIVETHREAD);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
207
208
/**
 * The slave DPC callback for an omni timer.
 *
 * Queued by the master callback on every other online CPU; performs the
 * per-CPU tick accounting and user callout.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniSlaveCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    /* Each DPC is targeted at a specific CPU; verify we're on the matching one. */
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());

        /* One-shot: the last CPU to finish flags the timer as suspended
           (count-down primed by the master callback). */
        if (!pTimer->u64NanoInterval)
            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
250
251
/**
 * The timer callback for an omni-timer.
 *
 * This is responsible for queueing the DPCs for the other CPUs and
 * perform the callback on the CPU on which it is called.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniMasterCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    /* The master DPC is targeted at pTimer->idCpu; verify we're running there. */
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before scheduling the other DPCs
     * and doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTCPUSET OnlineSet;
        RTMpGetOnlineSet(&OnlineSet);

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());

        if (pTimer->u64NanoInterval)
        {
            /*
             * Recurring timer: queue the slave DPC on every other online CPU,
             * re-arm, and finally do the callout on this (the master) CPU.
             */
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    &&  iCpuSelf != iCpu)
                    KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0);

            uint64_t iTick = ++pSubTimer->iTick;
            rtTimerNtRearmInternval(pTimer, iTick, &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc);
            pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
        }
        else
        {
            /*
             * Single shot timers gets complicated wrt to fSuspended maintenance:
             * prime the count-down with the number of online CPUs, queue the
             * slaves, and let the last CPU to finish set fSuspended.  A failed
             * KeInsertQueueDpc means the DPC is already queued (and counted),
             * so the count is adjusted back down.
             */
            uint32_t cCpus = 0;
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
                    cCpus++;
            ASMAtomicAddS32(&pTimer->cOmniSuspendCountDown, cCpus);

            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    &&  iCpuSelf != iCpu)
                    if (!KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0))
                        ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown); /* already queued and counted. */

            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        }

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
331
332
333
/**
 * Starts a created but suspended timer.
 *
 * @returns VINF_SUCCESS, VERR_INVALID_HANDLE, VERR_TIMER_ACTIVE if already
 *          running, or VERR_CPU_OFFLINE if the required CPU isn't online.
 * @param   pTimer      The timer handle.
 * @param   u64First    Delay before the first tick, in nanoseconds (relative).
 */
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    /*
     * Validate.
     */
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
        return VERR_TIMER_ACTIVE;
    if (   pTimer->fSpecificCpu
        && !RTMpIsCpuOnline(pTimer->idCpu))
        return VERR_CPU_OFFLINE;

    /*
     * Start the timer.  For an omni timer the master DPC is the one targeting
     * the designated master CPU; otherwise it's the (only) sub-timer's DPC.
     */
    PKDPC pMasterDpc = pTimer->fOmniTimer
                     ? &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc
                     : &pTimer->aSubTimers[0].NtDpc;

#ifndef RTR0TIMER_NT_MANUAL_RE_ARM
    uint64_t u64Interval = pTimer->u64NanoInterval / 1000000; /* This is ms, believe it or not. */
    ULONG ulInterval = (ULONG)u64Interval;
    if (ulInterval != u64Interval)
        ulInterval = MAXLONG;               /* clamp on overflow */
    else if (!ulInterval && pTimer->u64NanoInterval)
        ulInterval = 1;                     /* don't let a sub-ms interval round down to one-shot */
#endif

    LARGE_INTEGER DueTime;
    DueTime.QuadPart = -(int64_t)(u64First / 100); /* Relative, NT time. */
    if (!DueTime.QuadPart)
        DueTime.QuadPart = -1;              /* zero would mean absolute time zero, so use -1 (ASAP). */

    /* Reset per-CPU state before arming the NT timer. */
    unsigned cSubTimers = pTimer->fOmniTimer ? pTimer->cSubTimers : 1;
    for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
        pTimer->aSubTimers[iCpu].iTick = 0;
    ASMAtomicWriteS32(&pTimer->cOmniSuspendCountDown, 0);
    ASMAtomicWriteBool(&pTimer->fSuspended, false);
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    /* Record the intended first-tick time; rtTimerNtRearmInternval bases all
       subsequent due times on it. */
    pTimer->uNtStartTime = rtTimerNtQueryInterruptTime() + u64First / 100;
    KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
#else
    KeSetTimerEx(&pTimer->NtTimer, DueTime, ulInterval, pMasterDpc);
#endif
    return VINF_SUCCESS;
}
382
383
384/**
385 * Worker function that stops an active timer.
386 *
387 * Shared by RTTimerStop and RTTimerDestroy.
388 *
389 * @param pTimer The active timer.
390 */
391static void rtTimerNtStopWorker(PRTTIMER pTimer)
392{
393 /*
394 * Just cancel the timer, dequeue the DPCs and flush them (if this is supported).
395 */
396 ASMAtomicWriteBool(&pTimer->fSuspended, true);
397
398 KeCancelTimer(&pTimer->NtTimer);
399
400 for (RTCPUID iCpu = 0; iCpu < pTimer->cSubTimers; iCpu++)
401 KeRemoveQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc);
402}
403
404
405RTDECL(int) RTTimerStop(PRTTIMER pTimer)
406{
407 /*
408 * Validate.
409 */
410 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
411 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
412
413 if (ASMAtomicUoReadBool(&pTimer->fSuspended))
414 return VERR_TIMER_SUSPENDED;
415
416 /*
417 * Call the worker we share with RTTimerDestroy.
418 */
419 rtTimerNtStopWorker(pTimer);
420 return VINF_SUCCESS;
421}
422
423
424RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
425{
426 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
427 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
428
429 return VERR_NOT_SUPPORTED;
430}
431
432
433RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
434{
435 /* It's ok to pass NULL pointer. */
436 if (pTimer == /*NIL_RTTIMER*/ NULL)
437 return VINF_SUCCESS;
438 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
439 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
440
441 /*
442 * We do not support destroying a timer from the callback because it is
443 * not 101% safe since we cannot flush DPCs. Solaris has the same restriction.
444 */
445 AssertReturn(KeGetCurrentIrql() == PASSIVE_LEVEL, VERR_INVALID_CONTEXT);
446
447 /*
448 * Invalidate the timer, stop it if it's running and finally
449 * free up the memory.
450 */
451 ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
452 if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
453 rtTimerNtStopWorker(pTimer);
454
455 /*
456 * Flush DPCs to be on the safe side.
457 */
458 if (g_pfnrtNtKeFlushQueuedDpcs)
459 g_pfnrtNtKeFlushQueuedDpcs();
460
461 RTMemFree(pTimer);
462
463 return VINF_SUCCESS;
464}
465
466
/**
 * Creates a timer in the suspended state.
 *
 * @returns VINF_SUCCESS, VERR_INVALID_PARAMETER, VERR_CPU_NOT_FOUND or
 *          VERR_NO_MEMORY.
 * @param   ppTimer         Where to store the timer handle on success (set to
 *                          NULL on entry).
 * @param   u64NanoInterval The interval in nanoseconds, 0 for a one-shot timer.
 * @param   fFlags          RTTIMER_FLAGS_XXX.
 * @param   pfnTimer        The callback to invoke on each tick.
 * @param   pvUser          User argument passed to the callback.
 */
RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
{
    *ppTimer = NULL;

    /*
     * Validate flags.
     */
    if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
        return VERR_INVALID_PARAMETER;
    if (    (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
        &&  (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
        &&  !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
        return VERR_CPU_NOT_FOUND;

    /*
     * Allocate the timer handler.  Omni timers get one sub-timer per possible
     * CPU, everything else just one.
     */
    RTCPUID cSubTimers = 1;
    if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
    {
        cSubTimers = RTMpGetMaxCpuId() + 1;
        Assert(cSubTimers <= RTCPUSET_MAX_CPUS); /* On Windows we have a 1:1 relationship between cpuid and set index. */
    }

    /* The struct ends with a variable-size aSubTimers array; size accordingly. */
    PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ(RT_OFFSETOF(RTTIMER, aSubTimers[cSubTimers]));
    if (!pTimer)
        return VERR_NO_MEMORY;

    /*
     * Initialize it.
     */
    pTimer->u32Magic = RTTIMER_MAGIC;
    pTimer->cOmniSuspendCountDown = 0;
    pTimer->fSuspended = true;                  /* created suspended; see RTTimerStart. */
    pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
    pTimer->fOmniTimer = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
    pTimer->idCpu = pTimer->fSpecificCpu ? RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK) : NIL_RTCPUID;
    pTimer->cSubTimers = cSubTimers;
    pTimer->pfnTimer = pfnTimer;
    pTimer->pvUser = pvUser;
    pTimer->u64NanoInterval = u64NanoInterval;
    KeInitializeTimerEx(&pTimer->NtTimer, SynchronizationTimer);
    if (pTimer->fOmniTimer)
    {
        /*
         * Initialize the per-cpu "sub-timers", select the first online cpu
         * to be the master.
         * ASSUMES that no cpus will ever go offline.
         */
        pTimer->idCpu = NIL_RTCPUID;
        for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
        {
            pTimer->aSubTimers[iCpu].iTick = 0;
            pTimer->aSubTimers[iCpu].pParent = pTimer;

            /* The first online CPU becomes the master; the rest are slaves. */
            if (    pTimer->idCpu == NIL_RTCPUID
                &&  RTMpIsCpuOnline(RTMpCpuIdFromSetIndex(iCpu)))
            {
                pTimer->idCpu = RTMpCpuIdFromSetIndex(iCpu);
                KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniMasterCallback, &pTimer->aSubTimers[iCpu]);
            }
            else
                KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniSlaveCallback, &pTimer->aSubTimers[iCpu]);
            KeSetImportanceDpc(&pTimer->aSubTimers[iCpu].NtDpc, HighImportance);
            /* Pin each sub-timer's DPC to its CPU. */
            KeSetTargetProcessorDpc(&pTimer->aSubTimers[iCpu].NtDpc, (int)RTMpCpuIdFromSetIndex(iCpu));
        }
        Assert(pTimer->idCpu != NIL_RTCPUID);
    }
    else
    {
        /*
         * Initialize the first "sub-timer", target the DPC on a specific processor
         * if requested to do so.
         */
        pTimer->aSubTimers[0].iTick = 0;
        pTimer->aSubTimers[0].pParent = pTimer;

        KeInitializeDpc(&pTimer->aSubTimers[0].NtDpc, rtTimerNtSimpleCallback, pTimer);
        KeSetImportanceDpc(&pTimer->aSubTimers[0].NtDpc, HighImportance);
        if (pTimer->fSpecificCpu)
            KeSetTargetProcessorDpc(&pTimer->aSubTimers[0].NtDpc, (int)pTimer->idCpu);
    }

    *ppTimer = pTimer;
    return VINF_SUCCESS;
}
553
554
555RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
556{
557 if (!g_pfnrtNtExSetTimerResolution)
558 return VERR_NOT_SUPPORTED;
559
560 ULONG ulGranted = g_pfnrtNtExSetTimerResolution(u32Request / 100, TRUE);
561 if (pu32Granted)
562 *pu32Granted = ulGranted * 100; /* NT -> ns */
563 return VINF_SUCCESS;
564}
565
566
567RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
568{
569 if (!g_pfnrtNtExSetTimerResolution)
570 return VERR_NOT_SUPPORTED;
571
572 g_pfnrtNtExSetTimerResolution(0 /* ignored */, FALSE);
573 NOREF(u32Granted);
574 return VINF_SUCCESS;
575}
576
577
578RTDECL(bool) RTTimerCanDoHighResolution(void)
579{
580 return false;
581}
582
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette