VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/timer-r0drv-nt.cpp@85561

Last change on this file since 85561 was 82968, checked in by vboxsync, 5 years ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 20.3 KB
/* $Id: timer-r0drv-nt.cpp 82968 2020-02-04 10:35:17Z vboxsync $ */
/** @file
 * IPRT - Timers, Ring-0 Driver, NT.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "the-nt-kernel.h"

#include <iprt/timer.h>
#include <iprt/mp.h>
#include <iprt/cpuset.h>
#include <iprt/err.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/mem.h>
#include <iprt/thread.h>

#include "internal-r0drv-nt.h"
#include "internal/magics.h"

/** This seems to provide better accuracy. */
#define RTR0TIMER_NT_MANUAL_RE_ARM 1
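/* Note: with manual re-arming the NT timer is used as a one-shot and is
   re-armed from the DPC on every tick, each due time being computed from
   uNtStartTime (see rtTimerNtRearmInternval).  The period argument of
   KeSetTimerEx only has millisecond granularity, so re-arming manually in
   100ns units is presumably where the accuracy gain comes from. */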


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * A sub timer structure.
 *
 * This is used for keeping the per-cpu tick and DPC object.
 */
typedef struct RTTIMERNTSUBTIMER
{
    /** The tick counter. */
    uint64_t                iTick;
    /** Pointer to the parent timer. */
    PRTTIMER                pParent;
    /** The thread currently executing the worker function, NIL if inactive. */
    RTNATIVETHREAD volatile hActiveThread;
    /** The NT DPC object. */
    KDPC                    NtDpc;
} RTTIMERNTSUBTIMER;
/** Pointer to an NT sub-timer structure. */
typedef RTTIMERNTSUBTIMER *PRTTIMERNTSUBTIMER;

/**
 * The internal representation of an NT timer handle.
 */
typedef struct RTTIMER
{
    /** Magic.
     * This is RTTIMER_MAGIC, but changes to something else before the timer
     * is destroyed to indicate clearly that the timer is being destroyed. */
    uint32_t volatile       u32Magic;
    /** Suspend count down for single shot omni timers. */
    int32_t volatile        cOmniSuspendCountDown;
    /** Flag indicating the timer is suspended. */
    bool volatile           fSuspended;
    /** Whether the timer must run on one specific CPU or not. */
    bool                    fSpecificCpu;
    /** Whether the timer must run on all CPUs or not. */
    bool                    fOmniTimer;
    /** The CPU it must run on if fSpecificCpu is set.
     * The master CPU for an omni-timer. */
    RTCPUID                 idCpu;
    /** Callback. */
    PFNRTTIMER              pfnTimer;
    /** User argument. */
    void                   *pvUser;
    /** The timer interval. 0 if one-shot. */
    uint64_t                u64NanoInterval;
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    /** The desired NT time of the first tick. */
    uint64_t                uNtStartTime;
#endif
    /** The NT timer object. */
    KTIMER                  NtTimer;
    /** The number of sub-timers. */
    RTCPUID                 cSubTimers;
    /** Sub-timers.
     * Normally there is just one, but for RTTIMER_FLAGS_CPU_ALL this will contain
     * an entry for all possible CPUs.  In that case the index will be the same as
     * for the RTCpuSet. */
    RTTIMERNTSUBTIMER       aSubTimers[1];
} RTTIMER;


#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
/**
 * Get current NT interrupt time.
 * @return  NT interrupt time.
 */
static uint64_t rtTimerNtQueryInterruptTime(void)
{
# ifdef RT_ARCH_AMD64
    return KeQueryInterruptTime(); /* macro */
# else
    if (g_pfnrtKeQueryInterruptTime)
        return g_pfnrtKeQueryInterruptTime();

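    /* Fall back to reading the KUSER_SHARED_DATA page directly.  The kernel
       updates the two high dwords on either side of its write to the low
       dword, so looping until High1Time and High2Time agree yields a
       torn-free 64-bit read on 32-bit systems (the usual KSYSTEM_TIME
       technique). */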
    /* NT4 */
    ULARGE_INTEGER InterruptTime;
    do
    {
        InterruptTime.HighPart = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High1Time;
        InterruptTime.LowPart  = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.LowPart;
    } while (((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High2Time != (LONG)InterruptTime.HighPart);
    return InterruptTime.QuadPart;
# endif
}
#endif /* RTR0TIMER_NT_MANUAL_RE_ARM */


/**
 * Manually re-arms an interval timer.
 *
 * Turns out NT doesn't necessarily do a very good job at re-arming timers
 * accurately.
 *
 * @param   pTimer      The timer.
 * @param   iTick       The current timer tick.
 * @param   pMasterDpc  The master DPC.
 */
DECLINLINE(void) rtTimerNtRearmInternval(PRTTIMER pTimer, uint64_t iTick, PKDPC pMasterDpc)
{
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    Assert(pTimer->u64NanoInterval);
    RT_NOREF1(pMasterDpc);

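    /* All quantities below are in NT 100ns units.  uNtNext is the desired
       offset of the coming tick from uNtStartTime; a negative DueTime passed
       to KeSetTimerEx means "relative to now", so subtracting uNtNext from
       the time already elapsed yields the (negative) relative wait. */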
    uint64_t uNtNext = (iTick * pTimer->u64NanoInterval) / 100 - 10; /* 1us fudge */
    LARGE_INTEGER DueTime;
    DueTime.QuadPart = rtTimerNtQueryInterruptTime() - pTimer->uNtStartTime;
    if (DueTime.QuadPart < 0)
        DueTime.QuadPart = 0;
    if ((uint64_t)DueTime.QuadPart < uNtNext)
        DueTime.QuadPart -= uNtNext;
    else
        DueTime.QuadPart = -2500; /* 0.25ms */

    KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, &pTimer->aSubTimers[0].NtDpc);
#else
    RT_NOREF3(pTimer, iTick, pMasterDpc);
#endif
}


/**
 * Timer callback function for the non-omni timers.
 *
 * @param   pDpc                Pointer to the DPC.
 * @param   pvUser              Pointer to our internal timer structure.
 * @param   SystemArgument1     Some system argument.
 * @param   SystemArgument2     Some system argument.
 */
static void _stdcall rtTimerNtSimpleCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMER pTimer = (PRTTIMER)pvUser;
    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtSimpleCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
#endif

    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, RTThreadNativeSelf());

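        /* One-shot timers are marked suspended before the callout rather than
           after, presumably so the callback itself may restart the timer with
           RTTimerStart (which refuses to start a non-suspended timer). */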
        if (!pTimer->u64NanoInterval)
            ASMAtomicWriteBool(&pTimer->fSuspended, true);
        uint64_t iTick = ++pTimer->aSubTimers[0].iTick;
        if (pTimer->u64NanoInterval)
            rtTimerNtRearmInternval(pTimer, iTick, &pTimer->aSubTimers[0].NtDpc);
        pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);

        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, NIL_RTNATIVETHREAD);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}


/**
 * The slave DPC callback for an omni timer.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniSlaveCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());

        if (!pTimer->u64NanoInterval)
            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}


/**
 * The timer callback for an omni-timer.
 *
 * This is responsible for queueing the DPCs for the other CPUs and for
 * performing the callback on the CPU on which it is called.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniMasterCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before scheduling the other DPCs
     * and doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTCPUSET OnlineSet;
        RTMpGetOnlineSet(&OnlineSet);

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());

        if (pTimer->u64NanoInterval)
        {
            /*
             * Recurring timer.
             */
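            /* Each sub-timer DPC was bound to its CPU with
               rtMpNtSetTargetProcessorDpc at creation time, so queueing one
               here runs the slave callout on that CPU. */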
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    &&  iCpuSelf != iCpu)
                    KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0);

            uint64_t iTick = ++pSubTimer->iTick;
            rtTimerNtRearmInternval(pTimer, iTick, &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc);
            pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
        }
        else
        {
            /*
             * Single shot timers get complicated with respect to fSuspended maintenance.
             */
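            /* Raise cOmniSuspendCountDown by the number of online CPUs; each
               callout (and each DPC that could not be queued) takes one off
               again, and whoever reaches zero marks the timer suspended, so
               the one-shot omni timer only becomes suspended once the last
               CPU has done its callout. */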
            uint32_t cCpus = 0;
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
                    cCpus++;
            ASMAtomicAddS32(&pTimer->cOmniSuspendCountDown, cCpus);

            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    &&  iCpuSelf != iCpu)
                    if (!KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0))
                        ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown); /* already queued and counted. */

            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        }

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}



RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    /*
     * Validate.
     */
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
        return VERR_TIMER_ACTIVE;
    if (    pTimer->fSpecificCpu
        &&  !RTMpIsCpuOnline(pTimer->idCpu))
        return VERR_CPU_OFFLINE;

    /*
     * Start the timer.
     */
    PKDPC pMasterDpc = pTimer->fOmniTimer
                     ? &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc
                     : &pTimer->aSubTimers[0].NtDpc;

#ifndef RTR0TIMER_NT_MANUAL_RE_ARM
    uint64_t u64Interval = pTimer->u64NanoInterval / 1000000; /* This is ms, believe it or not. */
    ULONG ulInterval = (ULONG)u64Interval;
    if (ulInterval != u64Interval)
        ulInterval = MAXLONG;
    else if (!ulInterval && pTimer->u64NanoInterval)
        ulInterval = 1;
#endif
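
    /* With manual re-arming (the default here) the NT timer is armed as a
       one-shot with period 0 and each subsequent due time is set by
       rtTimerNtRearmInternval from the DPC, so only the first due time is
       computed below. */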

    LARGE_INTEGER DueTime;
    DueTime.QuadPart = -(int64_t)(u64First / 100); /* Relative, NT time. */
    if (!DueTime.QuadPart)
        DueTime.QuadPart = -1;

    unsigned cSubTimers = pTimer->fOmniTimer ? pTimer->cSubTimers : 1;
    for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
        pTimer->aSubTimers[iCpu].iTick = 0;
    ASMAtomicWriteS32(&pTimer->cOmniSuspendCountDown, 0);
    ASMAtomicWriteBool(&pTimer->fSuspended, false);
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    pTimer->uNtStartTime = rtTimerNtQueryInterruptTime() + u64First / 100;
    KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
#else
    KeSetTimerEx(&pTimer->NtTimer, DueTime, ulInterval, pMasterDpc);
#endif
    return VINF_SUCCESS;
}


/**
 * Worker function that stops an active timer.
 *
 * Shared by RTTimerStop and RTTimerDestroy.
 *
 * @param   pTimer      The active timer.
 */
static void rtTimerNtStopWorker(PRTTIMER pTimer)
{
    /*
     * Just cancel the timer and dequeue any pending DPCs; flushing the DPCs
     * (if supported) is left to RTTimerDestroy.
     */
    ASMAtomicWriteBool(&pTimer->fSuspended, true);

    KeCancelTimer(&pTimer->NtTimer);

    for (RTCPUID iCpu = 0; iCpu < pTimer->cSubTimers; iCpu++)
        KeRemoveQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc);
}


RTDECL(int) RTTimerStop(PRTTIMER pTimer)
{
    /*
     * Validate.
     */
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    if (ASMAtomicUoReadBool(&pTimer->fSuspended))
        return VERR_TIMER_SUSPENDED;

    /*
     * Call the worker we share with RTTimerDestroy.
     */
    rtTimerNtStopWorker(pTimer);
    return VINF_SUCCESS;
}


RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
{
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
    RT_NOREF1(u64NanoInterval);

    return VERR_NOT_SUPPORTED;
}


RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
{
    /* It's ok to pass a NULL pointer. */
    if (pTimer == /*NIL_RTTIMER*/ NULL)
        return VINF_SUCCESS;
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    /*
     * We do not support destroying a timer from the callback because it is
     * not 101% safe since we cannot flush DPCs.  Solaris has the same restriction.
     */
    AssertReturn(KeGetCurrentIrql() == PASSIVE_LEVEL, VERR_INVALID_CONTEXT);

    /*
     * Invalidate the timer, stop it if it's running and finally
     * free up the memory.
     */
    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
        rtTimerNtStopWorker(pTimer);

    /*
     * Flush DPCs to be on the safe side.
     */
    if (g_pfnrtNtKeFlushQueuedDpcs)
        g_pfnrtNtKeFlushQueuedDpcs();

    RTMemFree(pTimer);

    return VINF_SUCCESS;
}


RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
{
    *ppTimer = NULL;

    /*
     * Validate flags.
     */
    if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
        return VERR_INVALID_PARAMETER;
    if (    (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
        &&  (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
        &&  !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
        return VERR_CPU_NOT_FOUND;

    /*
     * Allocate the timer handle.
     */
    RTCPUID cSubTimers = 1;
    if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
    {
        cSubTimers = RTMpGetMaxCpuId() + 1;
        Assert(cSubTimers <= RTCPUSET_MAX_CPUS); /* On Windows we have a 1:1 relationship between cpuid and set index. */
    }

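    /* The sub-timer array is allocated inline with the timer structure:
       RT_UOFFSETOF_DYN yields the byte offset of aSubTimers[cSubTimers],
       i.e. the size of an RTTIMER carrying cSubTimers array entries. */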
    PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ(RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[cSubTimers]));
    if (!pTimer)
        return VERR_NO_MEMORY;

    /*
     * Initialize it.
     */
    pTimer->u32Magic = RTTIMER_MAGIC;
    pTimer->cOmniSuspendCountDown = 0;
    pTimer->fSuspended = true;
    pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
    pTimer->fOmniTimer = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
    pTimer->idCpu = pTimer->fSpecificCpu ? RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK) : NIL_RTCPUID;
    pTimer->cSubTimers = cSubTimers;
    pTimer->pfnTimer = pfnTimer;
    pTimer->pvUser = pvUser;
    pTimer->u64NanoInterval = u64NanoInterval;
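    /* A SynchronizationTimer auto-resets when it is satisfied, which suits a
       timer that is re-armed or periodic.  KeInitializeTimerEx is resolved
       dynamically because it is not available on the oldest NT versions,
       where the plain notification timer set up by KeInitializeTimer has to
       do. */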
    if (g_pfnrtKeInitializeTimerEx)
        g_pfnrtKeInitializeTimerEx(&pTimer->NtTimer, SynchronizationTimer);
    else
        KeInitializeTimer(&pTimer->NtTimer);
    int rc = VINF_SUCCESS;
    if (pTimer->fOmniTimer)
    {
        /*
         * Initialize the per-cpu "sub-timers", selecting the first online cpu
         * as the master.
         * ASSUMES that no cpus will ever go offline.
         */
        pTimer->idCpu = NIL_RTCPUID;
        for (unsigned iCpu = 0; iCpu < cSubTimers && RT_SUCCESS(rc); iCpu++)
        {
            pTimer->aSubTimers[iCpu].iTick = 0;
            pTimer->aSubTimers[iCpu].pParent = pTimer;

            if (    pTimer->idCpu == NIL_RTCPUID
                &&  RTMpIsCpuOnline(RTMpCpuIdFromSetIndex(iCpu)))
            {
                pTimer->idCpu = RTMpCpuIdFromSetIndex(iCpu);
                KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniMasterCallback, &pTimer->aSubTimers[iCpu]);
            }
            else
                KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniSlaveCallback, &pTimer->aSubTimers[iCpu]);
            if (g_pfnrtKeSetImportanceDpc)
                g_pfnrtKeSetImportanceDpc(&pTimer->aSubTimers[iCpu].NtDpc, HighImportance);
            rc = rtMpNtSetTargetProcessorDpc(&pTimer->aSubTimers[iCpu].NtDpc, iCpu);
        }
        Assert(pTimer->idCpu != NIL_RTCPUID);
    }
    else
    {
        /*
         * Initialize the first "sub-timer", targeting the DPC at a specific
         * processor if requested to do so.
         */
        pTimer->aSubTimers[0].iTick = 0;
        pTimer->aSubTimers[0].pParent = pTimer;

        KeInitializeDpc(&pTimer->aSubTimers[0].NtDpc, rtTimerNtSimpleCallback, pTimer);
        if (g_pfnrtKeSetImportanceDpc)
            g_pfnrtKeSetImportanceDpc(&pTimer->aSubTimers[0].NtDpc, HighImportance);
        if (pTimer->fSpecificCpu)
            rc = rtMpNtSetTargetProcessorDpc(&pTimer->aSubTimers[0].NtDpc, (int)pTimer->idCpu);
    }
    if (RT_SUCCESS(rc))
    {
        *ppTimer = pTimer;
        return VINF_SUCCESS;
    }

    RTMemFree(pTimer);
    return rc;
}


RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
{
    if (!g_pfnrtNtExSetTimerResolution)
        return VERR_NOT_SUPPORTED;

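    /* ExSetTimerResolution works in NT 100ns units, hence the /100 going in
       and the *100 coming out. */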
    ULONG ulGranted = g_pfnrtNtExSetTimerResolution(u32Request / 100, TRUE);
    if (pu32Granted)
        *pu32Granted = ulGranted * 100; /* NT -> ns */
    return VINF_SUCCESS;
}


RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
{
    if (!g_pfnrtNtExSetTimerResolution)
        return VERR_NOT_SUPPORTED;

    g_pfnrtNtExSetTimerResolution(0 /* ignored */, FALSE);
    NOREF(u32Granted);
    return VINF_SUCCESS;
}


RTDECL(bool) RTTimerCanDoHighResolution(void)
{
    return false;
}
