VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/linux/timer-r0drv-linux.c@ 80290

Last change on this file since 80290 was 80290, checked in by vboxsync, 6 years ago

IPRT/timer-r0drv-linux.c: A shot at working around the unit test failures we're seeing on 4.9-4.12 kernels.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 58.1 KB
1/* $Id: timer-r0drv-linux.c 80290 2019-08-15 13:30:50Z vboxsync $ */
2/** @file
3 * IPRT - Timers, Ring-0 Driver, Linux.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-linux-kernel.h"
32#include "internal/iprt.h"
33
34#include <iprt/timer.h>
35#include <iprt/time.h>
36#include <iprt/mp.h>
37#include <iprt/cpuset.h>
38#include <iprt/spinlock.h>
39#include <iprt/err.h>
40#include <iprt/asm.h>
41#include <iprt/assert.h>
42#include <iprt/alloc.h>
43
44#include "internal/magics.h"
45
46/** @def RTTIMER_LINUX_WITH_HRTIMER
47 * Whether to use high resolution timers. */
48#if !defined(RTTIMER_LINUX_WITH_HRTIMER) \
49 && defined(IPRT_LINUX_HAS_HRTIMER)
50# define RTTIMER_LINUX_WITH_HRTIMER
51#endif
52
53#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
54# define mod_timer_pinned mod_timer
55# define HRTIMER_MODE_ABS_PINNED HRTIMER_MODE_ABS
56#endif
57
58
59/*********************************************************************************************************************************
60* Structures and Typedefs *
61*********************************************************************************************************************************/
62/**
63 * Timer state machine.
64 *
65 * This is used to try to handle the issues with MP events and
66 * timers that run on all CPUs. It's relatively nasty :-/
67 */
68typedef enum RTTIMERLNXSTATE
69{
70 /** Stopped. */
71 RTTIMERLNXSTATE_STOPPED = 0,
72 /** Transient state; next ACTIVE. */
73 RTTIMERLNXSTATE_STARTING,
74 /** Transient state; next ACTIVE. (not really necessary) */
75 RTTIMERLNXSTATE_MP_STARTING,
76 /** Active. */
77 RTTIMERLNXSTATE_ACTIVE,
79 /** Active and in callback; next ACTIVE, STOPPED or CB_DESTROYING. */
79 RTTIMERLNXSTATE_CALLBACK,
80 /** Stopped while in the callback; next STOPPED. */
81 RTTIMERLNXSTATE_CB_STOPPING,
82 /** Restarted while in the callback; next ACTIVE, STOPPED, DESTROYING. */
83 RTTIMERLNXSTATE_CB_RESTARTING,
84 /** The callback shall destroy the timer; next STOPPED. */
85 RTTIMERLNXSTATE_CB_DESTROYING,
86 /** Transient state; next STOPPED. */
87 RTTIMERLNXSTATE_STOPPING,
88 /** Transient state; next STOPPED. */
89 RTTIMERLNXSTATE_MP_STOPPING,
90 /** The usual 32-bit hack. */
91 RTTIMERLNXSTATE_32BIT_HACK = 0x7fffffff
92} RTTIMERLNXSTATE;
93
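/* A minimal sketch of the common single-CPU lifecycle, based only on the
 * transitions documented above (no states or helpers beyond those in this
 * file are assumed):
 *
 *   STOPPED -> STARTING -> ACTIVE -> CALLBACK -> ACTIVE    (periodic re-arm)
 *   STOPPED -> STARTING -> ACTIVE -> CALLBACK -> STOPPED   (one-shot)
 *   ACTIVE  -> STOPPING -> STOPPED                         (RTTimerStop)
 *
 * Each transition is claimed with a single compare-and-exchange, e.g. the
 * callback entry path does roughly:
 *
 *   if (rtTimerLnxCmpXchgState(&pSubTimer->enmState,
 *                              RTTIMERLNXSTATE_CALLBACK, RTTIMERLNXSTATE_ACTIVE))
 *       // ...run pTimer->pfnTimer(...)...
 */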
94
95/**
96 * A Linux sub-timer.
97 */
98typedef struct RTTIMERLNXSUBTIMER
99{
100 /** Timer specific data. */
101 union
102 {
103#if defined(RTTIMER_LINUX_WITH_HRTIMER)
104 /** High resolution timer. */
105 struct
106 {
107 /** The linux timer structure. */
108 struct hrtimer LnxTimer;
109 } Hr;
110#endif
111 /** Standard timer. */
112 struct
113 {
114 /** The linux timer structure. */
115 struct timer_list LnxTimer;
116 /** The start of the current run (ns).
117 * This is used to calculate when the timer ought to fire the next time. */
118 uint64_t u64NextTS;
119 /** The u64NextTS in jiffies. */
120 unsigned long ulNextJiffies;
121 /** Set when starting or changing the timer so that u64StartTs
122 * and u64NextTS get reinitialized (eliminating some jitter). */
123 bool volatile fFirstAfterChg;
124 } Std;
125 } u;
126 /** The current tick number. */
127 uint64_t iTick;
128 /** Restart the single shot timer at this specific time.
129 * Used when a single shot timer is restarted from the callback. */
130 uint64_t volatile uNsRestartAt;
131 /** Pointer to the parent timer. */
132 PRTTIMER pParent;
133 /** The current sub-timer state. */
134 RTTIMERLNXSTATE volatile enmState;
135} RTTIMERLNXSUBTIMER;
136/** Pointer to a linux sub-timer. */
137typedef RTTIMERLNXSUBTIMER *PRTTIMERLNXSUBTIMER;
138
139
140/**
141 * The internal representation of a Linux timer handle.
142 */
143typedef struct RTTIMER
144{
145 /** Magic.
146 * This is RTTIMER_MAGIC, but changes to something else before the timer
147 * is destroyed to indicate clearly that the timer is going away. */
148 uint32_t volatile u32Magic;
149 /** Spinlock synchronizing the fSuspended and MP event handling.
150 * This is NIL_RTSPINLOCK if cCpus == 1. */
151 RTSPINLOCK hSpinlock;
152 /** Flag indicating that the timer is suspended. */
153 bool volatile fSuspended;
154 /** Whether the timer must run on one specific CPU or not. */
155 bool fSpecificCpu;
156#ifdef CONFIG_SMP
157 /** Whether the timer must run on all CPUs or not. */
158 bool fAllCpus;
159#endif /* else: All -> specific on non-SMP kernels */
160 /** Whether it is a high resolution timer or a standard one. */
161 bool fHighRes;
162 /** The id of the CPU it must run on if fSpecificCpu is set. */
163 RTCPUID idCpu;
164 /** The number of CPUs this timer should run on. */
165 RTCPUID cCpus;
166 /** Callback. */
167 PFNRTTIMER pfnTimer;
168 /** User argument. */
169 void *pvUser;
170 /** The timer interval. 0 if one-shot. */
171 uint64_t volatile u64NanoInterval;
172 /** This is set to the number of jiffies between ticks if the interval is
173 * an exact number of jiffies. (Standard timers only.) */
174 unsigned long volatile cJiffies;
175 /** The change interval spinlock for standard timers only. */
176 spinlock_t ChgIntLock;
177 /** Workqueue item for delayed destruction. */
178 RTR0LNXWORKQUEUEITEM DtorWorkqueueItem;
179 /** Sub-timers.
180 * Normally there is just one, but for RTTIMER_FLAGS_CPU_ALL this will contain
181 * an entry for all possible cpus. In that case the index will be the same as
182 * for the RTCpuSet. */
183 RTTIMERLNXSUBTIMER aSubTimers[1];
184} RTTIMER;
185
186
187/**
188 * An rtTimerLnxStartAllOnCpu / rtTimerLnxStartOnSpecificCpu argument package.
189 */
190typedef struct RTTIMERLINUXSTARTONCPUARGS
191{
192 /** The current time (RTTimeSystemNanoTS). */
193 uint64_t u64Now;
194 /** When to start firing (delta). */
195 uint64_t u64First;
196} RTTIMERLINUXSTARTONCPUARGS;
197/** Pointer to a start-on-CPU argument package. */
198typedef RTTIMERLINUXSTARTONCPUARGS *PRTTIMERLINUXSTARTONCPUARGS;
199
200
201/*********************************************************************************************************************************
202* Internal Functions *
203*********************************************************************************************************************************/
204#ifdef CONFIG_SMP
205static DECLCALLBACK(void) rtTimerLinuxMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser);
206#endif
207
208#if 0
209#define DEBUG_HACKING
210#include <iprt/string.h>
211#include <iprt/asm-amd64-x86.h>
212static void myLogBackdoorPrintf(const char *pszFormat, ...)
213{
214 char szTmp[256];
215 va_list args;
216 size_t cb;
217
218 cb = RTStrPrintf(szTmp, sizeof(szTmp) - 10, "%d: ", RTMpCpuId());
219 va_start(args, pszFormat);
220 cb += RTStrPrintfV(&szTmp[cb], sizeof(szTmp) - cb, pszFormat, args);
221 va_end(args);
222
223 ASMOutStrU8(0x504, (uint8_t *)&szTmp[0], cb);
224}
225# define RTAssertMsg1Weak(pszExpr, uLine, pszFile, pszFunction) \
226 myLogBackdoorPrintf("\n!!Guest Assertion failed!!\n%s(%d) %s\n%s\n", uLine, pszFile, pszFunction, (pszExpr))
227# define RTAssertMsg2Weak myLogBackdoorPrintf
228# define RTTIMERLNX_LOG(a) myLogBackdoorPrintf a
229#else
230# define RTTIMERLNX_LOG(a) do { } while (0)
231#endif
232
233/**
234 * Sets the state.
235 */
236DECLINLINE(void) rtTimerLnxSetState(RTTIMERLNXSTATE volatile *penmState, RTTIMERLNXSTATE enmNewState)
237{
238#ifdef DEBUG_HACKING
239 RTTIMERLNX_LOG(("set %d -> %d\n", *penmState, enmNewState));
240#endif
241 ASMAtomicWriteU32((uint32_t volatile *)penmState, enmNewState);
242}
243
244
245/**
246 * Sets the state if it has a certain value.
247 *
248 * @return true if xchg was done.
249 * @return false if xchg wasn't done.
250 */
251#ifdef DEBUG_HACKING
252#define rtTimerLnxCmpXchgState(penmState, enmNewState, enmCurState) rtTimerLnxCmpXchgStateDebug(penmState, enmNewState, enmCurState, __LINE__)
253static bool rtTimerLnxCmpXchgStateDebug(RTTIMERLNXSTATE volatile *penmState, RTTIMERLNXSTATE enmNewState,
254 RTTIMERLNXSTATE enmCurState, uint32_t uLine)
255{
256 RTTIMERLNXSTATE enmOldState = enmCurState;
257 bool fRc = ASMAtomicCmpXchgExU32((uint32_t volatile *)penmState, enmNewState, enmCurState, (uint32_t *)&enmOldState);
258 RTTIMERLNX_LOG(("cxg %d -> %d - %d at %u\n", enmOldState, enmNewState, fRc, uLine));
259 return fRc;
260}
261#else
262DECLINLINE(bool) rtTimerLnxCmpXchgState(RTTIMERLNXSTATE volatile *penmState, RTTIMERLNXSTATE enmNewState,
263 RTTIMERLNXSTATE enmCurState)
264{
265 return ASMAtomicCmpXchgU32((uint32_t volatile *)penmState, enmNewState, enmCurState);
266}
267#endif
268
269
270/**
271 * Gets the state.
272 */
273DECLINLINE(RTTIMERLNXSTATE) rtTimerLnxGetState(RTTIMERLNXSTATE volatile *penmState)
274{
275 return (RTTIMERLNXSTATE)ASMAtomicUoReadU32((uint32_t volatile *)penmState);
276}
277
278#ifdef RTTIMER_LINUX_WITH_HRTIMER
279
280/**
281 * Converts a nanosecond timestamp to ktime_t.
282 *
283 * ASSUMES RTTimeSystemNanoTS() is implemented using ktime_get_ts().
284 *
285 * @returns ktime_t.
286 * @param cNanoSecs Nanoseconds.
287 */
288DECLINLINE(ktime_t) rtTimerLnxNanoToKt(uint64_t cNanoSecs)
289{
290 /* With some luck the compiler optimizes the division out of this... (Bet it doesn't.) */
291 return ktime_set(cNanoSecs / 1000000000, cNanoSecs % 1000000000);
292}
293
294/**
295 * Converts ktime_t to a nanosecond timestamp.
296 *
297 * ASSUMES RTTimeSystemNanoTS() is implemented using ktime_get_ts().
298 *
299 * @returns nanosecond timestamp.
300 * @param Kt ktime_t.
301 */
302DECLINLINE(uint64_t) rtTimerLnxKtToNano(ktime_t Kt)
303{
304 return ktime_to_ns(Kt);
305}
306
307#endif /* RTTIMER_LINUX_WITH_HRTIMER */
308
309/**
310 * Converts a nanosecond interval to jiffies.
311 *
312 * @returns Jiffies.
313 * @param cNanoSecs Nanoseconds.
314 */
315DECLINLINE(unsigned long) rtTimerLnxNanoToJiffies(uint64_t cNanoSecs)
316{
317 /* this can be made even better... */
318 if (cNanoSecs > (uint64_t)TICK_NSEC * MAX_JIFFY_OFFSET)
319 return MAX_JIFFY_OFFSET;
320# if ARCH_BITS == 32
321 if (RT_LIKELY(cNanoSecs <= UINT32_MAX))
322 return ((uint32_t)cNanoSecs + (TICK_NSEC-1)) / TICK_NSEC;
323# endif
324 return (cNanoSecs + (TICK_NSEC-1)) / TICK_NSEC;
325}
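/* Worked example, assuming a HZ=250 kernel where TICK_NSEC is 4 000 000 ns
 * (the code itself uses whatever TICK_NSEC the kernel was built with):
 *   rtTimerLnxNanoToJiffies(10 000 000) = (10 000 000 + 3 999 999) / 4 000 000 = 3 jiffies (2.5 rounded up),
 *   rtTimerLnxNanoToJiffies( 4 000 000) = exactly 1 jiffy,
 *   anything above TICK_NSEC * MAX_JIFFY_OFFSET is clamped to MAX_JIFFY_OFFSET. */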
326
327
328/**
329 * Starts a sub-timer (RTTimerStart).
330 *
331 * @param pSubTimer The sub-timer to start.
332 * @param u64Now The current timestamp (RTTimeSystemNanoTS()).
333 * @param u64First The interval from u64Now to the first time the timer should fire.
334 * @param fPinned true = timer pinned to a specific CPU,
335 * false = timer can migrate between CPUs
336 * @param fHighRes Whether the user requested a high resolution timer or not.
338 */
339static void rtTimerLnxStartSubTimer(PRTTIMERLNXSUBTIMER pSubTimer, uint64_t u64Now, uint64_t u64First,
340 bool fPinned, bool fHighRes)
341{
342 /*
343 * Calc when it should start firing.
344 */
345 uint64_t u64NextTS = u64Now + u64First;
346 if (!fHighRes)
347 pSubTimer->u.Std.u64NextTS = u64NextTS;
348 RTTIMERLNX_LOG(("startsubtimer %p\n", pSubTimer->pParent));
349
350 pSubTimer->iTick = 0;
351
352#ifdef RTTIMER_LINUX_WITH_HRTIMER
353 if (fHighRes)
354 hrtimer_start(&pSubTimer->u.Hr.LnxTimer, rtTimerLnxNanoToKt(u64NextTS),
355 fPinned ? HRTIMER_MODE_ABS_PINNED : HRTIMER_MODE_ABS);
356 else
357#endif
358 {
359 unsigned long cJiffies = !u64First ? 0 : rtTimerLnxNanoToJiffies(u64First);
360 pSubTimer->u.Std.ulNextJiffies = jiffies + cJiffies;
361 pSubTimer->u.Std.fFirstAfterChg = true;
362#ifdef CONFIG_SMP
363 if (fPinned)
364 {
365# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
366 mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
367# else
368 mod_timer_pinned(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
369# endif
370 }
371 else
372#endif
373 mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
374 }
375
376 /* Be a bit careful here since we could be racing the callback. */
377 if (!rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_STARTING))
378 rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_MP_STARTING);
379}
380
381
382/**
383 * Stops a sub-timer (RTTimerStop and rtTimerLinuxMpEvent()).
384 *
385 * The caller has already changed the state, so we will not be in a callback
386 * situation wrt the calling thread.
387 *
388 * @param pSubTimer The sub-timer.
389 * @param fHighRes Whether the user requested a high resolution timer or not.
390 */
391static void rtTimerLnxStopSubTimer(PRTTIMERLNXSUBTIMER pSubTimer, bool fHighRes)
392{
393 RTTIMERLNX_LOG(("stopsubtimer %p %d\n", pSubTimer->pParent, fHighRes));
394#ifdef RTTIMER_LINUX_WITH_HRTIMER
395 if (fHighRes)
396 {
397 /* There is no equivalent to del_timer in the hrtimer API,
398 hrtimer_cancel() == del_timer_sync(). Just like the WARN_ON in
399 del_timer_sync() asserts, waiting for a timer callback to complete
400 is deadlock prone, so don't do it. */
401 int rc = hrtimer_try_to_cancel(&pSubTimer->u.Hr.LnxTimer);
402 if (rc < 0)
403 {
404 hrtimer_start(&pSubTimer->u.Hr.LnxTimer, ktime_set(KTIME_SEC_MAX, 0), HRTIMER_MODE_ABS);
405 hrtimer_try_to_cancel(&pSubTimer->u.Hr.LnxTimer);
406 }
407 }
408 else
409#endif
410 del_timer(&pSubTimer->u.Std.LnxTimer);
411
412 rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED);
413}
414
415
416/**
417 * Used by RTTimerDestroy and rtTimerLnxCallbackDestroy to do the actual work.
418 *
419 * @param pTimer The timer in question.
420 */
421static void rtTimerLnxDestroyIt(PRTTIMER pTimer)
422{
423 RTSPINLOCK hSpinlock = pTimer->hSpinlock;
424 RTCPUID iCpu;
425 Assert(pTimer->fSuspended);
426 RTTIMERLNX_LOG(("destroyit %p\n", pTimer));
427
428 /*
429 * Remove the MP notifications first because it'll reduce the risk of
430 * us overtaking any MP event that might theoretically be racing us here.
431 */
432#ifdef CONFIG_SMP
433 if ( pTimer->cCpus > 1
434 && hSpinlock != NIL_RTSPINLOCK)
435 {
436 int rc = RTMpNotificationDeregister(rtTimerLinuxMpEvent, pTimer);
437 AssertRC(rc);
438 }
439#endif /* CONFIG_SMP */
440
441 /*
442 * Invalidate the handle.
443 */
444 ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
445
446 /*
447 * Make sure all timers have stopped executing since we're stopping them in
448 * an asynchronous manner up in rtTimerLnxStopSubTimer.
449 */
450 iCpu = pTimer->cCpus;
451 while (iCpu-- > 0)
452 {
453#ifdef RTTIMER_LINUX_WITH_HRTIMER
454 if (pTimer->fHighRes)
455 hrtimer_cancel(&pTimer->aSubTimers[iCpu].u.Hr.LnxTimer);
456 else
457#endif
458 del_timer_sync(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer);
459 }
460
461 /*
462 * Finally, free the resources.
463 */
464 RTMemFreeEx(pTimer, RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[pTimer->cCpus]));
465 if (hSpinlock != NIL_RTSPINLOCK)
466 RTSpinlockDestroy(hSpinlock);
467}
468
469
470/**
471 * Workqueue callback (no DECLCALLBACK!) for deferred destruction.
472 *
473 * @param pWork Pointer to the DtorWorkqueueItem member of our timer
474 * structure.
475 */
476static void rtTimerLnxDestroyDeferred(RTR0LNXWORKQUEUEITEM *pWork)
477{
478 PRTTIMER pTimer = RT_FROM_MEMBER(pWork, RTTIMER, DtorWorkqueueItem);
479 rtTimerLnxDestroyIt(pTimer);
480}
481
482
483/**
484 * Called when the timer was destroyed by the callback function.
485 *
486 * @param pTimer The timer.
487 * @param pSubTimer The sub-timer which we're handling, the state of this
488 * will be RTTIMERLNXSTATE_CB_DESTROYING.
489 */
490static void rtTimerLnxCallbackDestroy(PRTTIMER pTimer, PRTTIMERLNXSUBTIMER pSubTimer)
491{
492 /*
493 * If it's an omni timer, the last dude does the destroying.
494 */
495 if (pTimer->cCpus > 1)
496 {
497 uint32_t iCpu = pTimer->cCpus;
498 RTSpinlockAcquire(pTimer->hSpinlock);
499
500 Assert(pSubTimer->enmState == RTTIMERLNXSTATE_CB_DESTROYING);
501 rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED);
502
503 while (iCpu-- > 0)
504 if (rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState) != RTTIMERLNXSTATE_STOPPED)
505 {
506 RTSpinlockRelease(pTimer->hSpinlock);
507 return;
508 }
509
510 RTSpinlockRelease(pTimer->hSpinlock);
511 }
512
513 /*
514 * Destroying a timer from the callback is unsafe since the callout code
515 * might be touching the timer structure upon return (hrtimer does!). So,
516 * we have to defer the actual destruction to the IPRT workqueue.
517 */
518 rtR0LnxWorkqueuePush(&pTimer->DtorWorkqueueItem, rtTimerLnxDestroyDeferred);
519}
520
521
522#ifdef CONFIG_SMP
523/**
524 * Deal with a sub-timer that has migrated.
525 *
526 * @param pTimer The timer.
527 * @param pSubTimer The sub-timer.
528 */
529static void rtTimerLnxCallbackHandleMigration(PRTTIMER pTimer, PRTTIMERLNXSUBTIMER pSubTimer)
530{
531 RTTIMERLNXSTATE enmState;
532 if (pTimer->cCpus > 1)
533 RTSpinlockAcquire(pTimer->hSpinlock);
534
535 do
536 {
537 enmState = rtTimerLnxGetState(&pSubTimer->enmState);
538 switch (enmState)
539 {
540 case RTTIMERLNXSTATE_STOPPING:
541 case RTTIMERLNXSTATE_MP_STOPPING:
542 enmState = RTTIMERLNXSTATE_STOPPED; RT_FALL_THRU();
543 case RTTIMERLNXSTATE_STOPPED:
544 break;
545
546 default:
547 AssertMsgFailed(("%d\n", enmState)); RT_FALL_THRU();
548 case RTTIMERLNXSTATE_STARTING:
549 case RTTIMERLNXSTATE_MP_STARTING:
550 case RTTIMERLNXSTATE_ACTIVE:
551 case RTTIMERLNXSTATE_CALLBACK:
552 case RTTIMERLNXSTATE_CB_STOPPING:
553 case RTTIMERLNXSTATE_CB_RESTARTING:
554 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, enmState))
555 enmState = RTTIMERLNXSTATE_STOPPED;
556 break;
557
558 case RTTIMERLNXSTATE_CB_DESTROYING:
559 {
560 if (pTimer->cCpus > 1)
561 RTSpinlockRelease(pTimer->hSpinlock);
562
563 rtTimerLnxCallbackDestroy(pTimer, pSubTimer);
564 return;
565 }
566 }
567 } while (enmState != RTTIMERLNXSTATE_STOPPED);
568
569 if (pTimer->cCpus > 1)
570 RTSpinlockRelease(pTimer->hSpinlock);
571}
572#endif /* CONFIG_SMP */
573
574
575/**
576 * The slow path of rtTimerLnxChangeToCallbackState.
577 *
578 * @returns true if changed successfully, false if not.
579 * @param pSubTimer The sub-timer.
580 */
581static bool rtTimerLnxChangeToCallbackStateSlow(PRTTIMERLNXSUBTIMER pSubTimer)
582{
583 for (;;)
584 {
585 RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pSubTimer->enmState);
586 switch (enmState)
587 {
588 case RTTIMERLNXSTATE_ACTIVE:
589 case RTTIMERLNXSTATE_STARTING:
590 case RTTIMERLNXSTATE_MP_STARTING:
591 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_CALLBACK, enmState))
592 return true;
593 break;
594
595 case RTTIMERLNXSTATE_CALLBACK:
596 case RTTIMERLNXSTATE_CB_STOPPING:
597 case RTTIMERLNXSTATE_CB_RESTARTING:
598 case RTTIMERLNXSTATE_CB_DESTROYING:
599 AssertMsgFailed(("%d\n", enmState)); RT_FALL_THRU();
600 default:
601 return false;
602 }
603 ASMNopPause();
604 }
605}
606
607
608/**
609 * Tries to change the sub-timer state to 'callback'.
610 *
611 * @returns true if changed successfully, false if not.
612 * @param pSubTimer The sub-timer.
613 */
614DECLINLINE(bool) rtTimerLnxChangeToCallbackState(PRTTIMERLNXSUBTIMER pSubTimer)
615{
616 if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_CALLBACK, RTTIMERLNXSTATE_ACTIVE)))
617 return true;
618 return rtTimerLnxChangeToCallbackStateSlow(pSubTimer);
619}
620
621
622#ifdef RTTIMER_LINUX_WITH_HRTIMER
623/**
624 * Timer callback function for high resolution timers.
625 *
626 * @returns HRTIMER_NORESTART or HRTIMER_RESTART depending on whether it's a
627 * one-shot or interval timer.
628 * @param pHrTimer Pointer to the sub-timer structure.
629 */
630static enum hrtimer_restart rtTimerLinuxHrCallback(struct hrtimer *pHrTimer)
631{
632 PRTTIMERLNXSUBTIMER pSubTimer = RT_FROM_MEMBER(pHrTimer, RTTIMERLNXSUBTIMER, u.Hr.LnxTimer);
633 PRTTIMER pTimer = pSubTimer->pParent;
634
635
636 RTTIMERLNX_LOG(("hrcallback %p\n", pTimer));
637 if (RT_UNLIKELY(!rtTimerLnxChangeToCallbackState(pSubTimer)))
638 return HRTIMER_NORESTART;
639
640#ifdef CONFIG_SMP
641 /*
642 * Check for unwanted migration.
643 */
644 if (pTimer->fAllCpus || pTimer->fSpecificCpu)
645 {
646 RTCPUID idCpu = RTMpCpuId();
647 if (RT_UNLIKELY( pTimer->fAllCpus
648 ? (RTCPUID)(pSubTimer - &pTimer->aSubTimers[0]) != idCpu
649 : pTimer->idCpu != idCpu))
650 {
651 rtTimerLnxCallbackHandleMigration(pTimer, pSubTimer);
652 return HRTIMER_NORESTART;
653 }
654 }
655#endif
656
657 if (pTimer->u64NanoInterval)
658 {
659 /*
660 * Periodic timer, run it and update the native timer afterwards so
661 * we can handle RTTimerStop and RTTimerChangeInterval from the
662 * callback as well as a racing control thread.
663 */
664 pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
665 hrtimer_add_expires_ns(&pSubTimer->u.Hr.LnxTimer, ASMAtomicReadU64(&pTimer->u64NanoInterval));
666 if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CALLBACK)))
667 return HRTIMER_RESTART;
668 }
669 else
670 {
671 /*
672 * One shot timer (no omni), stop it before dispatching it.
673 * Allow RTTimerStart as well as RTTimerDestroy to be called from
674 * the callback.
675 */
676 ASMAtomicWriteBool(&pTimer->fSuspended, true);
677 pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
678 if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CALLBACK)))
679 return HRTIMER_NORESTART;
680 }
681
682 /*
683 * Some state change occurred while we were in the callback routine.
684 */
685 for (;;)
686 {
687 RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pSubTimer->enmState);
688 switch (enmState)
689 {
690 case RTTIMERLNXSTATE_CB_DESTROYING:
691 rtTimerLnxCallbackDestroy(pTimer, pSubTimer);
692 return HRTIMER_NORESTART;
693
694 case RTTIMERLNXSTATE_CB_STOPPING:
695 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CB_STOPPING))
696 return HRTIMER_NORESTART;
697 break;
698
699 case RTTIMERLNXSTATE_CB_RESTARTING:
700 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CB_RESTARTING))
701 {
702 pSubTimer->iTick = 0;
703 hrtimer_set_expires(&pSubTimer->u.Hr.LnxTimer, rtTimerLnxNanoToKt(pSubTimer->uNsRestartAt));
704 return HRTIMER_RESTART;
705 }
706 break;
707
708 default:
709 AssertMsgFailed(("%d\n", enmState));
710 return HRTIMER_NORESTART;
711 }
712 ASMNopPause();
713 }
714}
715#endif /* RTTIMER_LINUX_WITH_HRTIMER */
716
717
718#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
719/**
720 * Timer callback function for standard timers.
721 *
722 * @param pLnxTimer Pointer to the Linux timer structure.
723 */
724static void rtTimerLinuxStdCallback(struct timer_list *pLnxTimer)
725{
726 PRTTIMERLNXSUBTIMER pSubTimer = from_timer(pSubTimer, pLnxTimer, u.Std.LnxTimer);
727#else
728/**
729 * Timer callback function for standard timers.
730 *
731 * @param ulUser Address of the sub-timer structure.
732 */
733static void rtTimerLinuxStdCallback(unsigned long ulUser)
734{
735 PRTTIMERLNXSUBTIMER pSubTimer = (PRTTIMERLNXSUBTIMER)ulUser;
736#endif
737 PRTTIMER pTimer = pSubTimer->pParent;
738
739 RTTIMERLNX_LOG(("stdcallback %p\n", pTimer));
740 if (RT_UNLIKELY(!rtTimerLnxChangeToCallbackState(pSubTimer)))
741 return;
742
743#ifdef CONFIG_SMP
744 /*
745 * Check for unwanted migration.
746 */
747 if (pTimer->fAllCpus || pTimer->fSpecificCpu)
748 {
749 RTCPUID idCpu = RTMpCpuId();
750 if (RT_UNLIKELY( pTimer->fAllCpus
751 ? (RTCPUID)(pSubTimer - &pTimer->aSubTimers[0]) != idCpu
752 : pTimer->idCpu != idCpu))
753 {
754 rtTimerLnxCallbackHandleMigration(pTimer, pSubTimer);
755 return;
756 }
757 }
758#endif
759
760 if (pTimer->u64NanoInterval)
761 {
762 /*
763 * Interval timer, calculate the next timeout.
764 *
765 * The first time around, we'll re-adjust the u.Std.u64NextTS to
766 * try to prevent some jittering if we were started at a bad time.
767 */
768 const uint64_t iTick = ++pSubTimer->iTick;
769 uint64_t u64NanoInterval;
770 unsigned long cJiffies;
771 unsigned long flFlags;
772
773 spin_lock_irqsave(&pTimer->ChgIntLock, flFlags);
774 u64NanoInterval = pTimer->u64NanoInterval;
775 cJiffies = pTimer->cJiffies;
776 if (RT_UNLIKELY(pSubTimer->u.Std.fFirstAfterChg))
777 {
778 pSubTimer->u.Std.fFirstAfterChg = false;
779 pSubTimer->u.Std.u64NextTS = RTTimeSystemNanoTS();
780 pSubTimer->u.Std.ulNextJiffies = jiffies;
781 }
782 spin_unlock_irqrestore(&pTimer->ChgIntLock, flFlags);
783
784 pSubTimer->u.Std.u64NextTS += u64NanoInterval;
785 if (cJiffies)
786 {
787 pSubTimer->u.Std.ulNextJiffies += cJiffies;
788 /* Prevent overflows when the jiffies counter wraps around.
789 * Special thanks to Ken Preslan for helping with the debugging! */
790 while (time_before(pSubTimer->u.Std.ulNextJiffies, jiffies))
791 {
792 pSubTimer->u.Std.ulNextJiffies += cJiffies;
793 pSubTimer->u.Std.u64NextTS += u64NanoInterval;
794 }
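/* Behaviour sketch, assuming HZ=250 (4 ms per jiffy) and cJiffies = 2:
   if the callback was delayed past one or more periods, the loop above
   keeps adding 2 jiffies (and u64NanoInterval to u64NextTS) until the
   next expiry lies in the future again, so missed periods are skipped
   rather than fired back-to-back; time_before() keeps the comparison
   correct across a jiffies wrap-around. */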
795 }
796 else
797 {
798 const uint64_t u64NanoTS = RTTimeSystemNanoTS();
799 while (pSubTimer->u.Std.u64NextTS < u64NanoTS)
800 pSubTimer->u.Std.u64NextTS += u64NanoInterval;
801 pSubTimer->u.Std.ulNextJiffies = jiffies + rtTimerLnxNanoToJiffies(pSubTimer->u.Std.u64NextTS - u64NanoTS);
802 }
803
804 /*
805 * Run the timer and re-arm it unless the state changed.
806 *
807 * We must re-arm it afterwards as we're not in a position to undo this
808 * operation if for instance someone stopped or destroyed us while we
809 * were in the callback. (Linux takes care of any races here.)
810 */
811 pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
812 if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CALLBACK)))
813 {
814#ifdef CONFIG_SMP
815 if (pTimer->fSpecificCpu || pTimer->fAllCpus)
816 {
817# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
818 mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
819# else
820 mod_timer_pinned(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
821# endif
822 }
823 else
824#endif
825 mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
826 return;
827 }
828 }
829 else
830 {
831 /*
832 * One shot timer, stop it before dispatching it.
833 * Allow RTTimerStart as well as RTTimerDestroy to be called from
834 * the callback.
835 */
836 ASMAtomicWriteBool(&pTimer->fSuspended, true);
837 pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
838 if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CALLBACK)))
839 return;
840 }
841
842 /*
843 * Some state change occurred while we were in the callback routine.
844 */
845 for (;;)
846 {
847 RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pSubTimer->enmState);
848 switch (enmState)
849 {
850 case RTTIMERLNXSTATE_CB_DESTROYING:
851 rtTimerLnxCallbackDestroy(pTimer, pSubTimer);
852 return;
853
854 case RTTIMERLNXSTATE_CB_STOPPING:
855 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CB_STOPPING))
856 return;
857 break;
858
859 case RTTIMERLNXSTATE_CB_RESTARTING:
860 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CB_RESTARTING))
861 {
862 uint64_t u64NanoTS;
863 uint64_t u64NextTS;
864 unsigned long flFlags;
865
866 spin_lock_irqsave(&pTimer->ChgIntLock, flFlags);
867 u64NextTS = pSubTimer->uNsRestartAt;
868 u64NanoTS = RTTimeSystemNanoTS();
869 pSubTimer->iTick = 0;
870 pSubTimer->u.Std.u64NextTS = u64NextTS;
871 pSubTimer->u.Std.fFirstAfterChg = true;
872 pSubTimer->u.Std.ulNextJiffies = u64NextTS > u64NanoTS
873 ? jiffies + rtTimerLnxNanoToJiffies(u64NextTS - u64NanoTS)
874 : jiffies;
875 spin_unlock_irqrestore(&pTimer->ChgIntLock, flFlags);
876
877#ifdef CONFIG_SMP
878 if (pTimer->fSpecificCpu || pTimer->fAllCpus)
879 {
880# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
881 mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
882# else
883 mod_timer_pinned(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
884# endif
885 }
886 else
887#endif
888 mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
889 return;
890 }
891 break;
892
893 default:
894 AssertMsgFailed(("%d\n", enmState));
895 return;
896 }
897 ASMNopPause();
898 }
899}
900
901
902#ifdef CONFIG_SMP
903
904/**
905 * Per-cpu callback function (RTMpOnAll/RTMpOnSpecific).
906 *
907 * @param idCpu The current CPU.
908 * @param pvUser1 Pointer to the timer.
909 * @param pvUser2 Pointer to the argument structure.
910 */
911static DECLCALLBACK(void) rtTimerLnxStartAllOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
912{
913 PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
914 PRTTIMER pTimer = (PRTTIMER)pvUser1;
915 Assert(idCpu < pTimer->cCpus);
916 rtTimerLnxStartSubTimer(&pTimer->aSubTimers[idCpu], pArgs->u64Now, pArgs->u64First, true /*fPinned*/, pTimer->fHighRes);
917}
918
919
920/**
921 * Worker for RTTimerStart() that takes care of the ugly bits.
922 *
923 * @returns RTTimerStart() return value.
924 * @param pTimer The timer.
925 * @param pArgs The argument structure.
926 */
927static int rtTimerLnxOmniStart(PRTTIMER pTimer, PRTTIMERLINUXSTARTONCPUARGS pArgs)
928{
929 RTCPUID iCpu;
930 RTCPUSET OnlineSet;
931 RTCPUSET OnlineSet2;
932 int rc2;
933
934 /*
935 * Prepare all the sub-timers for the startup and then flag the timer
936 * as a whole as non-suspended. Make sure we get them all before
937 * clearing fSuspended, as the MP handler will be waiting on this
938 * should something happen while we're looping.
939 */
940 RTSpinlockAcquire(pTimer->hSpinlock);
941
942 /* Just make it an omni timer restriction that no stop/start races are allowed. */
943 for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
944 if (rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState) != RTTIMERLNXSTATE_STOPPED)
945 {
946 RTSpinlockRelease(pTimer->hSpinlock);
947 return VERR_TIMER_BUSY;
948 }
949
950 do
951 {
952 RTMpGetOnlineSet(&OnlineSet);
953 for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
954 {
955 Assert(pTimer->aSubTimers[iCpu].enmState != RTTIMERLNXSTATE_MP_STOPPING);
956 rtTimerLnxSetState(&pTimer->aSubTimers[iCpu].enmState,
957 RTCpuSetIsMember(&OnlineSet, iCpu)
958 ? RTTIMERLNXSTATE_STARTING
959 : RTTIMERLNXSTATE_STOPPED);
960 }
961 } while (!RTCpuSetIsEqual(&OnlineSet, RTMpGetOnlineSet(&OnlineSet2)));
962
963 ASMAtomicWriteBool(&pTimer->fSuspended, false);
964
965 RTSpinlockRelease(pTimer->hSpinlock);
966
967 /*
968 * Start them (can't find any exported function that allows me to
969 * do this without the cross calls).
970 */
971 pArgs->u64Now = RTTimeSystemNanoTS();
972 rc2 = RTMpOnAll(rtTimerLnxStartAllOnCpu, pTimer, pArgs);
973 AssertRC(rc2); /* screw this if it fails. */
974
975 /*
976 * Reset the sub-timers that didn't start up (ALL CPUs case).
977 */
978 RTSpinlockAcquire(pTimer->hSpinlock);
979
980 for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
981 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_STARTING))
982 {
983 /** @todo very odd case for a rainy day. CPUs that temporarily went offline while
984 * we were between calls need to be nudged as the MP handler will ignore events for
985 * them because of the STARTING state. This is an extremely unlikely case - not that
986 * that means anything in my experience... ;-) */
987 RTTIMERLNX_LOG(("what!? iCpu=%u -> didn't start\n", iCpu));
988 }
989
990 RTSpinlockRelease(pTimer->hSpinlock);
991
992 return VINF_SUCCESS;
993}
994
995
996/**
997 * Worker for RTTimerStop() that takes care of the ugly SMP bits.
998 *
999 * @returns true if there were any active callbacks, false if not.
1000 * @param pTimer The timer (valid).
1001 * @param fForDestroy Whether this is for RTTimerDestroy or not.
1002 */
1003static bool rtTimerLnxOmniStop(PRTTIMER pTimer, bool fForDestroy)
1004{
1005 bool fActiveCallbacks = false;
1006 RTCPUID iCpu;
1007 RTTIMERLNXSTATE enmState;
1008
1009
1010 /*
1011 * Mark the timer as suspended and flag all timers as stopping, except
1012 * for those being stopped by an MP event.
1013 */
1014 RTSpinlockAcquire(pTimer->hSpinlock);
1015
1016 ASMAtomicWriteBool(&pTimer->fSuspended, true);
1017 for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
1018 {
1019 for (;;)
1020 {
1021 enmState = rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState);
1022 if ( enmState == RTTIMERLNXSTATE_STOPPED
1023 || enmState == RTTIMERLNXSTATE_MP_STOPPING)
1024 break;
1025 if ( enmState == RTTIMERLNXSTATE_CALLBACK
1026 || enmState == RTTIMERLNXSTATE_CB_STOPPING
1027 || enmState == RTTIMERLNXSTATE_CB_RESTARTING)
1028 {
1029 Assert(enmState != RTTIMERLNXSTATE_CB_STOPPING || fForDestroy);
1030 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState,
1031 !fForDestroy ? RTTIMERLNXSTATE_CB_STOPPING : RTTIMERLNXSTATE_CB_DESTROYING,
1032 enmState))
1033 {
1034 fActiveCallbacks = true;
1035 break;
1036 }
1037 }
1038 else
1039 {
1040 Assert(enmState == RTTIMERLNXSTATE_ACTIVE);
1041 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState, RTTIMERLNXSTATE_STOPPING, enmState))
1042 break;
1043 }
1044 ASMNopPause();
1045 }
1046 }
1047
1048 RTSpinlockRelease(pTimer->hSpinlock);
1049
1050 /*
1051 * Do the actual stopping. Fortunately, this doesn't require any IPIs.
1052 * Unfortunately it cannot be done synchronously.
1053 */
1054 for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
1055 if (rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState) == RTTIMERLNXSTATE_STOPPING)
1056 rtTimerLnxStopSubTimer(&pTimer->aSubTimers[iCpu], pTimer->fHighRes);
1057
1058 return fActiveCallbacks;
1059}
1060
1061
1062/**
1063 * Per-cpu callback function (RTMpOnSpecific) used by rtTimerLinuxMpEvent()
1064 * to start a sub-timer on a cpu that has just come online.
1065 *
1066 * @param idCpu The current CPU.
1067 * @param pvUser1 Pointer to the timer.
1068 * @param pvUser2 Pointer to the argument structure.
1069 */
1070static DECLCALLBACK(void) rtTimerLinuxMpStartOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
1071{
1072 PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
1073 PRTTIMER pTimer = (PRTTIMER)pvUser1;
1074 RTSPINLOCK hSpinlock;
1075 Assert(idCpu < pTimer->cCpus);
1076
1077 /*
1078 * We have to be kind of careful here as we might be racing RTTimerStop
1079 * (and/or RTTimerDestroy), thus the paranoia.
1080 */
1081 hSpinlock = pTimer->hSpinlock;
1082 if ( hSpinlock != NIL_RTSPINLOCK
1083 && pTimer->u32Magic == RTTIMER_MAGIC)
1084 {
1085 RTSpinlockAcquire(hSpinlock);
1086
1087 if ( !ASMAtomicUoReadBool(&pTimer->fSuspended)
1088 && pTimer->u32Magic == RTTIMER_MAGIC)
1089 {
1090 /* We're sane and the timer is not suspended yet. */
1091 PRTTIMERLNXSUBTIMER pSubTimer = &pTimer->aSubTimers[idCpu];
1092 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STARTING, RTTIMERLNXSTATE_STOPPED))
1093 rtTimerLnxStartSubTimer(pSubTimer, pArgs->u64Now, pArgs->u64First, true /*fPinned*/, pTimer->fHighRes);
1094 }
1095
1096 RTSpinlockRelease(hSpinlock);
1097 }
1098}
1099
1100
1101/**
1102 * MP event notification callback.
1103 *
1104 * @param enmEvent The event.
1105 * @param idCpu The cpu it applies to.
1106 * @param pvUser The timer.
1107 */
1108static DECLCALLBACK(void) rtTimerLinuxMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
1109{
1110 PRTTIMER pTimer = (PRTTIMER)pvUser;
1111 PRTTIMERLNXSUBTIMER pSubTimer = &pTimer->aSubTimers[idCpu];
1112 RTSPINLOCK hSpinlock;
1113
1114 Assert(idCpu < pTimer->cCpus);
1115
1116 /*
1117 * Some initial paranoia.
1118 */
1119 if (pTimer->u32Magic != RTTIMER_MAGIC)
1120 return;
1121 hSpinlock = pTimer->hSpinlock;
1122 if (hSpinlock == NIL_RTSPINLOCK)
1123 return;
1124
1125 RTSpinlockAcquire(hSpinlock);
1126
1127 /* Is it active? */
1128 if ( !ASMAtomicUoReadBool(&pTimer->fSuspended)
1129 && pTimer->u32Magic == RTTIMER_MAGIC)
1130 {
1131 switch (enmEvent)
1132 {
1133 /*
1134 * Try to do it without leaving the spinlock, but if we have to, retake it
1135 * when we're on the right cpu.
1136 */
1137 case RTMPEVENT_ONLINE:
1138 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STARTING, RTTIMERLNXSTATE_STOPPED))
1139 {
1140 RTTIMERLINUXSTARTONCPUARGS Args;
1141 Args.u64Now = RTTimeSystemNanoTS();
1142 Args.u64First = 0;
1143
1144 if (RTMpCpuId() == idCpu)
1145 rtTimerLnxStartSubTimer(pSubTimer, Args.u64Now, Args.u64First, true /*fPinned*/, pTimer->fHighRes);
1146 else
1147 {
1148 rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED); /* we'll recheck it. */
1149 RTSpinlockRelease(hSpinlock);
1150
1151 RTMpOnSpecific(idCpu, rtTimerLinuxMpStartOnCpu, pTimer, &Args);
1152 return; /* we've left the spinlock */
1153 }
1154 }
1155 break;
1156
1157 /*
1158 * The CPU is (going) offline, make sure the sub-timer is stopped.
1159 *
1160 * Linux will migrate it to a different CPU, but we don't want this. The
1161 * timer function is checking for this.
1162 */
1163 case RTMPEVENT_OFFLINE:
1164 {
1165 RTTIMERLNXSTATE enmState;
1166 while ( (enmState = rtTimerLnxGetState(&pSubTimer->enmState)) == RTTIMERLNXSTATE_ACTIVE
1167 || enmState == RTTIMERLNXSTATE_CALLBACK
1168 || enmState == RTTIMERLNXSTATE_CB_RESTARTING)
1169 {
1170 if (enmState == RTTIMERLNXSTATE_ACTIVE)
1171 {
1172 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STOPPING, RTTIMERLNXSTATE_ACTIVE))
1173 {
1174 RTSpinlockRelease(hSpinlock);
1175
1176 rtTimerLnxStopSubTimer(pSubTimer, pTimer->fHighRes);
1177 return; /* we've left the spinlock */
1178 }
1179 }
1180 else if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_CB_STOPPING, enmState))
1181 break;
1182
1183 /* State not stable, try again. */
1184 ASMNopPause();
1185 }
1186 break;
1187 }
1188 }
1189 }
1190
1191 RTSpinlockRelease(hSpinlock);
1192}
1193
1194#endif /* CONFIG_SMP */
1195
1196
1197/**
1198 * Callback function used by RTTimerStart via RTMpOnSpecific to start a timer
1199 * running on a specific CPU.
1200 *
1201 * @param idCpu The current CPU.
1202 * @param pvUser1 Pointer to the timer.
1203 * @param pvUser2 Pointer to the argument structure.
1204 */
1205static DECLCALLBACK(void) rtTimerLnxStartOnSpecificCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
1206{
1207 PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
1208 PRTTIMER pTimer = (PRTTIMER)pvUser1;
1209 RT_NOREF_PV(idCpu);
1210 rtTimerLnxStartSubTimer(&pTimer->aSubTimers[0], pArgs->u64Now, pArgs->u64First, true /*fPinned*/, pTimer->fHighRes);
1211}
1212
1213
1214RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
1215{
1216 RTTIMERLINUXSTARTONCPUARGS Args;
1217 int rc2;
1218 IPRT_LINUX_SAVE_EFL_AC();
1219
1220 /*
1221 * Validate.
1222 */
1223 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
1224 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
1225
1226 if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
1227 return VERR_TIMER_ACTIVE;
1228 RTTIMERLNX_LOG(("start %p cCpus=%d\n", pTimer, pTimer->cCpus));
1229
1230 Args.u64First = u64First;
1231#ifdef CONFIG_SMP
1232 /*
1233 * Omni timer?
1234 */
1235 if (pTimer->fAllCpus)
1236 {
1237 rc2 = rtTimerLnxOmniStart(pTimer, &Args);
1238 IPRT_LINUX_RESTORE_EFL_AC();
1239 return rc2;
1240 }
1241#endif
1242
1243 /*
1244 * Simple timer - Pretty straightforward if it wasn't for restarting.
1245 */
1246 Args.u64Now = RTTimeSystemNanoTS();
1247 ASMAtomicWriteU64(&pTimer->aSubTimers[0].uNsRestartAt, Args.u64Now + u64First);
1248 for (;;)
1249 {
1250 RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pTimer->aSubTimers[0].enmState);
1251 switch (enmState)
1252 {
1253 case RTTIMERLNXSTATE_STOPPED:
1254 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STARTING, RTTIMERLNXSTATE_STOPPED))
1255 {
1256 ASMAtomicWriteBool(&pTimer->fSuspended, false);
1257 if (!pTimer->fSpecificCpu)
1258 rtTimerLnxStartSubTimer(&pTimer->aSubTimers[0], Args.u64Now, Args.u64First,
1259 false /*fPinned*/, pTimer->fHighRes);
1260 else
1261 {
1262 rc2 = RTMpOnSpecific(pTimer->idCpu, rtTimerLnxStartOnSpecificCpu, pTimer, &Args);
1263 if (RT_FAILURE(rc2))
1264 {
1265 /* Suspend it, the cpu id is probably invalid or offline. */
1266 ASMAtomicWriteBool(&pTimer->fSuspended, true);
1267 rtTimerLnxSetState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STOPPED);
1268 return rc2;
1269 }
1270 }
1271 IPRT_LINUX_RESTORE_EFL_AC();
1272 return VINF_SUCCESS;
1273 }
1274 break;
1275
1276 case RTTIMERLNXSTATE_CALLBACK:
1277 case RTTIMERLNXSTATE_CB_STOPPING:
1278 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_CB_RESTARTING, enmState))
1279 {
1280 ASMAtomicWriteBool(&pTimer->fSuspended, false);
1281 IPRT_LINUX_RESTORE_EFL_AC();
1282 return VINF_SUCCESS;
1283 }
1284 break;
1285
1286 default:
1287 AssertMsgFailed(("%d\n", enmState));
1288 IPRT_LINUX_RESTORE_EFL_AC();
1289 return VERR_INTERNAL_ERROR_4;
1290 }
1291 ASMNopPause();
1292 }
1293}
1294RT_EXPORT_SYMBOL(RTTimerStart);
1295
1296
1297/**
1298 * Common worker for RTTimerStop and RTTimerDestroy.
1299 *
1300 * @returns true if there were any active callbacks, false if not.
1301 * @param pTimer The timer to stop.
1302 * @param fForDestroy Whether it's RTTimerDestroy calling or not.
1303 */
1304static bool rtTimerLnxStop(PRTTIMER pTimer, bool fForDestroy)
1305{
1306 RTTIMERLNX_LOG(("lnxstop %p %d\n", pTimer, fForDestroy));
1307#ifdef CONFIG_SMP
1308 /*
1309 * Omni timer?
1310 */
1311 if (pTimer->fAllCpus)
1312 return rtTimerLnxOmniStop(pTimer, fForDestroy);
1313#endif
1314
1315 /*
1316 * Simple timer.
1317 */
1318 ASMAtomicWriteBool(&pTimer->fSuspended, true);
1319 for (;;)
1320 {
1321 RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pTimer->aSubTimers[0].enmState);
1322 switch (enmState)
1323 {
1324 case RTTIMERLNXSTATE_ACTIVE:
1325 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STOPPING, RTTIMERLNXSTATE_ACTIVE))
1326 {
1327 rtTimerLnxStopSubTimer(&pTimer->aSubTimers[0], pTimer->fHighRes);
1328 return false;
1329 }
1330 break;
1331
1332 case RTTIMERLNXSTATE_CALLBACK:
1333 case RTTIMERLNXSTATE_CB_RESTARTING:
1334 case RTTIMERLNXSTATE_CB_STOPPING:
1335 Assert(enmState != RTTIMERLNXSTATE_CB_STOPPING || fForDestroy);
1336 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState,
1337 !fForDestroy ? RTTIMERLNXSTATE_CB_STOPPING : RTTIMERLNXSTATE_CB_DESTROYING,
1338 enmState))
1339 return true;
1340 break;
1341
1342 case RTTIMERLNXSTATE_STOPPED:
1343 return false;
1344
1345 case RTTIMERLNXSTATE_CB_DESTROYING:
1346 AssertMsgFailed(("enmState=%d pTimer=%p\n", enmState, pTimer));
1347 return true;
1348
1349 default:
1350 case RTTIMERLNXSTATE_STARTING:
1351 case RTTIMERLNXSTATE_MP_STARTING:
1352 case RTTIMERLNXSTATE_STOPPING:
1353 case RTTIMERLNXSTATE_MP_STOPPING:
1354 AssertMsgFailed(("enmState=%d pTimer=%p\n", enmState, pTimer));
1355 return false;
1356 }
1357
1358 /* State not stable, try again. */
1359 ASMNopPause();
1360 }
1361}
1362
1363
1364RTDECL(int) RTTimerStop(PRTTIMER pTimer)
1365{
1366 /*
1367 * Validate.
1368 */
1369 IPRT_LINUX_SAVE_EFL_AC();
1370 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
1371 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
1372 RTTIMERLNX_LOG(("stop %p\n", pTimer));
1373
1374 if (ASMAtomicUoReadBool(&pTimer->fSuspended))
1375 return VERR_TIMER_SUSPENDED;
1376
1377 rtTimerLnxStop(pTimer, false /*fForDestroy*/);
1378
1379 IPRT_LINUX_RESTORE_EFL_AC();
1380 return VINF_SUCCESS;
1381}
1382RT_EXPORT_SYMBOL(RTTimerStop);
1383
1384
1385RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
1386{
1387 unsigned long cJiffies;
1388 unsigned long flFlags;
1389 IPRT_LINUX_SAVE_EFL_AC();
1390
1391 /*
1392 * Validate.
1393 */
1394 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
1395 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
1396 AssertReturn(u64NanoInterval, VERR_INVALID_PARAMETER);
1397 AssertReturn(u64NanoInterval < UINT64_MAX / 8, VERR_INVALID_PARAMETER);
1398 AssertReturn(pTimer->u64NanoInterval, VERR_INVALID_STATE);
1399 RTTIMERLNX_LOG(("change %p %llu\n", pTimer, u64NanoInterval));
1400
1401#ifdef RTTIMER_LINUX_WITH_HRTIMER
1402 /*
1403 * For the high resolution timers it is easy since we don't care so much
1404 * about when it is applied to the sub-timers.
1405 */
1406 if (pTimer->fHighRes)
1407 {
1408 ASMAtomicWriteU64(&pTimer->u64NanoInterval, u64NanoInterval);
1409 IPRT_LINUX_RESTORE_EFL_AC();
1410 return VINF_SUCCESS;
1411 }
1412#endif
1413
1414 /*
1415 * Standard timers have a bit more complicated way of calculating
1416 * their interval and such. So, forget omni timers for now.
1417 */
1418 if (pTimer->cCpus > 1)
1419 return VERR_NOT_SUPPORTED;
1420
1421 cJiffies = u64NanoInterval / RTTimerGetSystemGranularity();
1422 if (cJiffies * RTTimerGetSystemGranularity() != u64NanoInterval)
1423 cJiffies = 0;
1424
1425 spin_lock_irqsave(&pTimer->ChgIntLock, flFlags);
1426 pTimer->aSubTimers[0].u.Std.fFirstAfterChg = true;
1427 pTimer->cJiffies = cJiffies;
1428 ASMAtomicWriteU64(&pTimer->u64NanoInterval, u64NanoInterval);
1429 spin_unlock_irqrestore(&pTimer->ChgIntLock, flFlags);
1430 IPRT_LINUX_RESTORE_EFL_AC();
1431 return VINF_SUCCESS;
1432}
1433RT_EXPORT_SYMBOL(RTTimerChangeInterval);
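/* A small sketch of the cJiffies calculation above, assuming
 * RTTimerGetSystemGranularity() returns 4 000 000 ns (a HZ=250 kernel):
 *   u64NanoInterval =  8 000 000 -> cJiffies = 2  (exact multiple, the jiffies path is used)
 *   u64NanoInterval = 10 000 000 -> 10 000 000 / 4 000 000 = 2, but 2 * 4 000 000 != 10 000 000,
 *                                   so cJiffies = 0 and rtTimerLinuxStdCallback falls back to
 *                                   the nanosecond bookkeeping instead. */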
1434
1435
1436RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
1437{
1438 bool fCanDestroy;
1439 IPRT_LINUX_SAVE_EFL_AC();
1440
1441 /*
1442 * Validate. It's ok to pass a NULL pointer.
1443 */
1444 if (pTimer == /*NIL_RTTIMER*/ NULL)
1445 return VINF_SUCCESS;
1446 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
1447 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
1448 RTTIMERLNX_LOG(("destroy %p\n", pTimer));
1449/** @todo We should invalidate the magic here! */
1450
1451 /*
1452 * Stop the timer if it's still active, then destroy it if we can.
1453 */
1454 if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
1455 fCanDestroy = !rtTimerLnxStop(pTimer, true /*fForDestroy*/);
1456 else
1457 {
1458 uint32_t iCpu = pTimer->cCpus;
1459 if (pTimer->cCpus > 1)
1460 RTSpinlockAcquire(pTimer->hSpinlock);
1461
1462 fCanDestroy = true;
1463 while (iCpu-- > 0)
1464 {
1465 for (;;)
1466 {
1467 RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState);
1468 switch (enmState)
1469 {
1470 case RTTIMERLNXSTATE_CALLBACK:
1471 case RTTIMERLNXSTATE_CB_RESTARTING:
1472 case RTTIMERLNXSTATE_CB_STOPPING:
1473 if (!rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState, RTTIMERLNXSTATE_CB_DESTROYING, enmState))
1474 continue;
1475 fCanDestroy = false;
1476 break;
1477
1478 case RTTIMERLNXSTATE_CB_DESTROYING:
1479 AssertMsgFailed(("%d\n", enmState));
1480 fCanDestroy = false;
1481 break;
1482 default:
1483 break;
1484 }
1485 break;
1486 }
1487 }
1488
1489 if (pTimer->cCpus > 1)
1490 RTSpinlockRelease(pTimer->hSpinlock);
1491 }
1492
1493 if (fCanDestroy)
1494 {
1495 /* For paranoid reasons, defer actually destroying the timer when
1496 in atomic or interrupt context. */
1497#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 32)
1498 if (in_atomic() || in_interrupt())
1499#else
1500 if (in_interrupt())
1501#endif
1502 rtR0LnxWorkqueuePush(&pTimer->DtorWorkqueueItem, rtTimerLnxDestroyDeferred);
1503 else
1504 rtTimerLnxDestroyIt(pTimer);
1505 }
1506
1507 IPRT_LINUX_RESTORE_EFL_AC();
1508 return VINF_SUCCESS;
1509}
1510RT_EXPORT_SYMBOL(RTTimerDestroy);
1511
1512
1513RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
1514{
1515 PRTTIMER pTimer;
1516 RTCPUID iCpu;
1517 unsigned cCpus;
1518 int rc;
1519 IPRT_LINUX_SAVE_EFL_AC();
1520
1521 rtR0LnxWorkqueueFlush(); /* for 2.4 */
1522 *ppTimer = NULL;
1523
1524 /*
1525 * Validate flags.
1526 */
1527 if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
1528 {
1529 IPRT_LINUX_RESTORE_EFL_AC();
1530 return VERR_INVALID_PARAMETER;
1531 }
1532 if ( (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
1533 && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
1534 && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
1535 {
1536 IPRT_LINUX_RESTORE_EFL_AC();
1537 return VERR_CPU_NOT_FOUND;
1538 }
1539
1540 /*
1541 * Allocate the timer handler.
1542 */
1543 cCpus = 1;
1544#ifdef CONFIG_SMP
1545 if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
1546 {
1547 cCpus = RTMpGetMaxCpuId() + 1;
1548 Assert(cCpus <= RTCPUSET_MAX_CPUS); /* On linux we have a 1:1 relationship between cpuid and set index. */
1549 AssertReturnStmt(u64NanoInterval, IPRT_LINUX_RESTORE_EFL_AC(), VERR_NOT_IMPLEMENTED); /* We don't implement single shot on all cpus, sorry. */
1550 }
1551#endif
1552
1553 rc = RTMemAllocEx(RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[cCpus]), 0,
1554 RTMEMALLOCEX_FLAGS_ZEROED | RTMEMALLOCEX_FLAGS_ANY_CTX_FREE, (void **)&pTimer);
1555 if (RT_FAILURE(rc))
1556 {
1557 IPRT_LINUX_RESTORE_EFL_AC();
1558 return rc;
1559 }
1560
1561 /*
1562 * Initialize it.
1563 */
1564 pTimer->u32Magic = RTTIMER_MAGIC;
1565 pTimer->hSpinlock = NIL_RTSPINLOCK;
1566 pTimer->fSuspended = true;
1567 pTimer->fHighRes = !!(fFlags & RTTIMER_FLAGS_HIGH_RES);
1568#ifdef CONFIG_SMP
1569 pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
1570 pTimer->fAllCpus = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
1571 pTimer->idCpu = pTimer->fSpecificCpu
1572 ? RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)
1573 : NIL_RTCPUID;
1574#else
1575 pTimer->fSpecificCpu = !!(fFlags & RTTIMER_FLAGS_CPU_SPECIFIC);
1576 pTimer->idCpu = RTMpCpuId();
1577#endif
1578 pTimer->cCpus = cCpus;
1579 pTimer->pfnTimer = pfnTimer;
1580 pTimer->pvUser = pvUser;
1581 pTimer->u64NanoInterval = u64NanoInterval;
1582 pTimer->cJiffies = u64NanoInterval / RTTimerGetSystemGranularity();
1583 if (pTimer->cJiffies * RTTimerGetSystemGranularity() != u64NanoInterval)
1584 pTimer->cJiffies = 0;
1585 spin_lock_init(&pTimer->ChgIntLock);
1586
1587 for (iCpu = 0; iCpu < cCpus; iCpu++)
1588 {
1589#ifdef RTTIMER_LINUX_WITH_HRTIMER
1590 if (pTimer->fHighRes)
1591 {
1592 hrtimer_init(&pTimer->aSubTimers[iCpu].u.Hr.LnxTimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1593 pTimer->aSubTimers[iCpu].u.Hr.LnxTimer.function = rtTimerLinuxHrCallback;
1594 }
1595 else
1596#endif
1597 {
1598#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
1599 timer_setup(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer, rtTimerLinuxStdCallback, TIMER_PINNED);
1600#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
1601 init_timer_pinned(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer);
1602#else
1603 init_timer(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer);
1604#endif
1605#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
1606 pTimer->aSubTimers[iCpu].u.Std.LnxTimer.data = (unsigned long)&pTimer->aSubTimers[iCpu];
1607 pTimer->aSubTimers[iCpu].u.Std.LnxTimer.function = rtTimerLinuxStdCallback;
1608#endif
1609 pTimer->aSubTimers[iCpu].u.Std.LnxTimer.expires = jiffies;
1610 pTimer->aSubTimers[iCpu].u.Std.u64NextTS = 0;
1611 }
1612 pTimer->aSubTimers[iCpu].iTick = 0;
1613 pTimer->aSubTimers[iCpu].pParent = pTimer;
1614 pTimer->aSubTimers[iCpu].enmState = RTTIMERLNXSTATE_STOPPED;
1615 }
1616
1617#ifdef CONFIG_SMP
1618 /*
1619 * If this is running on ALL cpus, we'll have to register a callback
1620 * for MP events (so timers can be started/stopped on cpus going
1621 * online/offline). We also create the spinlock for synchronizing
1622 * stop/start/mp-event.
1623 */
1624 if (cCpus > 1)
1625 {
1626 int rc = RTSpinlockCreate(&pTimer->hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "RTTimerLnx");
1627 if (RT_SUCCESS(rc))
1628 rc = RTMpNotificationRegister(rtTimerLinuxMpEvent, pTimer);
1629 else
1630 pTimer->hSpinlock = NIL_RTSPINLOCK;
1631 if (RT_FAILURE(rc))
1632 {
1633 RTTimerDestroy(pTimer);
1634 IPRT_LINUX_RESTORE_EFL_AC();
1635 return rc;
1636 }
1637 }
1638#endif /* CONFIG_SMP */
1639
1640 RTTIMERLNX_LOG(("create %p hires=%d fFlags=%#x cCpus=%u\n", pTimer, pTimer->fHighRes, fFlags, cCpus));
1641 *ppTimer = pTimer;
1642 IPRT_LINUX_RESTORE_EFL_AC();
1643 return VINF_SUCCESS;
1644}
1645RT_EXPORT_SYMBOL(RTTimerCreateEx);
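/* Caller-side usage sketch (not part of this file); the callback signature
 * mirrors how pfnTimer is invoked above, and the interval and flag values are
 * illustrative assumptions only:
 *
 *   static DECLCALLBACK(void) myTick(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
 *   {
 *       RT_NOREF_PV(pTimer); RT_NOREF_PV(pvUser); RT_NOREF_PV(iTick);
 *   }
 *
 *   PRTTIMER pTimer;
 *   int rc = RTTimerCreateEx(&pTimer, 1000000, RTTIMER_FLAGS_HIGH_RES, myTick, NULL);  (1 ms interval)
 *   if (RT_SUCCESS(rc))
 *   {
 *       rc = RTTimerStart(pTimer, 0);   (first tick as soon as possible, then every millisecond)
 *       ...
 *       RTTimerStop(pTimer);
 *       RTTimerDestroy(pTimer);
 *   }
 */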
1646
1647
1648RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
1649{
1650#if 0 /** @todo Not sure if this is what we want or not... Add new API for
1651 * querying the resolution of the high res timers? */
1652 struct timespec Ts;
1653 int rc;
1654 IPRT_LINUX_SAVE_EFL_AC();
1655 rc = hrtimer_get_res(CLOCK_MONOTONIC, &Ts);
1656 IPRT_LINUX_RESTORE_EFL_AC();
1657 if (!rc)
1658 {
1659 Assert(!Ts.tv_sec);
1660 return Ts.tv_nsec;
1661 }
1662#endif
1663 /* */
1664#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) || LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
1665 /* On 4.9, 4.10 and 4.12 we've observed tstRTR0Timer failures of the omni timer tests
1666 where we get about half of the ticks we want. The failing test is using this value
1667 as interval. So, this is a very very crude hack to try to make omni timers work
1668 correctly without actually knowing what's going wrong... */
1669 return RT_NS_1SEC * 2 / HZ; /* ns */
1670#else
1671 return RT_NS_1SEC / HZ; /* ns */
1672#endif
1673}
1674RT_EXPORT_SYMBOL(RTTimerGetSystemGranularity);
1675
1676
1677RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
1678{
1679 RT_NOREF_PV(u32Request); RT_NOREF_PV(*pu32Granted);
1680 return VERR_NOT_SUPPORTED;
1681}
1682RT_EXPORT_SYMBOL(RTTimerRequestSystemGranularity);
1683
1684
1685RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
1686{
1687 RT_NOREF_PV(u32Granted);
1688 return VERR_NOT_SUPPORTED;
1689}
1690RT_EXPORT_SYMBOL(RTTimerReleaseSystemGranularity);
1691
1692
1693RTDECL(bool) RTTimerCanDoHighResolution(void)
1694{
1695#ifdef RTTIMER_LINUX_WITH_HRTIMER
1696 return true;
1697#else
1698 return false;
1699#endif
1700}
1701RT_EXPORT_SYMBOL(RTTimerCanDoHighResolution);
1702