VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/linux/timer-r0drv-linux.c@ 84207

Last change on this file since 84207 was 82968, checked in by vboxsync, 5 years ago: copyright year updates by scm.

1/* $Id: timer-r0drv-linux.c 82968 2020-02-04 10:35:17Z vboxsync $ */
2/** @file
3 * IPRT - Timers, Ring-0 Driver, Linux.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-linux-kernel.h"
32#include "internal/iprt.h"
33
34#include <iprt/timer.h>
35#include <iprt/time.h>
36#include <iprt/mp.h>
37#include <iprt/cpuset.h>
38#include <iprt/spinlock.h>
39#include <iprt/err.h>
40#include <iprt/asm.h>
41#include <iprt/assert.h>
42#include <iprt/alloc.h>
43
44#include "internal/magics.h"
45
46/** @def RTTIMER_LINUX_WITH_HRTIMER
47 * Whether to use high resolution timers. */
48#if !defined(RTTIMER_LINUX_WITH_HRTIMER) \
49 && defined(IPRT_LINUX_HAS_HRTIMER)
50# define RTTIMER_LINUX_WITH_HRTIMER
51#endif
52
53#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
54# define mod_timer_pinned mod_timer
55# define HRTIMER_MODE_ABS_PINNED HRTIMER_MODE_ABS
56#endif
57
58
59/*********************************************************************************************************************************
60* Structures and Typedefs *
61*********************************************************************************************************************************/
62/**
63 * Timer state machine.
64 *
65 * This is used to try to handle the issues with MP events and
66 * timers that run on all CPUs. It's relatively nasty :-/
67 */
68typedef enum RTTIMERLNXSTATE
69{
70 /** Stopped. */
71 RTTIMERLNXSTATE_STOPPED = 0,
72 /** Transient state; next ACTIVE. */
73 RTTIMERLNXSTATE_STARTING,
74 /** Transient state; next ACTIVE. (not really necessary) */
75 RTTIMERLNXSTATE_MP_STARTING,
76 /** Active. */
77 RTTIMERLNXSTATE_ACTIVE,
78 /** Active and in callback; next ACTIVE, STOPPED or CALLBACK_DESTROYING. */
79 RTTIMERLNXSTATE_CALLBACK,
80 /** Stopped while in the callback; next STOPPED. */
81 RTTIMERLNXSTATE_CB_STOPPING,
82 /** Restarted while in the callback; next ACTIVE, STOPPED, DESTROYING. */
83 RTTIMERLNXSTATE_CB_RESTARTING,
84 /** The callback shall destroy the timer; next STOPPED. */
85 RTTIMERLNXSTATE_CB_DESTROYING,
86 /** Transient state; next STOPPED. */
87 RTTIMERLNXSTATE_STOPPING,
88 /** Transient state; next STOPPED. */
89 RTTIMERLNXSTATE_MP_STOPPING,
90 /** The usual 32-bit hack. */
91 RTTIMERLNXSTATE_32BIT_HACK = 0x7fffffff
92} RTTIMERLNXSTATE;
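/*
 * A minimal sketch of the idiom used with the state machine above, assuming
 * the rtTimerLnxCmpXchgState / rtTimerLnxSetState helpers defined further
 * down: every transition is claimed atomically first, so concurrent
 * start/stop/MP-event paths can never both own a sub-timer.
 */
#if 0
    /* Try STOPPED -> STARTING; only the winner may arm the Linux timer. */
    if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STARTING, RTTIMERLNXSTATE_STOPPED))
    {
        /* ... arm the underlying hrtimer / timer_list ... */
        rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE);
    }
    /* else: somebody else is starting, stopping or destroying it; retry or bail. */
#endif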
93
94
95/**
96 * A Linux sub-timer.
97 */
98typedef struct RTTIMERLNXSUBTIMER
99{
100 /** Timer specific data. */
101 union
102 {
103#if defined(RTTIMER_LINUX_WITH_HRTIMER)
104 /** High resolution timer. */
105 struct
106 {
107 /** The linux timer structure. */
108 struct hrtimer LnxTimer;
109 } Hr;
110#endif
111 /** Standard timer. */
112 struct
113 {
114 /** The linux timer structure. */
115 struct timer_list LnxTimer;
116 /** The start of the current run (ns).
117 * This is used to calculate when the timer ought to fire the next time. */
118 uint64_t u64NextTS;
119 /** When the timer was started. */
120 uint64_t nsStartTS;
121 /** The u64NextTS in jiffies. */
122 unsigned long ulNextJiffies;
123 /** Set when starting or changing the timer so that nsStartTS
124 * and u64NextTS get reinitialized (eliminating some jitter). */
125 bool volatile fFirstAfterChg;
126 } Std;
127 } u;
128 /** The current tick number. */
129 uint64_t iTick;
130 /** Restart the single shot timer at this specific time.
131 * Used when a single shot timer is restarted from the callback. */
132 uint64_t volatile uNsRestartAt;
133 /** Pointer to the parent timer. */
134 PRTTIMER pParent;
135 /** The current sub-timer state. */
136 RTTIMERLNXSTATE volatile enmState;
137} RTTIMERLNXSUBTIMER;
138/** Pointer to a linux sub-timer. */
139typedef RTTIMERLNXSUBTIMER *PRTTIMERLNXSUBTIMER;
140
141
142/**
143 * The internal representation of a Linux timer handle.
144 */
145typedef struct RTTIMER
146{
147 /** Magic.
148 * This is RTTIMER_MAGIC, but changes to something else before the timer
149 * is destroyed to clearly indicate that the handle is no longer valid. */
150 uint32_t volatile u32Magic;
151 /** Spinlock synchronizing the fSuspended and MP event handling.
152 * This is NIL_RTSPINLOCK if cCpus == 1. */
153 RTSPINLOCK hSpinlock;
154 /** Flag indicating that the timer is suspended. */
155 bool volatile fSuspended;
156 /** Whether the timer must run on one specific CPU or not. */
157 bool fSpecificCpu;
158#ifdef CONFIG_SMP
159 /** Whether the timer must run on all CPUs or not. */
160 bool fAllCpus;
161#endif /* else: All -> specific on non-SMP kernels */
162 /** Whether it is a high resolution timer or a standard one. */
163 bool fHighRes;
164 /** The id of the CPU it must run on if fSpecificCpu is set. */
165 RTCPUID idCpu;
166 /** The number of CPUs this timer should run on. */
167 RTCPUID cCpus;
168 /** Callback. */
169 PFNRTTIMER pfnTimer;
170 /** User argument. */
171 void *pvUser;
172 /** The timer interval. 0 if one-shot. */
173 uint64_t volatile u64NanoInterval;
174 /** This is set to the number of jiffies between ticks if the interval is
175 * an exact number of jiffies. (Standard timers only.) */
176 unsigned long volatile cJiffies;
177 /** The change interval spinlock for standard timers only. */
178 spinlock_t ChgIntLock;
179 /** Workqueue item for delayed destruction. */
180 RTR0LNXWORKQUEUEITEM DtorWorkqueueItem;
181 /** Sub-timers.
182 * Normally there is just one, but for RTTIMER_FLAGS_CPU_ALL this will contain
183 * an entry for all possible cpus. In that case the index will be the same as
184 * for the RTCpuSet. */
185 RTTIMERLNXSUBTIMER aSubTimers[1];
186} RTTIMER;
187
188
189/**
190 * An argument package for rtTimerLnxStartAllOnCpu, rtTimerLinuxMpStartOnCpu and rtTimerLnxStartOnSpecificCpu.
191 */
192typedef struct RTTIMERLINUXSTARTONCPUARGS
193{
194 /** The current time (RTTimeSystemNanoTS). */
195 uint64_t u64Now;
196 /** When to start firing (delta). */
197 uint64_t u64First;
198} RTTIMERLINUXSTARTONCPUARGS;
199/** Pointer to a start-on-cpu argument package. */
200typedef RTTIMERLINUXSTARTONCPUARGS *PRTTIMERLINUXSTARTONCPUARGS;
201
202
203/*********************************************************************************************************************************
204* Internal Functions *
205*********************************************************************************************************************************/
206#ifdef CONFIG_SMP
207static DECLCALLBACK(void) rtTimerLinuxMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser);
208#endif
209
210#if 0
211#define DEBUG_HACKING
212#include <iprt/string.h>
213#include <iprt/asm-amd64-x86.h>
214static void myLogBackdoorPrintf(const char *pszFormat, ...)
215{
216 char szTmp[256];
217 va_list args;
218 size_t cb;
219
220 cb = RTStrPrintf(szTmp, sizeof(szTmp) - 10, "%d: ", RTMpCpuId());
221 va_start(args, pszFormat);
222 cb += RTStrPrintfV(&szTmp[cb], sizeof(szTmp) - cb, pszFormat, args);
223 va_end(args);
224
225 ASMOutStrU8(0x504, (uint8_t *)&szTmp[0], cb);
226}
227# define RTAssertMsg1Weak(pszExpr, uLine, pszFile, pszFunction) \
228 myLogBackdoorPrintf("\n!!Guest Assertion failed!!\n%s(%d) %s\n%s\n", pszFile, uLine, pszFunction, (pszExpr))
229# define RTAssertMsg2Weak myLogBackdoorPrintf
230# define RTTIMERLNX_LOG(a) myLogBackdoorPrintf a
231#else
232# define RTTIMERLNX_LOG(a) do { } while (0)
233#endif
234
235/**
236 * Sets the state.
237 */
238DECLINLINE(void) rtTimerLnxSetState(RTTIMERLNXSTATE volatile *penmState, RTTIMERLNXSTATE enmNewState)
239{
240#ifdef DEBUG_HACKING
241 RTTIMERLNX_LOG(("set %d -> %d\n", *penmState, enmNewState));
242#endif
243 ASMAtomicWriteU32((uint32_t volatile *)penmState, enmNewState);
244}
245
246
247/**
248 * Sets the state if it has a certain value.
249 *
250 * @return true if xchg was done.
251 * @return false if xchg wasn't done.
252 */
253#ifdef DEBUG_HACKING
254#define rtTimerLnxCmpXchgState(penmState, enmNewState, enmCurState) rtTimerLnxCmpXchgStateDebug(penmState, enmNewState, enmCurState, __LINE__)
255static bool rtTimerLnxCmpXchgStateDebug(RTTIMERLNXSTATE volatile *penmState, RTTIMERLNXSTATE enmNewState,
256 RTTIMERLNXSTATE enmCurState, uint32_t uLine)
257{
258 RTTIMERLNXSTATE enmOldState = enmCurState;
259 bool fRc = ASMAtomicCmpXchgExU32((uint32_t volatile *)penmState, enmNewState, enmCurState, (uint32_t *)&enmOldState);
260 RTTIMERLNX_LOG(("cxg %d -> %d - %d at %u\n", enmOldState, enmNewState, fRc, uLine));
261 return fRc;
262}
263#else
264DECLINLINE(bool) rtTimerLnxCmpXchgState(RTTIMERLNXSTATE volatile *penmState, RTTIMERLNXSTATE enmNewState,
265 RTTIMERLNXSTATE enmCurState)
266{
267 return ASMAtomicCmpXchgU32((uint32_t volatile *)penmState, enmNewState, enmCurState);
268}
269#endif
270
271
272/**
273 * Gets the state.
274 */
275DECLINLINE(RTTIMERLNXSTATE) rtTimerLnxGetState(RTTIMERLNXSTATE volatile *penmState)
276{
277 return (RTTIMERLNXSTATE)ASMAtomicUoReadU32((uint32_t volatile *)penmState);
278}
279
280#ifdef RTTIMER_LINUX_WITH_HRTIMER
281
282/**
283 * Converts a nano second time stamp to ktime_t.
284 *
285 * ASSUMES RTTimeSystemNanoTS() is implemented using ktime_get_ts().
286 *
287 * @returns ktime_t.
288 * @param cNanoSecs Nanoseconds.
289 */
290DECLINLINE(ktime_t) rtTimerLnxNanoToKt(uint64_t cNanoSecs)
291{
292 /* With some luck the compiler optimizes the division out of this... (Bet it doesn't.) */
293 return ktime_set(cNanoSecs / 1000000000, cNanoSecs % 1000000000);
294}
295
296/**
297 * Converts ktime_t to a nano second time stamp.
298 *
299 * ASSUMES RTTimeSystemNanoTS() is implemented using ktime_get_ts().
300 *
301 * @returns nano second time stamp.
302 * @param Kt ktime_t.
303 */
304DECLINLINE(uint64_t) rtTimerLnxKtToNano(ktime_t Kt)
305{
306 return ktime_to_ns(Kt);
307}
308
309#endif /* RTTIMER_LINUX_WITH_HRTIMER */
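/*
 * A quick sanity sketch of the two conversions above: both sides count
 * nanoseconds, so values round-trip losslessly (assuming they fit ktime_t).
 */
#if 0
    ktime_t  Kt  = rtTimerLnxNanoToKt(UINT64_C(2500000000)); /* = ktime_set(2, 500000000), i.e. 2.5s */
    uint64_t cNs = rtTimerLnxKtToNano(Kt);                   /* = 2500000000 again */
#endif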
310
311/**
312 * Converts a nano second interval to jiffies.
313 *
314 * @returns Jiffies.
315 * @param cNanoSecs Nanoseconds.
316 */
317DECLINLINE(unsigned long) rtTimerLnxNanoToJiffies(uint64_t cNanoSecs)
318{
319 /* this can be made even better... */
320 if (cNanoSecs > (uint64_t)TICK_NSEC * MAX_JIFFY_OFFSET)
321 return MAX_JIFFY_OFFSET;
322# if ARCH_BITS == 32
323 if (RT_LIKELY(cNanoSecs <= UINT32_MAX))
324 return ((uint32_t)cNanoSecs + (TICK_NSEC-1)) / TICK_NSEC;
325# endif
326 return (cNanoSecs + (TICK_NSEC-1)) / TICK_NSEC;
327}
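/*
 * Worked example: the conversion rounds up so a timer never fires early.
 * Assuming HZ=250 (TICK_NSEC = 4000000 ns), a 10ms interval yields
 * (10000000 + 3999999) / 4000000 = 3 jiffies, i.e. 12ms rather than 8ms.
 */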
328
329
330/**
331 * Starts a sub-timer (RTTimerStart).
332 *
333 * @param pSubTimer The sub-timer to start.
334 * @param u64Now The current timestamp (RTTimeSystemNanoTS()).
335 * @param u64First The interval from u64Now to the first time the timer should fire.
336 * @param fPinned true = timer pinned to a specific CPU,
337 * false = timer can migrate between CPUs
338 * @param fHighRes Whether the user requested a high resolution timer or not.
340 */
341static void rtTimerLnxStartSubTimer(PRTTIMERLNXSUBTIMER pSubTimer, uint64_t u64Now, uint64_t u64First,
342 bool fPinned, bool fHighRes)
343{
344 /*
345 * Calc when it should start firing.
346 */
347 uint64_t u64NextTS = u64Now + u64First;
348 if (!fHighRes)
349 {
350 pSubTimer->u.Std.u64NextTS = u64NextTS;
351 pSubTimer->u.Std.nsStartTS = u64NextTS;
352 }
353 RTTIMERLNX_LOG(("startsubtimer %p\n", pSubTimer->pParent));
354
355 pSubTimer->iTick = 0;
356
357#ifdef RTTIMER_LINUX_WITH_HRTIMER
358 if (fHighRes)
359 hrtimer_start(&pSubTimer->u.Hr.LnxTimer, rtTimerLnxNanoToKt(u64NextTS),
360 fPinned ? HRTIMER_MODE_ABS_PINNED : HRTIMER_MODE_ABS);
361 else
362#endif
363 {
364 unsigned long cJiffies = !u64First ? 0 : rtTimerLnxNanoToJiffies(u64First);
365 pSubTimer->u.Std.ulNextJiffies = jiffies + cJiffies;
366 pSubTimer->u.Std.fFirstAfterChg = true;
367#ifdef CONFIG_SMP
368 if (fPinned)
369 {
370# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
371 mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
372# else
373 mod_timer_pinned(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
374# endif
375 }
376 else
377#endif
378 mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
379 }
380
381 /* Be a bit careful here since we could be racing the callback. */
382 if (!rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_STARTING))
383 rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_MP_STARTING);
384}
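/*
 * Worked example for the standard-timer branch above: starting with
 * u64First = 50ms on a HZ=250 kernel gives cJiffies =
 * rtTimerLnxNanoToJiffies(50000000) = 13 (rounded up from 12.5 ticks), so the
 * first expiry lands at jiffies + 13; u64First = 0 arms it for the very next
 * tick (jiffies + 0).
 */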
385
386
387/**
388 * Stops a sub-timer (RTTimerStop and rtTimerLinuxMpEvent()).
389 *
390 * The caller has already changed the state, so we will not be in a callback
391 * situation with respect to the calling thread.
392 *
393 * @param pSubTimer The sub-timer.
394 * @param fHighRes Whether the user requested a high resolution timer or not.
395 */
396static void rtTimerLnxStopSubTimer(PRTTIMERLNXSUBTIMER pSubTimer, bool fHighRes)
397{
398 RTTIMERLNX_LOG(("stopsubtimer %p %d\n", pSubTimer->pParent, fHighRes));
399#ifdef RTTIMER_LINUX_WITH_HRTIMER
400 if (fHighRes)
401 {
402 /* There is no equivalent to del_timer in the hrtimer API,
403 hrtimer_cancel() == del_timer_sync(). Just like the WARN_ON in
404 del_timer_sync() asserts, waiting for a timer callback to complete
405 is deadlock prone, so don't do it. */
406 int rc = hrtimer_try_to_cancel(&pSubTimer->u.Hr.LnxTimer);
407 if (rc < 0)
408 {
409 hrtimer_start(&pSubTimer->u.Hr.LnxTimer, ktime_set(KTIME_SEC_MAX, 0), HRTIMER_MODE_ABS);
410 hrtimer_try_to_cancel(&pSubTimer->u.Hr.LnxTimer);
411 }
412 }
413 else
414#endif
415 del_timer(&pSubTimer->u.Std.LnxTimer);
416
417 rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED);
418}
419
420
421/**
422 * Used by RTTimerDestroy and rtTimerLnxCallbackDestroy to do the actual work.
423 *
424 * @param pTimer The timer in question.
425 */
426static void rtTimerLnxDestroyIt(PRTTIMER pTimer)
427{
428 RTSPINLOCK hSpinlock = pTimer->hSpinlock;
429 RTCPUID iCpu;
430 Assert(pTimer->fSuspended);
431 RTTIMERLNX_LOG(("destroyit %p\n", pTimer));
432
433 /*
434 * Remove the MP notifications first because it'll reduce the risk of
435 * us overtaking any MP event that might theoretically be racing us here.
436 */
437#ifdef CONFIG_SMP
438 if ( pTimer->cCpus > 1
439 && hSpinlock != NIL_RTSPINLOCK)
440 {
441 int rc = RTMpNotificationDeregister(rtTimerLinuxMpEvent, pTimer);
442 AssertRC(rc);
443 }
444#endif /* CONFIG_SMP */
445
446 /*
447 * Invalidate the handle.
448 */
449 ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
450
451 /*
452 * Make sure all timers have stopped executing since we're stopping them in
453 * an asynchronous manner up in rtTimerLnxStopSubTimer.
454 */
455 iCpu = pTimer->cCpus;
456 while (iCpu-- > 0)
457 {
458#ifdef RTTIMER_LINUX_WITH_HRTIMER
459 if (pTimer->fHighRes)
460 hrtimer_cancel(&pTimer->aSubTimers[iCpu].u.Hr.LnxTimer);
461 else
462#endif
463 del_timer_sync(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer);
464 }
465
466 /*
467 * Finally, free the resources.
468 */
469 RTMemFreeEx(pTimer, RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[pTimer->cCpus]));
470 if (hSpinlock != NIL_RTSPINLOCK)
471 RTSpinlockDestroy(hSpinlock);
472}
473
474
475/**
476 * Workqueue callback (no DECLCALLBACK!) for deferred destruction.
477 *
478 * @param pWork Pointer to the DtorWorkqueueItem member of our timer
479 * structure.
480 */
481static void rtTimerLnxDestroyDeferred(RTR0LNXWORKQUEUEITEM *pWork)
482{
483 PRTTIMER pTimer = RT_FROM_MEMBER(pWork, RTTIMER, DtorWorkqueueItem);
484 rtTimerLnxDestroyIt(pTimer);
485}
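/*
 * RT_FROM_MEMBER recovers the owning structure from a pointer to one of its
 * members, much like the kernel's container_of(). A sketch of the effect (the
 * exact macro definition lives in IPRT and may differ):
 */
#if 0
    PRTTIMER pTimer = (PRTTIMER)((uintptr_t)pWork - RT_UOFFSETOF(RTTIMER, DtorWorkqueueItem));
#endif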
486
487
488/**
489 * Called when the timer was destroyed by the callback function.
490 *
491 * @param pTimer The timer.
492 * @param pSubTimer The sub-timer which we're handling, the state of this
493 * will be RTTIMERLNXSTATE_CB_DESTROYING.
494 */
495static void rtTimerLnxCallbackDestroy(PRTTIMER pTimer, PRTTIMERLNXSUBTIMER pSubTimer)
496{
497 /*
498 * If it's an omni timer, the last dude does the destroying.
499 */
500 if (pTimer->cCpus > 1)
501 {
502 uint32_t iCpu = pTimer->cCpus;
503 RTSpinlockAcquire(pTimer->hSpinlock);
504
505 Assert(pSubTimer->enmState == RTTIMERLNXSTATE_CB_DESTROYING);
506 rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED);
507
508 while (iCpu-- > 0)
509 if (rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState) != RTTIMERLNXSTATE_STOPPED)
510 {
511 RTSpinlockRelease(pTimer->hSpinlock);
512 return;
513 }
514
515 RTSpinlockRelease(pTimer->hSpinlock);
516 }
517
518 /*
519 * Destroying a timer from the callback is unsafe since the callout code
520 * might be touching the timer structure upon return (hrtimer does!). So,
521 * we have to defer the actual destruction to the IPRT workqueue.
522 */
523 rtR0LnxWorkqueuePush(&pTimer->DtorWorkqueueItem, rtTimerLnxDestroyDeferred);
524}
525
526
527#ifdef CONFIG_SMP
528/**
529 * Deal with a sub-timer that has migrated.
530 *
531 * @param pTimer The timer.
532 * @param pSubTimer The sub-timer.
533 */
534static void rtTimerLnxCallbackHandleMigration(PRTTIMER pTimer, PRTTIMERLNXSUBTIMER pSubTimer)
535{
536 RTTIMERLNXSTATE enmState;
537 if (pTimer->cCpus > 1)
538 RTSpinlockAcquire(pTimer->hSpinlock);
539
540 do
541 {
542 enmState = rtTimerLnxGetState(&pSubTimer->enmState);
543 switch (enmState)
544 {
545 case RTTIMERLNXSTATE_STOPPING:
546 case RTTIMERLNXSTATE_MP_STOPPING:
547 enmState = RTTIMERLNXSTATE_STOPPED; RT_FALL_THRU();
548 case RTTIMERLNXSTATE_STOPPED:
549 break;
550
551 default:
552 AssertMsgFailed(("%d\n", enmState)); RT_FALL_THRU();
553 case RTTIMERLNXSTATE_STARTING:
554 case RTTIMERLNXSTATE_MP_STARTING:
555 case RTTIMERLNXSTATE_ACTIVE:
556 case RTTIMERLNXSTATE_CALLBACK:
557 case RTTIMERLNXSTATE_CB_STOPPING:
558 case RTTIMERLNXSTATE_CB_RESTARTING:
559 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, enmState))
560 enmState = RTTIMERLNXSTATE_STOPPED;
561 break;
562
563 case RTTIMERLNXSTATE_CB_DESTROYING:
564 {
565 if (pTimer->cCpus > 1)
566 RTSpinlockRelease(pTimer->hSpinlock);
567
568 rtTimerLnxCallbackDestroy(pTimer, pSubTimer);
569 return;
570 }
571 }
572 } while (enmState != RTTIMERLNXSTATE_STOPPED);
573
574 if (pTimer->cCpus > 1)
575 RTSpinlockRelease(pTimer->hSpinlock);
576}
577#endif /* CONFIG_SMP */
578
579
580/**
581 * The slow path of rtTimerLnxChangeToCallbackState.
582 *
583 * @returns true if changed successfully, false if not.
584 * @param pSubTimer The sub-timer.
585 */
586static bool rtTimerLnxChangeToCallbackStateSlow(PRTTIMERLNXSUBTIMER pSubTimer)
587{
588 for (;;)
589 {
590 RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pSubTimer->enmState);
591 switch (enmState)
592 {
593 case RTTIMERLNXSTATE_ACTIVE:
594 case RTTIMERLNXSTATE_STARTING:
595 case RTTIMERLNXSTATE_MP_STARTING:
596 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_CALLBACK, enmState))
597 return true;
598 break;
599
600 case RTTIMERLNXSTATE_CALLBACK:
601 case RTTIMERLNXSTATE_CB_STOPPING:
602 case RTTIMERLNXSTATE_CB_RESTARTING:
603 case RTTIMERLNXSTATE_CB_DESTROYING:
604 AssertMsgFailed(("%d\n", enmState)); RT_FALL_THRU();
605 default:
606 return false;
607 }
608 ASMNopPause();
609 }
610}
611
612
613/**
614 * Tries to change the sub-timer state to 'callback'.
615 *
616 * @returns true if changed successfully, false if not.
617 * @param pSubTimer The sub-timer.
618 */
619DECLINLINE(bool) rtTimerLnxChangeToCallbackState(PRTTIMERLNXSUBTIMER pSubTimer)
620{
621 if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_CALLBACK, RTTIMERLNXSTATE_ACTIVE)))
622 return true;
623 return rtTimerLnxChangeToCallbackStateSlow(pSubTimer);
624}
625
626
627#ifdef RTTIMER_LINUX_WITH_HRTIMER
628/**
629 * Timer callback function for high resolution timers.
630 *
631 * @returns HRTIMER_NORESTART or HRTIMER_RESTART depending on whether it's a
632 * one-shot or interval timer.
633 * @param pHrTimer Pointer to the sub-timer structure.
634 */
635static enum hrtimer_restart rtTimerLinuxHrCallback(struct hrtimer *pHrTimer)
636{
637 PRTTIMERLNXSUBTIMER pSubTimer = RT_FROM_MEMBER(pHrTimer, RTTIMERLNXSUBTIMER, u.Hr.LnxTimer);
638 PRTTIMER pTimer = pSubTimer->pParent;
639
640
641 RTTIMERLNX_LOG(("hrcallback %p\n", pTimer));
642 if (RT_UNLIKELY(!rtTimerLnxChangeToCallbackState(pSubTimer)))
643 return HRTIMER_NORESTART;
644
645#ifdef CONFIG_SMP
646 /*
647 * Check for unwanted migration.
648 */
649 if (pTimer->fAllCpus || pTimer->fSpecificCpu)
650 {
651 RTCPUID idCpu = RTMpCpuId();
652 if (RT_UNLIKELY( pTimer->fAllCpus
653 ? (RTCPUID)(pSubTimer - &pTimer->aSubTimers[0]) != idCpu
654 : pTimer->idCpu != idCpu))
655 {
656 rtTimerLnxCallbackHandleMigration(pTimer, pSubTimer);
657 return HRTIMER_NORESTART;
658 }
659 }
660#endif
661
662 if (pTimer->u64NanoInterval)
663 {
664 /*
665 * Periodic timer, run it and update the native timer afterwards so
666 * we can handle RTTimerStop and RTTimerChangeInterval from the
667 * callback as well as a racing control thread.
668 */
669 pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
670 hrtimer_add_expires_ns(&pSubTimer->u.Hr.LnxTimer, ASMAtomicReadU64(&pTimer->u64NanoInterval));
671 if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CALLBACK)))
672 return HRTIMER_RESTART;
673 }
674 else
675 {
676 /*
677 * One shot timer (no omni), stop it before dispatching it.
678 * Allow RTTimerStart as well as RTTimerDestroy to be called from
679 * the callback.
680 */
681 ASMAtomicWriteBool(&pTimer->fSuspended, true);
682 pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
683 if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CALLBACK)))
684 return HRTIMER_NORESTART;
685 }
686
687 /*
688 * Some state change occurred while we were in the callback routine.
689 */
690 for (;;)
691 {
692 RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pSubTimer->enmState);
693 switch (enmState)
694 {
695 case RTTIMERLNXSTATE_CB_DESTROYING:
696 rtTimerLnxCallbackDestroy(pTimer, pSubTimer);
697 return HRTIMER_NORESTART;
698
699 case RTTIMERLNXSTATE_CB_STOPPING:
700 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CB_STOPPING))
701 return HRTIMER_NORESTART;
702 break;
703
704 case RTTIMERLNXSTATE_CB_RESTARTING:
705 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CB_RESTARTING))
706 {
707 pSubTimer->iTick = 0;
708 hrtimer_set_expires(&pSubTimer->u.Hr.LnxTimer, rtTimerLnxNanoToKt(pSubTimer->uNsRestartAt));
709 return HRTIMER_RESTART;
710 }
711 break;
712
713 default:
714 AssertMsgFailed(("%d\n", enmState));
715 return HRTIMER_NORESTART;
716 }
717 ASMNopPause();
718 }
719}
720#endif /* RTTIMER_LINUX_WITH_HRTIMER */
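/*
 * The same self-re-arming pattern as a free-standing sketch outside IPRT,
 * assuming a kernel that still provides hrtimer_init() and an assignable
 * .function member (names here are hypothetical):
 */
#if 0
static struct hrtimer g_SketchTimer;

static enum hrtimer_restart mySketchHrCallback(struct hrtimer *pTmr)
{
    /* ... do the periodic work ... */
    hrtimer_add_expires_ns(pTmr, 1000000); /* push the expiry 1ms forward */
    return HRTIMER_RESTART;                /* requeue at the new expiry */
}

static void mySketchHrStart(void)
{
    hrtimer_init(&g_SketchTimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    g_SketchTimer.function = mySketchHrCallback;
    hrtimer_start(&g_SketchTimer, ns_to_ktime(1000000), HRTIMER_MODE_REL);
}
#endif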
721
722
723#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
724/**
725 * Timer callback function for standard timers.
726 *
727 * @param pLnxTimer Pointer to the Linux timer structure.
728 */
729static void rtTimerLinuxStdCallback(struct timer_list *pLnxTimer)
730{
731 PRTTIMERLNXSUBTIMER pSubTimer = from_timer(pSubTimer, pLnxTimer, u.Std.LnxTimer);
732#else
733/**
734 * Timer callback function for standard timers.
735 *
736 * @param ulUser Address of the sub-timer structure.
737 */
738static void rtTimerLinuxStdCallback(unsigned long ulUser)
739{
740 PRTTIMERLNXSUBTIMER pSubTimer = (PRTTIMERLNXSUBTIMER)ulUser;
741#endif
742 PRTTIMER pTimer = pSubTimer->pParent;
743
744 RTTIMERLNX_LOG(("stdcallback %p\n", pTimer));
745 if (RT_UNLIKELY(!rtTimerLnxChangeToCallbackState(pSubTimer)))
746 return;
747
748#ifdef CONFIG_SMP
749 /*
750 * Check for unwanted migration.
751 */
752 if (pTimer->fAllCpus || pTimer->fSpecificCpu)
753 {
754 RTCPUID idCpu = RTMpCpuId();
755 if (RT_UNLIKELY( pTimer->fAllCpus
756 ? (RTCPUID)(pSubTimer - &pTimer->aSubTimers[0]) != idCpu
757 : pTimer->idCpu != idCpu))
758 {
759 rtTimerLnxCallbackHandleMigration(pTimer, pSubTimer);
760 return;
761 }
762 }
763#endif
764
765 if (pTimer->u64NanoInterval)
766 {
767 /*
768 * Interval timer, calculate the next timeout.
769 *
770 * The first time around, we'll re-adjust the u.Std.u64NextTS to
771 * try to prevent some jittering if we were started at a bad time.
772 */
773 const uint64_t iTick = ++pSubTimer->iTick;
774 unsigned long uCurJiffies = jiffies;
775 unsigned long ulNextJiffies;
776 uint64_t u64NanoInterval;
777 unsigned long cJiffies;
778 unsigned long flFlags;
779
780 spin_lock_irqsave(&pTimer->ChgIntLock, flFlags);
781 u64NanoInterval = pTimer->u64NanoInterval;
782 cJiffies = pTimer->cJiffies;
783 if (RT_UNLIKELY(pSubTimer->u.Std.fFirstAfterChg))
784 {
785 pSubTimer->u.Std.fFirstAfterChg = false;
786 pSubTimer->u.Std.u64NextTS = RTTimeSystemNanoTS();
787 pSubTimer->u.Std.nsStartTS = pSubTimer->u.Std.u64NextTS - u64NanoInterval * (iTick - 1);
788 pSubTimer->u.Std.ulNextJiffies = uCurJiffies = jiffies;
789 }
790 spin_unlock_irqrestore(&pTimer->ChgIntLock, flFlags);
791
792 pSubTimer->u.Std.u64NextTS += u64NanoInterval;
793 if (cJiffies)
794 {
795 ulNextJiffies = pSubTimer->u.Std.ulNextJiffies + cJiffies;
796 pSubTimer->u.Std.ulNextJiffies = ulNextJiffies;
797 if (time_after_eq(ulNextJiffies, uCurJiffies))
798 { /* likely */ }
799 else
800 {
801 unsigned long cJiffiesBehind = uCurJiffies - ulNextJiffies;
802 ulNextJiffies = uCurJiffies + cJiffies / 2;
803 if (cJiffiesBehind >= HZ / 4) /* Concede if we're lagging too far behind. Screw the u64NextTS member. */
804 pSubTimer->u.Std.ulNextJiffies = ulNextJiffies;
805 /*else: Don't update u.Std.ulNextJiffies so we can continue catching up in the next tick. */
806 }
807 }
808 else
809 {
810 const uint64_t u64NanoTS = RTTimeSystemNanoTS();
811 const int64_t cNsBehind = u64NanoTS - pSubTimer->u.Std.u64NextTS;
812 if (cNsBehind <= 0)
813 ulNextJiffies = uCurJiffies + rtTimerLnxNanoToJiffies(pSubTimer->u.Std.u64NextTS - u64NanoTS);
814 else if (u64NanoInterval >= RT_NS_1SEC_64 * 2 / HZ)
815 {
816 ulNextJiffies = uCurJiffies + rtTimerLnxNanoToJiffies(u64NanoInterval / 2);
817 if (cNsBehind >= RT_NS_1SEC_64 / HZ / 4) /* Concede if we're lagging too far behind. */
818 pSubTimer->u.Std.u64NextTS = u64NanoTS + u64NanoInterval / 2;
819 }
820 else
821 {
822 ulNextJiffies = uCurJiffies + 1;
823 if (cNsBehind >= RT_NS_1SEC_64 / HZ / 4) /* Concede if we're lagging too far behind. */
824 pSubTimer->u.Std.u64NextTS = u64NanoTS + RT_NS_1SEC_64 / HZ;
825 }
826 pSubTimer->u.Std.ulNextJiffies = ulNextJiffies;
827 }
828
829 /*
830 * Run the timer and re-arm it unless the state changed.
831 *
832 * We must re-arm it afterwards as we're not in a position to undo this
833 * operation if for instance someone stopped or destroyed us while we
834 * were in the callback. (Linux takes care of any races here.)
835 */
836 pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);
837 if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CALLBACK)))
838 {
839#ifdef CONFIG_SMP
840 if (pTimer->fSpecificCpu || pTimer->fAllCpus)
841 {
842# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
843 mod_timer(&pSubTimer->u.Std.LnxTimer, ulNextJiffies);
844# else
845 mod_timer_pinned(&pSubTimer->u.Std.LnxTimer, ulNextJiffies);
846# endif
847 }
848 else
849#endif
850 mod_timer(&pSubTimer->u.Std.LnxTimer, ulNextJiffies);
851 return;
852 }
853 }
854 else
855 {
856 /*
857 * One shot timer, stop it before dispatching it.
858 * Allow RTTimerStart as well as RTTimerDestroy to be called from
859 * the callback.
860 */
861 ASMAtomicWriteBool(&pTimer->fSuspended, true);
862 pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
863 if (RT_LIKELY(rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CALLBACK)))
864 return;
865 }
866
867 /*
868 * Some state change occurred while we were in the callback routine.
869 */
870 for (;;)
871 {
872 RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pSubTimer->enmState);
873 switch (enmState)
874 {
875 case RTTIMERLNXSTATE_CB_DESTROYING:
876 rtTimerLnxCallbackDestroy(pTimer, pSubTimer);
877 return;
878
879 case RTTIMERLNXSTATE_CB_STOPPING:
880 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_CB_STOPPING))
881 return;
882 break;
883
884 case RTTIMERLNXSTATE_CB_RESTARTING:
885 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_ACTIVE, RTTIMERLNXSTATE_CB_RESTARTING))
886 {
887 uint64_t u64NanoTS;
888 uint64_t u64NextTS;
889 unsigned long flFlags;
890
891 spin_lock_irqsave(&pTimer->ChgIntLock, flFlags);
892 u64NextTS = pSubTimer->uNsRestartAt;
893 u64NanoTS = RTTimeSystemNanoTS();
894 pSubTimer->iTick = 0;
895 pSubTimer->u.Std.u64NextTS = u64NextTS;
896 pSubTimer->u.Std.fFirstAfterChg = true;
897 pSubTimer->u.Std.ulNextJiffies = u64NextTS > u64NanoTS
898 ? jiffies + rtTimerLnxNanoToJiffies(u64NextTS - u64NanoTS)
899 : jiffies;
900 spin_unlock_irqrestore(&pTimer->ChgIntLock, flFlags);
901
902#ifdef CONFIG_SMP
903 if (pTimer->fSpecificCpu || pTimer->fAllCpus)
904 {
905# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
906 mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
907# else
908 mod_timer_pinned(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
909# endif
910 }
911 else
912#endif
913 mod_timer(&pSubTimer->u.Std.LnxTimer, pSubTimer->u.Std.ulNextJiffies);
914 return;
915 }
916 break;
917
918 default:
919 AssertMsgFailed(("%d\n", enmState));
920 return;
921 }
922 ASMNopPause();
923 }
924}
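/*
 * Worked example of the catch-up logic above (exact-jiffies path, HZ=250,
 * cJiffies=25, i.e. a 100ms interval): if a callback runs 70 jiffies late,
 * cJiffiesBehind = 70 >= HZ/4 (62), so we concede and reschedule at
 * uCurJiffies + cJiffies/2 = now + 12, accepting the lost ticks instead of
 * firing back-to-back to catch up.
 */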
925
926
927#ifdef CONFIG_SMP
928
929/**
930 * Per-cpu callback function (RTMpOnAll/RTMpOnSpecific).
931 *
932 * @param idCpu The current CPU.
933 * @param pvUser1 Pointer to the timer.
934 * @param pvUser2 Pointer to the argument structure.
935 */
936static DECLCALLBACK(void) rtTimerLnxStartAllOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
937{
938 PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
939 PRTTIMER pTimer = (PRTTIMER)pvUser1;
940 Assert(idCpu < pTimer->cCpus);
941 rtTimerLnxStartSubTimer(&pTimer->aSubTimers[idCpu], pArgs->u64Now, pArgs->u64First, true /*fPinned*/, pTimer->fHighRes);
942}
943
944
945/**
946 * Worker for RTTimerStart() that takes care of the ugly bits.
947 *
948 * @returns RTTimerStart() return value.
949 * @param pTimer The timer.
950 * @param pArgs The argument structure.
951 */
952static int rtTimerLnxOmniStart(PRTTIMER pTimer, PRTTIMERLINUXSTARTONCPUARGS pArgs)
953{
954 RTCPUID iCpu;
955 RTCPUSET OnlineSet;
956 RTCPUSET OnlineSet2;
957 int rc2;
958
959 /*
960 * Prepare all the sub-timers for startup and then flag the timer
961 * as a whole as non-suspended. Make sure all of them are set up before
962 * clearing fSuspended, as the MP handler will be relying on this
963 * should something happen while we're looping.
964 */
965 RTSpinlockAcquire(pTimer->hSpinlock);
966
967 /* Just make it an omni timer restriction that no stop/start races are allowed. */
968 for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
969 if (rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState) != RTTIMERLNXSTATE_STOPPED)
970 {
971 RTSpinlockRelease(pTimer->hSpinlock);
972 return VERR_TIMER_BUSY;
973 }
974
975 do
976 {
977 RTMpGetOnlineSet(&OnlineSet);
978 for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
979 {
980 Assert(pTimer->aSubTimers[iCpu].enmState != RTTIMERLNXSTATE_MP_STOPPING);
981 rtTimerLnxSetState(&pTimer->aSubTimers[iCpu].enmState,
982 RTCpuSetIsMember(&OnlineSet, iCpu)
983 ? RTTIMERLNXSTATE_STARTING
984 : RTTIMERLNXSTATE_STOPPED);
985 }
986 } while (!RTCpuSetIsEqual(&OnlineSet, RTMpGetOnlineSet(&OnlineSet2)));
987
988 ASMAtomicWriteBool(&pTimer->fSuspended, false);
989
990 RTSpinlockRelease(pTimer->hSpinlock);
991
992 /*
993 * Start them (can't find any exported function that allows me to
994 * do this without the cross calls).
995 */
996 pArgs->u64Now = RTTimeSystemNanoTS();
997 rc2 = RTMpOnAll(rtTimerLnxStartAllOnCpu, pTimer, pArgs);
998 AssertRC(rc2); /* screw this if it fails. */
999
1000 /*
1001 * Reset the sub-timers that didn't start up (ALL CPUs case).
1002 */
1003 RTSpinlockAcquire(pTimer->hSpinlock);
1004
1005 for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
1006 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState, RTTIMERLNXSTATE_STOPPED, RTTIMERLNXSTATE_STARTING))
1007 {
1008 /** @todo very odd case for a rainy day. CPUs that temporarily went offline while
1009 * we were between calls need to be nudged, as the MP handler will ignore events for
1010 * them because of the STARTING state. This is an extremely unlikely case - not that
1011 * that means anything in my experience... ;-) */
1012 RTTIMERLNX_LOG(("what!? iCpu=%u -> didn't start\n", iCpu));
1013 }
1014
1015 RTSpinlockRelease(pTimer->hSpinlock);
1016
1017 return VINF_SUCCESS;
1018}
1019
1020
1021/**
1022 * Worker for RTTimerStop() that takes care of the ugly SMP bits.
1023 *
1024 * @returns true if there were any active callbacks, false if not.
1025 * @param pTimer The timer (valid).
1026 * @param fForDestroy Whether this is for RTTimerDestroy or not.
1027 */
1028static bool rtTimerLnxOmniStop(PRTTIMER pTimer, bool fForDestroy)
1029{
1030 bool fActiveCallbacks = false;
1031 RTCPUID iCpu;
1032 RTTIMERLNXSTATE enmState;
1033
1034
1035 /*
1036 * Mark the timer as suspended and flag all timers as stopping, except
1037 * for those being stopped by an MP event.
1038 */
1039 RTSpinlockAcquire(pTimer->hSpinlock);
1040
1041 ASMAtomicWriteBool(&pTimer->fSuspended, true);
1042 for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
1043 {
1044 for (;;)
1045 {
1046 enmState = rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState);
1047 if ( enmState == RTTIMERLNXSTATE_STOPPED
1048 || enmState == RTTIMERLNXSTATE_MP_STOPPING)
1049 break;
1050 if ( enmState == RTTIMERLNXSTATE_CALLBACK
1051 || enmState == RTTIMERLNXSTATE_CB_STOPPING
1052 || enmState == RTTIMERLNXSTATE_CB_RESTARTING)
1053 {
1054 Assert(enmState != RTTIMERLNXSTATE_CB_STOPPING || fForDestroy);
1055 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState,
1056 !fForDestroy ? RTTIMERLNXSTATE_CB_STOPPING : RTTIMERLNXSTATE_CB_DESTROYING,
1057 enmState))
1058 {
1059 fActiveCallbacks = true;
1060 break;
1061 }
1062 }
1063 else
1064 {
1065 Assert(enmState == RTTIMERLNXSTATE_ACTIVE);
1066 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState, RTTIMERLNXSTATE_STOPPING, enmState))
1067 break;
1068 }
1069 ASMNopPause();
1070 }
1071 }
1072
1073 RTSpinlockRelease(pTimer->hSpinlock);
1074
1075 /*
1076 * Do the actual stopping. Fortunately, this doesn't require any IPIs.
1077 * Unfortunately it cannot be done synchronously.
1078 */
1079 for (iCpu = 0; iCpu < pTimer->cCpus; iCpu++)
1080 if (rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState) == RTTIMERLNXSTATE_STOPPING)
1081 rtTimerLnxStopSubTimer(&pTimer->aSubTimers[iCpu], pTimer->fHighRes);
1082
1083 return fActiveCallbacks;
1084}
1085
1086
1087/**
1088 * Per-cpu callback function (RTMpOnSpecific) used by rtTimerLinuxMpEvent()
1089 * to start a sub-timer on a cpu that has just come online.
1090 *
1091 * @param idCpu The current CPU.
1092 * @param pvUser1 Pointer to the timer.
1093 * @param pvUser2 Pointer to the argument structure.
1094 */
1095static DECLCALLBACK(void) rtTimerLinuxMpStartOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
1096{
1097 PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
1098 PRTTIMER pTimer = (PRTTIMER)pvUser1;
1099 RTSPINLOCK hSpinlock;
1100 Assert(idCpu < pTimer->cCpus);
1101
1102 /*
1103 * We have to be kind of careful here as we might be racing RTTimerStop
1104 * (and/or RTTimerDestroy), thus the paranoia.
1105 */
1106 hSpinlock = pTimer->hSpinlock;
1107 if ( hSpinlock != NIL_RTSPINLOCK
1108 && pTimer->u32Magic == RTTIMER_MAGIC)
1109 {
1110 RTSpinlockAcquire(hSpinlock);
1111
1112 if ( !ASMAtomicUoReadBool(&pTimer->fSuspended)
1113 && pTimer->u32Magic == RTTIMER_MAGIC)
1114 {
1115 /* We're sane and the timer is not suspended yet. */
1116 PRTTIMERLNXSUBTIMER pSubTimer = &pTimer->aSubTimers[idCpu];
1117 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STARTING, RTTIMERLNXSTATE_STOPPED))
1118 rtTimerLnxStartSubTimer(pSubTimer, pArgs->u64Now, pArgs->u64First, true /*fPinned*/, pTimer->fHighRes);
1119 }
1120
1121 RTSpinlockRelease(hSpinlock);
1122 }
1123}
1124
1125
1126/**
1127 * MP event notification callback.
1128 *
1129 * @param enmEvent The event.
1130 * @param idCpu The cpu it applies to.
1131 * @param pvUser The timer.
1132 */
1133static DECLCALLBACK(void) rtTimerLinuxMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
1134{
1135 PRTTIMER pTimer = (PRTTIMER)pvUser;
1136 PRTTIMERLNXSUBTIMER pSubTimer = &pTimer->aSubTimers[idCpu];
1137 RTSPINLOCK hSpinlock;
1138
1139 Assert(idCpu < pTimer->cCpus);
1140
1141 /*
1142 * Some initial paranoia.
1143 */
1144 if (pTimer->u32Magic != RTTIMER_MAGIC)
1145 return;
1146 hSpinlock = pTimer->hSpinlock;
1147 if (hSpinlock == NIL_RTSPINLOCK)
1148 return;
1149
1150 RTSpinlockAcquire(hSpinlock);
1151
1152 /* Is it active? */
1153 if ( !ASMAtomicUoReadBool(&pTimer->fSuspended)
1154 && pTimer->u32Magic == RTTIMER_MAGIC)
1155 {
1156 switch (enmEvent)
1157 {
1158 /*
1159 * Try to do it without leaving the spin lock, but if we have to, retake it
1160 * when we're on the right cpu.
1161 */
1162 case RTMPEVENT_ONLINE:
1163 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STARTING, RTTIMERLNXSTATE_STOPPED))
1164 {
1165 RTTIMERLINUXSTARTONCPUARGS Args;
1166 Args.u64Now = RTTimeSystemNanoTS();
1167 Args.u64First = 0;
1168
1169 if (RTMpCpuId() == idCpu)
1170 rtTimerLnxStartSubTimer(pSubTimer, Args.u64Now, Args.u64First, true /*fPinned*/, pTimer->fHighRes);
1171 else
1172 {
1173 rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED); /* we'll recheck it. */
1174 RTSpinlockRelease(hSpinlock);
1175
1176 RTMpOnSpecific(idCpu, rtTimerLinuxMpStartOnCpu, pTimer, &Args);
1177 return; /* we've left the spinlock */
1178 }
1179 }
1180 break;
1181
1182 /*
1183 * The CPU is (going) offline, make sure the sub-timer is stopped.
1184 *
1185 * Linux will migrate it to a different CPU, but we don't want that. The
1186 * timer callback checks for this.
1187 */
1188 case RTMPEVENT_OFFLINE:
1189 {
1190 RTTIMERLNXSTATE enmState;
1191 while ( (enmState = rtTimerLnxGetState(&pSubTimer->enmState)) == RTTIMERLNXSTATE_ACTIVE
1192 || enmState == RTTIMERLNXSTATE_CALLBACK
1193 || enmState == RTTIMERLNXSTATE_CB_RESTARTING)
1194 {
1195 if (enmState == RTTIMERLNXSTATE_ACTIVE)
1196 {
1197 if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_MP_STOPPING, RTTIMERLNXSTATE_ACTIVE))
1198 {
1199 RTSpinlockRelease(hSpinlock);
1200
1201 rtTimerLnxStopSubTimer(pSubTimer, pTimer->fHighRes);
1202 return; /* we've left the spinlock */
1203 }
1204 }
1205 else if (rtTimerLnxCmpXchgState(&pSubTimer->enmState, RTTIMERLNXSTATE_CB_STOPPING, enmState))
1206 break;
1207
1208 /* State not stable, try again. */
1209 ASMNopPause();
1210 }
1211 break;
1212 }
1213 }
1214 }
1215
1216 RTSpinlockRelease(hSpinlock);
1217}
1218
1219#endif /* CONFIG_SMP */
1220
1221
1222/**
1223 * Callback function used by RTTimerStart via RTMpOnSpecific to start a timer
1224 * running on a specific CPU.
1225 *
1226 * @param idCpu The current CPU.
1227 * @param pvUser1 Pointer to the timer.
1228 * @param pvUser2 Pointer to the argument structure.
1229 */
1230static DECLCALLBACK(void) rtTimerLnxStartOnSpecificCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
1231{
1232 PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
1233 PRTTIMER pTimer = (PRTTIMER)pvUser1;
1234 RT_NOREF_PV(idCpu);
1235 rtTimerLnxStartSubTimer(&pTimer->aSubTimers[0], pArgs->u64Now, pArgs->u64First, true /*fPinned*/, pTimer->fHighRes);
1236}
1237
1238
1239RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
1240{
1241 RTTIMERLINUXSTARTONCPUARGS Args;
1242 int rc2;
1243 IPRT_LINUX_SAVE_EFL_AC();
1244
1245 /*
1246 * Validate.
1247 */
1248 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
1249 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
1250
1251 if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
1252 { IPRT_LINUX_RESTORE_EFL_AC(); return VERR_TIMER_ACTIVE; }
1253 RTTIMERLNX_LOG(("start %p cCpus=%d\n", pTimer, pTimer->cCpus));
1254
1255 Args.u64First = u64First;
1256#ifdef CONFIG_SMP
1257 /*
1258 * Omni timer?
1259 */
1260 if (pTimer->fAllCpus)
1261 {
1262 rc2 = rtTimerLnxOmniStart(pTimer, &Args);
1263 IPRT_LINUX_RESTORE_EFL_AC();
1264 return rc2;
1265 }
1266#endif
1267
1268 /*
1269 * Simple timer - pretty straightforward if it weren't for the restarting case.
1270 */
1271 Args.u64Now = RTTimeSystemNanoTS();
1272 ASMAtomicWriteU64(&pTimer->aSubTimers[0].uNsRestartAt, Args.u64Now + u64First);
1273 for (;;)
1274 {
1275 RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pTimer->aSubTimers[0].enmState);
1276 switch (enmState)
1277 {
1278 case RTTIMERLNXSTATE_STOPPED:
1279 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STARTING, RTTIMERLNXSTATE_STOPPED))
1280 {
1281 ASMAtomicWriteBool(&pTimer->fSuspended, false);
1282 if (!pTimer->fSpecificCpu)
1283 rtTimerLnxStartSubTimer(&pTimer->aSubTimers[0], Args.u64Now, Args.u64First,
1284 false /*fPinned*/, pTimer->fHighRes);
1285 else
1286 {
1287 rc2 = RTMpOnSpecific(pTimer->idCpu, rtTimerLnxStartOnSpecificCpu, pTimer, &Args);
1288 if (RT_FAILURE(rc2))
1289 {
1290 /* Suspend it, the cpu id is probably invalid or offline. */
1291 ASMAtomicWriteBool(&pTimer->fSuspended, true);
1292 rtTimerLnxSetState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STOPPED);
1293 IPRT_LINUX_RESTORE_EFL_AC(); return rc2;
1294 }
1295 }
1296 IPRT_LINUX_RESTORE_EFL_AC();
1297 return VINF_SUCCESS;
1298 }
1299 break;
1300
1301 case RTTIMERLNXSTATE_CALLBACK:
1302 case RTTIMERLNXSTATE_CB_STOPPING:
1303 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_CB_RESTARTING, enmState))
1304 {
1305 ASMAtomicWriteBool(&pTimer->fSuspended, false);
1306 IPRT_LINUX_RESTORE_EFL_AC();
1307 return VINF_SUCCESS;
1308 }
1309 break;
1310
1311 default:
1312 AssertMsgFailed(("%d\n", enmState));
1313 IPRT_LINUX_RESTORE_EFL_AC();
1314 return VERR_INTERNAL_ERROR_4;
1315 }
1316 ASMNopPause();
1317 }
1318}
1319RT_EXPORT_SYMBOL(RTTimerStart);
1320
1321
1322/**
1323 * Common worker for RTTimerStop and RTTimerDestroy.
1324 *
1325 * @returns true if there were any active callbacks, false if not.
1326 * @param pTimer The timer to stop.
1327 * @param fForDestroy Whether it's RTTimerDestroy calling or not.
1328 */
1329static bool rtTimerLnxStop(PRTTIMER pTimer, bool fForDestroy)
1330{
1331 RTTIMERLNX_LOG(("lnxstop %p %d\n", pTimer, fForDestroy));
1332#ifdef CONFIG_SMP
1333 /*
1334 * Omni timer?
1335 */
1336 if (pTimer->fAllCpus)
1337 return rtTimerLnxOmniStop(pTimer, fForDestroy);
1338#endif
1339
1340 /*
1341 * Simple timer.
1342 */
1343 ASMAtomicWriteBool(&pTimer->fSuspended, true);
1344 for (;;)
1345 {
1346 RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pTimer->aSubTimers[0].enmState);
1347 switch (enmState)
1348 {
1349 case RTTIMERLNXSTATE_ACTIVE:
1350 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STOPPING, RTTIMERLNXSTATE_ACTIVE))
1351 {
1352 rtTimerLnxStopSubTimer(&pTimer->aSubTimers[0], pTimer->fHighRes);
1353 return false;
1354 }
1355 break;
1356
1357 case RTTIMERLNXSTATE_CALLBACK:
1358 case RTTIMERLNXSTATE_CB_RESTARTING:
1359 case RTTIMERLNXSTATE_CB_STOPPING:
1360 Assert(enmState != RTTIMERLNXSTATE_CB_STOPPING || fForDestroy);
1361 if (rtTimerLnxCmpXchgState(&pTimer->aSubTimers[0].enmState,
1362 !fForDestroy ? RTTIMERLNXSTATE_CB_STOPPING : RTTIMERLNXSTATE_CB_DESTROYING,
1363 enmState))
1364 return true;
1365 break;
1366
1367 case RTTIMERLNXSTATE_STOPPED:
1368 return false;
1369
1370 case RTTIMERLNXSTATE_CB_DESTROYING:
1371 AssertMsgFailed(("enmState=%d pTimer=%p\n", enmState, pTimer));
1372 return true;
1373
1374 default:
1375 case RTTIMERLNXSTATE_STARTING:
1376 case RTTIMERLNXSTATE_MP_STARTING:
1377 case RTTIMERLNXSTATE_STOPPING:
1378 case RTTIMERLNXSTATE_MP_STOPPING:
1379 AssertMsgFailed(("enmState=%d pTimer=%p\n", enmState, pTimer));
1380 return false;
1381 }
1382
1383 /* State not stable, try again. */
1384 ASMNopPause();
1385 }
1386}
1387
1388
1389RTDECL(int) RTTimerStop(PRTTIMER pTimer)
1390{
1391 /*
1392 * Validate.
1393 */
1394 IPRT_LINUX_SAVE_EFL_AC();
1395 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
1396 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
1397 RTTIMERLNX_LOG(("stop %p\n", pTimer));
1398
1399 if (ASMAtomicUoReadBool(&pTimer->fSuspended))
1400 { IPRT_LINUX_RESTORE_EFL_AC(); return VERR_TIMER_SUSPENDED; }
1401
1402 rtTimerLnxStop(pTimer, false /*fForDestroy*/);
1403
1404 IPRT_LINUX_RESTORE_EFL_AC();
1405 return VINF_SUCCESS;
1406}
1407RT_EXPORT_SYMBOL(RTTimerStop);
1408
1409
1410RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
1411{
1412 unsigned long cJiffies;
1413 unsigned long flFlags;
1414 IPRT_LINUX_SAVE_EFL_AC();
1415
1416 /*
1417 * Validate.
1418 */
1419 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
1420 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
1421 AssertReturn(u64NanoInterval, VERR_INVALID_PARAMETER);
1422 AssertReturn(u64NanoInterval < UINT64_MAX / 8, VERR_INVALID_PARAMETER);
1423 AssertReturn(pTimer->u64NanoInterval, VERR_INVALID_STATE);
1424 RTTIMERLNX_LOG(("change %p %llu\n", pTimer, u64NanoInterval));
1425
1426#ifdef RTTIMER_LINUX_WITH_HRTIMER
1427 /*
1428 * For the high resolution timers it is easy since we don't care so much
1429 * about when it is applied to the sub-timers.
1430 */
1431 if (pTimer->fHighRes)
1432 {
1433 ASMAtomicWriteU64(&pTimer->u64NanoInterval, u64NanoInterval);
1434 IPRT_LINUX_RESTORE_EFL_AC();
1435 return VINF_SUCCESS;
1436 }
1437#endif
1438
1439 /*
1440 * Standard timers have a bit more complicated way of calculating
1441 * their interval and such. So, forget omni timers for now.
1442 */
1443 if (pTimer->cCpus > 1)
1444 return VERR_NOT_SUPPORTED;
1445
1446 cJiffies = u64NanoInterval / (RT_NS_1SEC / HZ);
1447 if (cJiffies * (RT_NS_1SEC / HZ) != u64NanoInterval)
1448 cJiffies = 0;
1449
1450 spin_lock_irqsave(&pTimer->ChgIntLock, flFlags);
1451 pTimer->aSubTimers[0].u.Std.fFirstAfterChg = true;
1452 pTimer->cJiffies = cJiffies;
1453 ASMAtomicWriteU64(&pTimer->u64NanoInterval, u64NanoInterval);
1454 spin_unlock_irqrestore(&pTimer->ChgIntLock, flFlags);
1455 IPRT_LINUX_RESTORE_EFL_AC();
1456 return VINF_SUCCESS;
1457}
1458RT_EXPORT_SYMBOL(RTTimerChangeInterval);
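/*
 * Worked example for the standard-timer path above: on a HZ=250 kernel one
 * tick is RT_NS_1SEC/HZ = 4000000 ns, so a 100ms interval divides evenly and
 * cJiffies becomes 25, while a 15ms interval (3.75 ticks) leaves cJiffies = 0
 * and the callback falls back to pure nanosecond accounting.
 */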
1459
1460
1461RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
1462{
1463 bool fCanDestroy;
1464 IPRT_LINUX_SAVE_EFL_AC();
1465
1466 /*
1467 * Validate. It's ok to pass NULL pointer.
1468 */
1469 if (pTimer == /*NIL_RTTIMER*/ NULL)
1470 return VINF_SUCCESS;
1471 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
1472 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
1473 RTTIMERLNX_LOG(("destroy %p\n", pTimer));
1474/** @todo We should invalidate the magic here! */
1475
1476 /*
1477 * Stop the timer if it's still active, then destroy it if we can.
1478 */
1479 if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
1480 fCanDestroy = rtTimerLnxStop(pTimer, true /*fForDestroy*/);
1481 else
1482 {
1483 uint32_t iCpu = pTimer->cCpus;
1484 if (pTimer->cCpus > 1)
1485 RTSpinlockAcquire(pTimer->hSpinlock);
1486
1487 fCanDestroy = true;
1488 while (iCpu-- > 0)
1489 {
1490 for (;;)
1491 {
1492 RTTIMERLNXSTATE enmState = rtTimerLnxGetState(&pTimer->aSubTimers[iCpu].enmState);
1493 switch (enmState)
1494 {
1495 case RTTIMERLNXSTATE_CALLBACK:
1496 case RTTIMERLNXSTATE_CB_RESTARTING:
1497 case RTTIMERLNXSTATE_CB_STOPPING:
1498 if (!rtTimerLnxCmpXchgState(&pTimer->aSubTimers[iCpu].enmState, RTTIMERLNXSTATE_CB_DESTROYING, enmState))
1499 continue;
1500 fCanDestroy = false;
1501 break;
1502
1503 case RTTIMERLNXSTATE_CB_DESTROYING:
1504 AssertMsgFailed(("%d\n", enmState));
1505 fCanDestroy = false;
1506 break;
1507 default:
1508 break;
1509 }
1510 break;
1511 }
1512 }
1513
1514 if (pTimer->cCpus > 1)
1515 RTSpinlockRelease(pTimer->hSpinlock);
1516 }
1517
1518 if (fCanDestroy)
1519 {
1520 /* For paranoid reasons, defer actually destroying the timer when
1521 in atomic or interrupt context. */
1522#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 32)
1523 if (in_atomic() || in_interrupt())
1524#else
1525 if (in_interrupt())
1526#endif
1527 rtR0LnxWorkqueuePush(&pTimer->DtorWorkqueueItem, rtTimerLnxDestroyDeferred);
1528 else
1529 rtTimerLnxDestroyIt(pTimer);
1530 }
1531
1532 IPRT_LINUX_RESTORE_EFL_AC();
1533 return VINF_SUCCESS;
1534}
1535RT_EXPORT_SYMBOL(RTTimerDestroy);
1536
1537
1538RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
1539{
1540 PRTTIMER pTimer;
1541 RTCPUID iCpu;
1542 unsigned cCpus;
1543 int rc;
1544 IPRT_LINUX_SAVE_EFL_AC();
1545
1546 rtR0LnxWorkqueueFlush(); /* for 2.4 */
1547 *ppTimer = NULL;
1548
1549 /*
1550 * Validate flags.
1551 */
1552 if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
1553 {
1554 IPRT_LINUX_RESTORE_EFL_AC();
1555 return VERR_INVALID_PARAMETER;
1556 }
1557 if ( (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
1558 && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
1559 && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
1560 {
1561 IPRT_LINUX_RESTORE_EFL_AC();
1562 return VERR_CPU_NOT_FOUND;
1563 }
1564
1565 /*
1566 * Allocate the timer handle.
1567 */
1568 cCpus = 1;
1569#ifdef CONFIG_SMP
1570 if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
1571 {
1572 cCpus = RTMpGetMaxCpuId() + 1;
1573 Assert(cCpus <= RTCPUSET_MAX_CPUS); /* On linux we have a 1:1 relationship between cpuid and set index. */
1574 AssertReturnStmt(u64NanoInterval, IPRT_LINUX_RESTORE_EFL_AC(), VERR_NOT_IMPLEMENTED); /* We don't implement single shot on all cpus, sorry. */
1575 }
1576#endif
1577
1578 rc = RTMemAllocEx(RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[cCpus]), 0,
1579 RTMEMALLOCEX_FLAGS_ZEROED | RTMEMALLOCEX_FLAGS_ANY_CTX_FREE, (void **)&pTimer);
1580 if (RT_FAILURE(rc))
1581 {
1582 IPRT_LINUX_RESTORE_EFL_AC();
1583 return rc;
1584 }
1585
1586 /*
1587 * Initialize it.
1588 */
1589 pTimer->u32Magic = RTTIMER_MAGIC;
1590 pTimer->hSpinlock = NIL_RTSPINLOCK;
1591 pTimer->fSuspended = true;
1592 pTimer->fHighRes = !!(fFlags & RTTIMER_FLAGS_HIGH_RES);
1593#ifdef CONFIG_SMP
1594 pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
1595 pTimer->fAllCpus = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
1596 pTimer->idCpu = pTimer->fSpecificCpu
1597 ? RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)
1598 : NIL_RTCPUID;
1599#else
1600 pTimer->fSpecificCpu = !!(fFlags & RTTIMER_FLAGS_CPU_SPECIFIC);
1601 pTimer->idCpu = RTMpCpuId();
1602#endif
1603 pTimer->cCpus = cCpus;
1604 pTimer->pfnTimer = pfnTimer;
1605 pTimer->pvUser = pvUser;
1606 pTimer->u64NanoInterval = u64NanoInterval;
1607 pTimer->cJiffies = u64NanoInterval / (RT_NS_1SEC / HZ);
1608 if (pTimer->cJiffies * (RT_NS_1SEC / HZ) != u64NanoInterval)
1609 pTimer->cJiffies = 0;
1610 spin_lock_init(&pTimer->ChgIntLock);
1611
1612 for (iCpu = 0; iCpu < cCpus; iCpu++)
1613 {
1614#ifdef RTTIMER_LINUX_WITH_HRTIMER
1615 if (pTimer->fHighRes)
1616 {
1617 hrtimer_init(&pTimer->aSubTimers[iCpu].u.Hr.LnxTimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1618 pTimer->aSubTimers[iCpu].u.Hr.LnxTimer.function = rtTimerLinuxHrCallback;
1619 }
1620 else
1621#endif
1622 {
1623#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
1624 timer_setup(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer, rtTimerLinuxStdCallback, TIMER_PINNED);
1625#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
1626 init_timer_pinned(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer);
1627#else
1628 init_timer(&pTimer->aSubTimers[iCpu].u.Std.LnxTimer);
1629#endif
1630#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
1631 pTimer->aSubTimers[iCpu].u.Std.LnxTimer.data = (unsigned long)&pTimer->aSubTimers[iCpu];
1632 pTimer->aSubTimers[iCpu].u.Std.LnxTimer.function = rtTimerLinuxStdCallback;
1633#endif
1634 pTimer->aSubTimers[iCpu].u.Std.LnxTimer.expires = jiffies;
1635 pTimer->aSubTimers[iCpu].u.Std.u64NextTS = 0;
1636 }
1637 pTimer->aSubTimers[iCpu].iTick = 0;
1638 pTimer->aSubTimers[iCpu].pParent = pTimer;
1639 pTimer->aSubTimers[iCpu].enmState = RTTIMERLNXSTATE_STOPPED;
1640 }
1641
1642#ifdef CONFIG_SMP
1643 /*
1644 * If this is running on ALL cpus, we'll have to register a callback
1645 * for MP events (so timers can be started/stopped on cpus going
1646 * online/offline). We also create the spinlock for synchronizing
1647 * stop/start/mp-event.
1648 */
1649 if (cCpus > 1)
1650 {
1651 int rc = RTSpinlockCreate(&pTimer->hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "RTTimerLnx");
1652 if (RT_SUCCESS(rc))
1653 rc = RTMpNotificationRegister(rtTimerLinuxMpEvent, pTimer);
1654 else
1655 pTimer->hSpinlock = NIL_RTSPINLOCK;
1656 if (RT_FAILURE(rc))
1657 {
1658 RTTimerDestroy(pTimer);
1659 IPRT_LINUX_RESTORE_EFL_AC();
1660 return rc;
1661 }
1662 }
1663#endif /* CONFIG_SMP */
1664
1665 RTTIMERLNX_LOG(("create %p hires=%d fFlags=%#x cCpus=%u\n", pTimer, pTimer->fHighRes, fFlags, cCpus));
1666 *ppTimer = pTimer;
1667 IPRT_LINUX_RESTORE_EFL_AC();
1668 return VINF_SUCCESS;
1669}
1670RT_EXPORT_SYMBOL(RTTimerCreateEx);
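/*
 * A minimal ring-0 usage sketch of the API implemented in this file; the
 * driver and callback names are hypothetical and error handling is trimmed:
 */
#if 0
static DECLCALLBACK(void) myDrvTick(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
{
    /* Timer context: keep this short, it may run with interrupts disabled. */
}

static int myDrvArmTimer(PRTTIMER *ppTimer)
{
    /* 10ms periodic timer, high resolution when the kernel provides it. */
    int rc = RTTimerCreateEx(ppTimer, UINT64_C(10000000) /* 10ms */,
                             RTTimerCanDoHighResolution() ? RTTIMER_FLAGS_HIGH_RES : 0,
                             myDrvTick, NULL /*pvUser*/);
    if (RT_SUCCESS(rc))
        rc = RTTimerStart(*ppTimer, 0 /*u64First*/);
    return rc;
}
#endif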
1671
1672
1673RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
1674{
1675#if 0 /** @todo Not sure if this is what we want or not... Add new API for
1676 * querying the resolution of the high res timers? */
1677 struct timespec Ts;
1678 int rc;
1679 IPRT_LINUX_SAVE_EFL_AC();
1680 rc = hrtimer_get_res(CLOCK_MONOTONIC, &Ts);
1681 IPRT_LINUX_RESTORE_EFL_AC();
1682 if (!rc)
1683 {
1684 Assert(!Ts.tv_sec);
1685 return Ts.tv_nsec;
1686 }
1687#endif
1688 /* */
1689#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) || LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
1690 /* On 4.9, 4.10 and 4.12 we've observed tstRTR0Timer failures of the omni timer tests
1691 where we get about half of the ticks we want. The failing test is using this value
1692 as interval. So, this is a very, very crude hack to try to make omni timers work
1693 correctly without actually knowing what's going wrong... */
1694 return RT_NS_1SEC * 2 / HZ; /* ns */
1695#else
1696 return RT_NS_1SEC / HZ; /* ns */
1697#endif
1698}
1699RT_EXPORT_SYMBOL(RTTimerGetSystemGranularity);
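/*
 * Worked example: with HZ=250 the function above returns 8000000 ns (two
 * ticks) on kernels before 4.9 or from 4.13 onwards, and 4000000 ns (one
 * tick) on the 4.9-4.12 kernels singled out by the workaround.
 */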
1700
1701
1702RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
1703{
1704 RT_NOREF_PV(u32Request); RT_NOREF_PV(*pu32Granted);
1705 return VERR_NOT_SUPPORTED;
1706}
1707RT_EXPORT_SYMBOL(RTTimerRequestSystemGranularity);
1708
1709
1710RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
1711{
1712 RT_NOREF_PV(u32Granted);
1713 return VERR_NOT_SUPPORTED;
1714}
1715RT_EXPORT_SYMBOL(RTTimerReleaseSystemGranularity);
1716
1717
1718RTDECL(bool) RTTimerCanDoHighResolution(void)
1719{
1720#ifdef RTTIMER_LINUX_WITH_HRTIMER
1721 return true;
1722#else
1723 return false;
1724#endif
1725}
1726RT_EXPORT_SYMBOL(RTTimerCanDoHighResolution);
1727