VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/timer-r0drv-solaris.c@ 54233

Last change on this file since 54233 was 54233, checked in by vboxsync, 10 years ago

Runtime/r0drv/solaris: fixed forceful rescheduling of timer-callback when firing on the wrong CPU due to a race between cyclic_add() and cyclic_bind().

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.0 KB
Line 
1/* $Id: timer-r0drv-solaris.c 54233 2015-02-17 14:57:26Z vboxsync $ */
2/** @file
3 * IPRT - Timer, Ring-0 Driver, Solaris.
4 */
5
6/*
7 * Copyright (C) 2006-2014 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#include "the-solaris-kernel.h"
32#include "internal/iprt.h"
33#include <iprt/timer.h>
34
35#include <iprt/asm.h>
36#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
37# include <iprt/asm-amd64-x86.h>
38#endif
39#include <iprt/assert.h>
40#include <iprt/err.h>
41#include <iprt/mem.h>
42#include <iprt/mp.h>
43#include <iprt/spinlock.h>
44#include <iprt/time.h>
45#include <iprt/thread.h>
46#include "internal/magics.h"
47
48
49/*******************************************************************************
50* Structures and Typedefs *
51*******************************************************************************/
/**
 * The internal representation of a Solaris timer handle.
 *
 * Allocated by RTTimerCreateEx(); the omni (all-CPU) variant is allocated with
 * a variable-size aPerCpu[] tail (one entry per possible CPU, indexed by CPU id).
 */
typedef struct RTTIMER
{
    /** Magic.
     * This is RTTIMER_MAGIC, but changes to something else before the timer
     * is destroyed to indicate clearly that thread should exit. */
    uint32_t volatile       u32Magic;
    /** Reference counter.  Owned jointly by the creator and (transiently)
     * anyone operating on the timer; freed when it reaches zero. */
    uint32_t volatile       cRefs;
    /** Flag indicating that the timer is suspended (hCyclicId should be
     * CYCLIC_NONE). */
    bool volatile           fSuspended;
    /** Flag indicating that the timer was suspended from the timer callback and
     * therefore the hCyclicId may still be valid. */
    bool volatile           fSuspendedFromTimer;
    /** Flag indicating that the timer interval was changed and that it requires
     * manual expiration time programming for each callout. */
    bool volatile           fIntervalChanged;
    /** Whether the timer must run on all CPUs or not. */
    uint8_t                 fAllCpus;
    /** Whether the timer must run on a specific CPU or not. */
    uint8_t                 fSpecificCpu;
    /** The CPU it must run on if fSpecificCpu is set. */
    uint32_t                iCpu;
    /** The nano second interval for repeating timers.  Zero for one-shots. */
    uint64_t volatile       cNsInterval;
    /** Cyclic timer Id.  This is CYCLIC_NONE if no active timer.
     * @remarks Please keep in mind that cyclic may call us back before the
     *          cyclic_add/cyclic_add_omni functions returns, so don't use this
     *          unguarded with cyclic_reprogram. */
    cyclic_id_t             hCyclicId;
    /** The user callback. */
    PFNRTTIMER              pfnTimer;
    /** The argument for the user callback. */
    void                   *pvUser;
    /** Union with timer type specific data. */
    union
    {
        /** Single timer (fAllCpus == false). */
        struct
        {
            /** Cyclic handler. */
            cyc_handler_t   Handler;
            /** Cyclic time and interval representation. */
            cyc_time_t      FireTime;
            /** Timer ticks. */
            uint64_t        u64Tick;
            /** The next tick when fIntervalChanged is true, otherwise 0. */
            uint64_t        nsNextTick;
            /** The (interrupt) thread currently active in the callback. */
            kthread_t * volatile pActiveThread;
        } Single;

        /** Omni timer (fAllCpus == true). */
        struct
        {
            /** Absolute timestamp of when the timer should fire first when starting up. */
            uint64_t        u64When;
            /** Array of per CPU data (variable size). */
            struct
            {
                /** Timer ticks (reinitialized when online'd). */
                uint64_t    u64Tick;
                /** The (interrupt) thread currently active in the callback. */
                kthread_t * volatile pActiveThread;
                /** The next tick when fIntervalChanged is true, otherwise 0. */
                uint64_t    nsNextTick;
            } aPerCpu[1];
        } Omni;
    } u;
} RTTIMER;
125
126
127/*******************************************************************************
128* Defined Constants And Macros *
129*******************************************************************************/
/** Validates that the timer is valid (non-NULL pointer with a live magic);
 * returns VERR_INVALID_HANDLE from the calling function otherwise. */
#define RTTIMER_ASSERT_VALID_RET(pTimer) \
    do \
    { \
        AssertPtrReturn(pTimer, VERR_INVALID_HANDLE); \
        AssertMsgReturn((pTimer)->u32Magic == RTTIMER_MAGIC, ("pTimer=%p u32Magic=%x expected %x\n", (pTimer), (pTimer)->u32Magic, RTTIMER_MAGIC), \
                        VERR_INVALID_HANDLE); \
    } while (0)
138
139
140/*******************************************************************************
141* Internal Functions *
142*******************************************************************************/
143static void rtTimerSolSingleCallbackWrapper(void *pvArg);
144static void rtTimerSolStopIt(PRTTIMER pTimer);
145
146
147/**
148 * Retains a reference to the timer.
149 *
150 * @returns New reference counter value.
151 * @param pTimer The timer.
152 */
153DECLINLINE(uint32_t) rtTimerSolRetain(PRTTIMER pTimer)
154{
155 return ASMAtomicIncU32(&pTimer->cRefs);
156}
157
158
159/**
160 * Destroys the timer when the reference counter has reached zero.
161 *
162 * @returns 0 (new references counter value).
163 * @param pTimer The timer.
164 */
165static uint32_t rtTimeSolReleaseCleanup(PRTTIMER pTimer)
166{
167 Assert(pTimer->hCyclicId == CYCLIC_NONE);
168 ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
169 RTMemFree(pTimer);
170 return 0;
171}
172
173
174/**
175 * Releases a reference to the timer.
176 *
177 * @returns New reference counter value.
178 * @param pTimer The timer.
179 */
180DECLINLINE(uint32_t) rtTimerSolRelease(PRTTIMER pTimer)
181{
182 uint32_t cRefs = ASMAtomicDecU32(&pTimer->cRefs);
183 if (!cRefs)
184 return rtTimeSolReleaseCleanup(pTimer);
185 return cRefs;
186}
187
188
/**
 * RTMpOnSpecific callback used by rtTimerSolCallbackWrapper() to deal with
 * callouts on the wrong CPU (race with cyclic_bind).
 *
 * @param   idCpu   The CPU this is fired on.
 * @param   pvUser1 Opaque pointer to the timer.
 * @param   pvUser2 Not used, NULL.
 */
static void rtTimerSolMpCallbackWrapper(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PRTTIMER pTimer = (PRTTIMER)pvUser1;
    AssertPtrReturnVoid(pTimer);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    /* We must now be on the CPU the timer was bound to. */
    AssertReturnVoid(pTimer->iCpu == RTMpCpuId()); /* ASSUMES: index == cpuid */

    /* This avoids some code duplication. */
    rtTimerSolSingleCallbackWrapper(pTimer);
}
207
208
/**
 * Callback wrapper for single-CPU timers.
 *
 * @param   pvArg   Opaque pointer to the timer.
 *
 * @remarks This will be executed in interrupt context but only at the specified
 *          level i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
 *          cyclic subsystem here, neither should pfnTimer().
 */
static void rtTimerSolSingleCallbackWrapper(void *pvArg)
{
    PRTTIMER pTimer = (PRTTIMER)pvArg;
    AssertPtrReturnVoid(pTimer);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    Assert(!pTimer->fAllCpus);

    /* Make sure one-shots do not fire another time. */
    Assert(   !pTimer->fSuspended
           || pTimer->cNsInterval != 0);

    if (!pTimer->fSuspendedFromTimer)
    {
        /* Make sure we are firing on the right CPU. */
        Assert(   !pTimer->fSpecificCpu
               || pTimer->iCpu == RTMpCpuId());

        /* For one-shot, we may allow the callback to restart them.  Mark it as
           suspended-from-timer *before* the callout so a restart is visible. */
        if (pTimer->cNsInterval == 0)
            pTimer->fSuspendedFromTimer = true;

        /*
         * Perform the callout.  pActiveThread lets RTTimerStop/Destroy detect
         * that they are being called from within this very callback.
         */
        pTimer->u.Single.pActiveThread = curthread;

        uint64_t u64Tick = ++pTimer->u.Single.u64Tick;
        pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);

        pTimer->u.Single.pActiveThread = NULL;

        if (RT_LIKELY(!pTimer->fSuspendedFromTimer))
        {
            /* hCyclicId may still be CYCLIC_NONE if cyclic fired before
               cyclic_add() returned; skip reprogramming in that case. */
            if (   !pTimer->fIntervalChanged
                || RT_UNLIKELY(pTimer->hCyclicId == CYCLIC_NONE))
                return;

            /*
             * The interval was changed, we need to set the expiration time
             * ourselves before returning. This comes at a slight cost,
             * which is why we don't do it all the time.
             */
            if (pTimer->u.Single.nsNextTick)
                pTimer->u.Single.nsNextTick += ASMAtomicUoReadU64(&pTimer->cNsInterval);
            else
                pTimer->u.Single.nsNextTick = RTTimeSystemNanoTS() + ASMAtomicUoReadU64(&pTimer->cNsInterval);
            cyclic_reprogram(pTimer->hCyclicId, pTimer->u.Single.nsNextTick);
            return;
        }

        /*
         * The timer has been suspended, set expiration time to infinity.
         */
    }
    /* Park the cyclic; the actual cyclic_remove happens outside interrupt
       context (RTTimerStart/rtTimerSolStopIt). */
    if (RT_LIKELY(pTimer->hCyclicId != CYCLIC_NONE))
        cyclic_reprogram(pTimer->hCyclicId, CY_INFINITY);
}
275
276
/**
 * Callback wrapper for Omni-CPU timers.
 *
 * @param   pvArg   Opaque pointer to the timer.
 *
 * @remarks This will be executed in interrupt context but only at the specified
 *          level i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
 *          cyclic subsystem here, neither should pfnTimer().
 */
static void rtTimerSolOmniCallbackWrapper(void *pvArg)
{
    PRTTIMER pTimer = (PRTTIMER)pvArg;
    AssertPtrReturnVoid(pTimer);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    Assert(pTimer->fAllCpus);

    if (!pTimer->fSuspendedFromTimer)
    {
        /*
         * Perform the callout.  Per-CPU state is indexed by the current CPU id.
         */
        uint32_t const iCpu = CPU->cpu_id;

        pTimer->u.Omni.aPerCpu[iCpu].pActiveThread = curthread;
        uint64_t u64Tick = ++pTimer->u.Omni.aPerCpu[iCpu].u64Tick;

        pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);

        pTimer->u.Omni.aPerCpu[iCpu].pActiveThread = NULL;

        if (RT_LIKELY(!pTimer->fSuspendedFromTimer))
        {
            /* hCyclicId may still be CYCLIC_NONE if cyclic fired before
               cyclic_add_omni() returned; skip reprogramming in that case. */
            if (   !pTimer->fIntervalChanged
                || RT_UNLIKELY(pTimer->hCyclicId == CYCLIC_NONE))
                return;

            /*
             * The interval was changed, we need to set the expiration time
             * ourselves before returning. This comes at a slight cost,
             * which is why we don't do it all the time.
             *
             * Note! The cyclic_reprogram call only affects the omni cyclic
             *       component for this CPU.
             */
            if (pTimer->u.Omni.aPerCpu[iCpu].nsNextTick)
                pTimer->u.Omni.aPerCpu[iCpu].nsNextTick += ASMAtomicUoReadU64(&pTimer->cNsInterval);
            else
                pTimer->u.Omni.aPerCpu[iCpu].nsNextTick = RTTimeSystemNanoTS() + ASMAtomicUoReadU64(&pTimer->cNsInterval);
            cyclic_reprogram(pTimer->hCyclicId, pTimer->u.Omni.aPerCpu[iCpu].nsNextTick);
            return;
        }

        /*
         * The timer has been suspended, set expiration time to infinity.
         */
    }
    if (RT_LIKELY(pTimer->hCyclicId != CYCLIC_NONE))
        cyclic_reprogram(pTimer->hCyclicId, CY_INFINITY);
}
336
337
/**
 * Omni-CPU cyclic online event. This is called before the omni cycle begins to
 * fire on the specified CPU.
 *
 * @param   pvArg           Opaque pointer to the timer.
 * @param   pCpu            Pointer to the CPU on which it will fire.
 * @param   pCyclicHandler  Pointer to a cyclic handler to add to the CPU
 *                          specified in @a pCpu.
 * @param   pCyclicTime     Pointer to the cyclic time and interval object.
 *
 * @remarks We -CANNOT- call back into the cyclic subsystem here, we can however
 *          block (sleep).
 */
static void rtTimerSolOmniCpuOnline(void *pvArg, cpu_t *pCpu, cyc_handler_t *pCyclicHandler, cyc_time_t *pCyclicTime)
{
    PRTTIMER pTimer = (PRTTIMER)pvArg;
    AssertPtrReturnVoid(pTimer);
    AssertPtrReturnVoid(pCpu);
    AssertPtrReturnVoid(pCyclicHandler);
    AssertPtrReturnVoid(pCyclicTime);
    uint32_t const iCpu = pCpu->cpu_id; /* Note! CPU is not necessarily the same as pCpu. */

    /* (Re)initialize the per-CPU state for this CPU. */
    pTimer->u.Omni.aPerCpu[iCpu].u64Tick = 0;
    pTimer->u.Omni.aPerCpu[iCpu].nsNextTick = 0;

    pCyclicHandler->cyh_func  = (cyc_func_t)rtTimerSolOmniCallbackWrapper;
    pCyclicHandler->cyh_arg   = pTimer;
    pCyclicHandler->cyh_level = CY_LOCK_LEVEL;

    /* If the configured first-fire time is already in the past (e.g. a CPU
       coming online after the timer started), fire half an interval from now. */
    uint64_t u64Now = RTTimeSystemNanoTS();
    if (pTimer->u.Omni.u64When < u64Now)
        pCyclicTime->cyt_when = u64Now + pTimer->cNsInterval / 2;
    else
        pCyclicTime->cyt_when = pTimer->u.Omni.u64When;

    pCyclicTime->cyt_interval = pTimer->cNsInterval;
}
375
376
377RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
378{
379 RT_ASSERT_PREEMPTIBLE();
380 *ppTimer = NULL;
381
382 /*
383 * Validate flags.
384 */
385 if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
386 return VERR_INVALID_PARAMETER;
387
388 if ( (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
389 && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
390 && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
391 return VERR_CPU_NOT_FOUND;
392
393 /* One-shot omni timers are not supported by the cyclic system. */
394 if ( (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL
395 && u64NanoInterval == 0)
396 return VERR_NOT_SUPPORTED;
397
398 /*
399 * Allocate and initialize the timer handle. The omni variant has a
400 * variable sized array of ticks counts, thus the size calculation.
401 */
402 PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ( (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL
403 ? RT_OFFSETOF(RTTIMER, u.Omni.aPerCpu[RTMpGetCount()])
404 : sizeof(RTTIMER));
405 if (!pTimer)
406 return VERR_NO_MEMORY;
407
408 pTimer->u32Magic = RTTIMER_MAGIC;
409 pTimer->cRefs = 1;
410 pTimer->fSuspended = true;
411 pTimer->fSuspendedFromTimer = false;
412 pTimer->fIntervalChanged = false;
413 if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
414 {
415 pTimer->fAllCpus = true;
416 pTimer->fSpecificCpu = false;
417 pTimer->iCpu = UINT32_MAX;
418 }
419 else if (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
420 {
421 pTimer->fAllCpus = false;
422 pTimer->fSpecificCpu = true;
423 pTimer->iCpu = fFlags & RTTIMER_FLAGS_CPU_MASK; /* ASSUMES: index == cpuid */
424 }
425 else
426 {
427 pTimer->fAllCpus = false;
428 pTimer->fSpecificCpu = false;
429 pTimer->iCpu = UINT32_MAX;
430 }
431 pTimer->cNsInterval = u64NanoInterval;
432 pTimer->pfnTimer = pfnTimer;
433 pTimer->pvUser = pvUser;
434 pTimer->hCyclicId = CYCLIC_NONE;
435
436 *ppTimer = pTimer;
437 return VINF_SUCCESS;
438}
439
440
/**
 * Checks if the calling thread is currently executing the timer procedure for
 * the given timer.
 *
 * @returns true if it is, false if it isn't.
 * @param   pTimer  The timer in question.
 */
DECLINLINE(bool) rtTimerSolIsCallingFromTimerProc(PRTTIMER pTimer)
{
    kthread_t *pCurThread = curthread;
    AssertReturn(pCurThread, false); /* serious paranoia */

    /* The callback wrappers publish curthread in pActiveThread for the
       duration of the user callout; compare against that. */
    if (!pTimer->fAllCpus)
        return pTimer->u.Single.pActiveThread == pCurThread;
    return pTimer->u.Omni.aPerCpu[CPU->cpu_id].pActiveThread == pCurThread;
}
457
458
RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
{
    /* NULL is a no-op, like free(). */
    if (pTimer == NULL)
        return VINF_SUCCESS;
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    /*
     * It is not possible to destroy a timer from its callback function.
     * Cyclic makes that impossible (or at least extremely risky).
     */
    AssertReturn(!rtTimerSolIsCallingFromTimerProc(pTimer), VERR_INVALID_CONTEXT);

    /*
     * Invalidate the handle, make sure it's stopped and free the associated resources.
     */
    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);

    if (   !pTimer->fSuspended
        || pTimer->hCyclicId != CYCLIC_NONE) /* 2nd check shouldn't happen */
        rtTimerSolStopIt(pTimer);

    /* Drops the creator reference; frees the timer when it hits zero. */
    rtTimerSolRelease(pTimer);
    return VINF_SUCCESS;
}
484
485
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    /*
     * It's not possible to restart a one-shot timer from its callback function,
     * at least not at the moment.
     */
    AssertReturn(!rtTimerSolIsCallingFromTimerProc(pTimer), VERR_INVALID_CONTEXT);

    /* cpu_lock serializes all cyclic_add/remove/bind operations. */
    mutex_enter(&cpu_lock);

    /*
     * Make sure it's not active already.  If it was suspended from a timer
     * callback function, we need to do some cleanup work here before we can
     * restart the timer.
     */
    if (!pTimer->fSuspended)
    {
        if (!pTimer->fSuspendedFromTimer)
        {
            mutex_exit(&cpu_lock);
            return VERR_TIMER_ACTIVE;
        }
        /* The callback left the cyclic parked at CY_INFINITY; remove it now. */
        cyclic_remove(pTimer->hCyclicId);
        pTimer->hCyclicId = CYCLIC_NONE;
    }

    pTimer->fSuspended = false;
    pTimer->fSuspendedFromTimer = false;
    pTimer->fIntervalChanged = false;
    if (pTimer->fAllCpus)
    {
        /*
         * Setup omni (all CPU) timer. The Omni-CPU online event will fire
         * and from there we setup periodic timers per CPU.
         */
        pTimer->u.Omni.u64When  = RTTimeSystemNanoTS() + (u64First ? u64First : pTimer->cNsInterval);

        cyc_omni_handler_t HandlerOmni;
        HandlerOmni.cyo_online  = rtTimerSolOmniCpuOnline;
        HandlerOmni.cyo_offline = NULL;
        HandlerOmni.cyo_arg     = pTimer;

        pTimer->hCyclicId = cyclic_add_omni(&HandlerOmni);
    }
    else
    {
        /*
         * Setup a single CPU timer.   If a specific CPU was requested, it
         * must be online or the timer cannot start.
         */
        if (   pTimer->fSpecificCpu
            && !RTMpIsCpuOnline(pTimer->iCpu)) /* ASSUMES: index == cpuid */
        {
            pTimer->fSuspended = true;

            mutex_exit(&cpu_lock);
            return VERR_CPU_OFFLINE;
        }

        /** @todo we probably don't need to have cyc_handler_t and cyc_time_t in the
         *        timer structure... */
        pTimer->u.Single.Handler.cyh_func  = (cyc_func_t)rtTimerSolSingleCallbackWrapper;
        pTimer->u.Single.Handler.cyh_arg   = pTimer;
        pTimer->u.Single.Handler.cyh_level = CY_LOCK_LEVEL;

        /*
         * Use a large interval (1 hour) so that we don't get a timer-callback between
         * cyclic_add() and cyclic_bind(). Program the correct interval once cyclic_bind() is done.
         * See @bugref{7691} comment #20.
         */
        pTimer->u.Single.FireTime.cyt_when = RTTimeSystemNanoTS() + u64First;
        if (pTimer->fSpecificCpu)
            pTimer->u.Single.FireTime.cyt_when += RT_NS_1HOUR;
        pTimer->u.Single.FireTime.cyt_interval = pTimer->cNsInterval != 0
                                               ? pTimer->cNsInterval
                                               : CY_INFINITY /* Special value, see cyclic_fire(). */;
        pTimer->u.Single.u64Tick = 0;
        pTimer->u.Single.nsNextTick = 0;

        pTimer->hCyclicId = cyclic_add(&pTimer->u.Single.Handler, &pTimer->u.Single.FireTime);
        if (pTimer->fSpecificCpu)
        {
            /* Bind to the requested CPU, then program the real (near-term)
               expiration time now that a wrong-CPU callout can no longer race us. */
            cyclic_bind(pTimer->hCyclicId, cpu[pTimer->iCpu], NULL /* cpupart */);
            cyclic_reprogram(pTimer->hCyclicId, RTTimeSystemNanoTS() + u64First);
        }
    }

    mutex_exit(&cpu_lock);
    return VINF_SUCCESS;
}
580
581
/**
 * Worker common for RTTimerStop and RTTimerDestroy.
 *
 * Removes the cyclic (if any) under cpu_lock and marks the timer suspended.
 * Must not be called from the timer callback (cyclic_remove would deadlock).
 *
 * @param   pTimer  The timer to stop.
 */
static void rtTimerSolStopIt(PRTTIMER pTimer)
{
    mutex_enter(&cpu_lock);

    /* Set fSuspended before removing the cyclic so the callback wrapper sees
       a consistent state. */
    pTimer->fSuspended = true;
    if (pTimer->hCyclicId != CYCLIC_NONE)
    {
        cyclic_remove(pTimer->hCyclicId);
        pTimer->hCyclicId = CYCLIC_NONE;
    }
    pTimer->fSuspendedFromTimer = false;

    mutex_exit(&cpu_lock);
}
601
602
RTDECL(int) RTTimerStop(PRTTIMER pTimer)
{
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    if (pTimer->fSuspended)
        return VERR_TIMER_SUSPENDED;

    /* Trying the cpu_lock stuff and calling cyclic_remove may deadlock
       the system, so just mark the timer as suspended and deal with it in
       the callback wrapper function above. */
    if (rtTimerSolIsCallingFromTimerProc(pTimer))
        pTimer->fSuspendedFromTimer = true;
    else
        rtTimerSolStopIt(pTimer);

    return VINF_SUCCESS;
}
621
622
RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
{
    /*
     * Validate.  One-shot timers (cNsInterval == 0) cannot have their
     * interval changed.
     */
    RTTIMER_ASSERT_VALID_RET(pTimer);
    AssertReturn(u64NanoInterval > 0, VERR_INVALID_PARAMETER);
    AssertReturn(u64NanoInterval < UINT64_MAX / 8, VERR_INVALID_PARAMETER);
    AssertReturn(pTimer->cNsInterval, VERR_INVALID_STATE);

    if (pTimer->fSuspended || pTimer->fSuspendedFromTimer)
        /* Not running: a plain write is fine, it takes effect on next start. */
        pTimer->cNsInterval = u64NanoInterval;
    else
    {
        /* Running: publish the new interval atomically and tell the callback
           wrapper to reprogram expirations manually from now on. */
        ASMAtomicWriteU64(&pTimer->cNsInterval, u64NanoInterval);
        ASMAtomicWriteBool(&pTimer->fIntervalChanged, true);

        /* When called from inside the callback of a single-CPU timer, seed
           nsNextTick so the wrapper's reprogramming starts from "now". */
        if (   !pTimer->fAllCpus
            && !pTimer->u.Single.nsNextTick
            && pTimer->hCyclicId != CYCLIC_NONE
            && rtTimerSolIsCallingFromTimerProc(pTimer))
            pTimer->u.Single.nsNextTick = RTTimeSystemNanoTS();
    }

    return VINF_SUCCESS;
}
649
650
RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
{
    /* The kernel's clock tick length in nanoseconds. */
    return nsec_per_tick;
}
655
656
RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
{
    /* Changing the system tick granularity is not supported on Solaris. */
    return VERR_NOT_SUPPORTED;
}
661
662
RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
{
    /* Nothing to release since RTTimerRequestSystemGranularity never grants. */
    return VERR_NOT_SUPPORTED;
}
667
668
RTDECL(bool) RTTimerCanDoHighResolution(void)
{
    /* The cyclic subsystem provides high-resolution (nanosecond interval) timers. */
    return true;
}
673
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette