VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/timer-r0drv-solaris.c@ 54189

Last change on this file since 54189 was 54189, checked in by vboxsync, 10 years ago

timer-r0drv-solaris.c: Implemented changing interval and stopping timers from the callback. Fixed cyclic leak for one-shot timers.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.0 KB
Line 
1/* $Id: timer-r0drv-solaris.c 54189 2015-02-13 02:29:37Z vboxsync $ */
2/** @file
3 * IPRT - Timer, Ring-0 Driver, Solaris.
4 */
5
6/*
7 * Copyright (C) 2006-2014 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#include "the-solaris-kernel.h"
32#include "internal/iprt.h"
33#include <iprt/timer.h>
34
35#include <iprt/asm.h>
36#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
37# include <iprt/asm-amd64-x86.h>
38#endif
39#include <iprt/assert.h>
40#include <iprt/err.h>
41#include <iprt/mem.h>
42#include <iprt/mp.h>
43#include <iprt/spinlock.h>
44#include <iprt/time.h>
45#include <iprt/thread.h>
46#include "internal/magics.h"
47
48
49/*******************************************************************************
50* Structures and Typedefs *
51*******************************************************************************/
/**
 * The internal representation of a Solaris timer handle.
 *
 * Allocated by RTTimerCreateEx(); for omni (all-CPU) timers the structure is
 * over-allocated so that u.Omni.aPerCpu[] has RTMpGetCount() entries.
 */
typedef struct RTTIMER
{
    /** Magic.
     * This is RTTIMER_MAGIC, but changes to something else before the timer
     * is destroyed to indicate clearly that thread should exit. */
    uint32_t volatile       u32Magic;
    /** Reference counter. */
    uint32_t volatile       cRefs;
    /** Flag indicating that the timer is suspended (hCyclicId should be
     *  CYCLIC_NONE). */
    bool volatile           fSuspended;
    /** Flag indicating that the timer was suspended from the timer callback and
     *  therefore the hCyclicId may still be valid. */
    bool volatile           fSuspendedFromTimer;
    /** Flag indicating that the timer interval was changed and that it requires
     *  manual expiration time programming for each callout. */
    bool volatile           fIntervalChanged;
    /** Whether the timer must run on all CPUs or not. */
    uint8_t                 fAllCpus;
    /** Whether the timer must run on a specific CPU or not. */
    uint8_t                 fSpecificCpu;
    /** The CPU it must run on if fSpecificCpu is set. */
    uint32_t                iCpu;
    /** The nano second interval for repeating timers.  Zero for one-shots. */
    uint64_t volatile       cNsInterval;
    /** Cyclic timer Id.  This is CYCLIC_NONE if no active timer.
     * @remarks Please keep in mind that cyclic may call us back before the
     *          cyclic_add/cyclic_add_omni functions returns, so don't use this
     *          unguarded with cyclic_reprogram. */
    cyclic_id_t             hCyclicId;
    /** The user callback. */
    PFNRTTIMER              pfnTimer;
    /** The argument for the user callback. */
    void                   *pvUser;
    /** Union with timer type specific data. */
    union
    {
        /** Single timer (fAllCpus == false). */
        struct
        {
            /** Cyclic handler. */
            cyc_handler_t   Handler;
            /** Cyclic time and interval representation. */
            cyc_time_t      FireTime;
            /** Timer ticks. */
            uint64_t        u64Tick;
            /** The next tick when fIntervalChanged is true, otherwise 0. */
            uint64_t        nsNextTick;
            /** The (interrupt) thread currently active in the callback. */
            kthread_t * volatile pActiveThread;
        } Single;

        /** Omni timer (fAllCpus == true). */
        struct
        {
            /** Absolute timestamp of when the timer should fire first when starting up. */
            uint64_t        u64When;
            /** Array of per CPU data (variable size). */
            struct
            {
                /** Timer ticks (reinitialized when online'd). */
                uint64_t    u64Tick;
                /** The (interrupt) thread currently active in the callback. */
                kthread_t * volatile pActiveThread;
                /** The next tick when fIntervalChanged is true, otherwise 0. */
                uint64_t    nsNextTick;
            } aPerCpu[1];
        } Omni;
    } u;
} RTTIMER;
125
126
/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Validates that the timer is valid (non-NULL and carrying the live magic),
 *  returning VERR_INVALID_HANDLE from the calling function otherwise. */
#define RTTIMER_ASSERT_VALID_RET(pTimer) \
    do \
    { \
        AssertPtrReturn(pTimer, VERR_INVALID_HANDLE); \
        AssertMsgReturn((pTimer)->u32Magic == RTTIMER_MAGIC, ("pTimer=%p u32Magic=%x expected %x\n", (pTimer), (pTimer)->u32Magic, RTTIMER_MAGIC), \
                        VERR_INVALID_HANDLE); \
    } while (0)
138
139
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static void rtTimerSolSingleCallbackWrapper(void *pvArg);
static void rtTimerSolStopIt(PRTTIMER pTimer);
145
146
147/**
148 * Retains a reference to the timer.
149 *
150 * @returns New reference counter value.
151 * @param pTimer The timer.
152 */
153DECLINLINE(uint32_t) rtTimerSolRetain(PRTTIMER pTimer)
154{
155 return ASMAtomicIncU32(&pTimer->cRefs);
156}
157
158
159/**
160 * Destroys the timer when the reference counter has reached zero.
161 *
162 * @returns 0 (new references counter value).
163 * @param pTimer The timer.
164 */
165static uint32_t rtTimeSolReleaseCleanup(PRTTIMER pTimer)
166{
167 Assert(pTimer->hCyclicId == CYCLIC_NONE);
168 ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
169 RTMemFree(pTimer);
170}
171
172
173/**
174 * Releases a reference to the timer.
175 *
176 * @returns New reference counter value.
177 * @param pTimer The timer.
178 */
179DECLINLINE(uint32_t) rtTimerSolRelease(PRTTIMER pTimer)
180{
181 uint32_t cRefs = ASMAtomicDecU32(&pTimer->cRefs);
182 if (!cRefs)
183 return rtTimeSolReleaseCleanup(pTimer);
184 return cRefs;
185}
186
187
/**
 * RTMpOnSpecific callback used by rtTimerSolCallbackWrapper() to deal with
 * callouts on the wrong CPU (race with cyclic_bind).
 *
 * @param   idCpu   The CPU this is fired on.
 * @param   pvUser1 Opaque pointer to the timer.
 * @param   pvUser2 Not used, NULL.
 */
static void rtTimerSolMpCallbackWrapper(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PRTTIMER pTimer = (PRTTIMER)pvUser1;
    AssertPtrReturnVoid(pTimer);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    /* We must now be executing on the CPU the timer is bound to. */
    AssertReturnVoid(pTimer->iCpu == RTMpCpuId()); /* ASSUMES: index == cpuid */

    /* This avoids some code duplication: the single-CPU wrapper performs the
       actual callout and reprogramming work. */
    rtTimerSolSingleCallbackWrapper(pTimer);
}
206
207
/**
 * Callback wrapper for single-CPU timers.
 *
 * @param   pvArg   Opaque pointer to the timer.
 *
 * @remarks This will be executed in interrupt context but only at the specified
 *          level i.e. CY_LOCK_LEVEL in our case.  We -CANNOT- call into the
 *          cyclic subsystem here, neither should pfnTimer().
 */
static void rtTimerSolSingleCallbackWrapper(void *pvArg)
{
    PRTTIMER pTimer = (PRTTIMER)pvArg;
    AssertPtrReturnVoid(pTimer);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    Assert(!pTimer->fAllCpus);

    /* Make sure one-shots do not fire another time. */
    Assert(   !pTimer->fSuspended
           || pTimer->cNsInterval != 0);

    if (!pTimer->fSuspendedFromTimer)
    {
        /* For specific timers, we might fire on the wrong CPU between cyclic_add() and cyclic_bind().
           Redirect these shots to the right CPU as we are temporarily rebinding to the right CPU. */
        if (RT_UNLIKELY(   pTimer->fSpecificCpu
                        && pTimer->iCpu != RTMpCpuId())) /* ASSUMES: index == cpuid */
        {
            RTMpOnSpecific(pTimer->iCpu, rtTimerSolMpCallbackWrapper, pTimer, NULL);
            return;
        }

        /* For one-shot, we may allow the callback to restart them.  Mark it as
           suspended-from-timer up front so a restart from pfnTimer is detectable. */
        if (pTimer->cNsInterval == 0)
            pTimer->fSuspendedFromTimer = true;

        /*
         * Perform the callout.  pActiveThread lets RTTimerStop/Destroy detect
         * that they're being called from within this very callback.
         */
        pTimer->u.Single.pActiveThread = curthread;

        uint64_t u64Tick = ++pTimer->u.Single.u64Tick;
        pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);

        pTimer->u.Single.pActiveThread = NULL;

        if (RT_LIKELY(!pTimer->fSuspendedFromTimer))
        {
            if (   !pTimer->fIntervalChanged
                || RT_UNLIKELY(pTimer->hCyclicId == CYCLIC_NONE))
                return;

            /*
             * The interval was changed, we need to set the expiration time
             * ourselves before returning.  This comes at a slight cost,
             * which is why we don't do it all the time.
             */
            if (pTimer->u.Single.nsNextTick)
                pTimer->u.Single.nsNextTick += ASMAtomicUoReadU64(&pTimer->cNsInterval);
            else
                pTimer->u.Single.nsNextTick = RTTimeSystemNanoTS() + ASMAtomicUoReadU64(&pTimer->cNsInterval);
            cyclic_reprogram(pTimer->hCyclicId, pTimer->u.Single.nsNextTick);
            return;
        }

        /*
         * The timer has been suspended (from the callback), set expiration
         * time to infinity so it does not fire again.
         */
    }
    if (RT_LIKELY(pTimer->hCyclicId != CYCLIC_NONE))
        cyclic_reprogram(pTimer->hCyclicId, CY_INFINITY);
}
279
280
/**
 * Callback wrapper for Omni-CPU timers.
 *
 * @param   pvArg   Opaque pointer to the timer.
 *
 * @remarks This will be executed in interrupt context but only at the specified
 *          level i.e. CY_LOCK_LEVEL in our case.  We -CANNOT- call into the
 *          cyclic subsystem here, neither should pfnTimer().
 */
static void rtTimerSolOmniCallbackWrapper(void *pvArg)
{
    PRTTIMER pTimer = (PRTTIMER)pvArg;
    AssertPtrReturnVoid(pTimer);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    Assert(pTimer->fAllCpus);

    if (!pTimer->fSuspendedFromTimer)
    {
        /*
         * Perform the callout.  Each CPU has its own tick counter and
         * active-thread marker in aPerCpu.
         */
        uint32_t const iCpu = CPU->cpu_id;

        pTimer->u.Omni.aPerCpu[iCpu].pActiveThread = curthread;
        uint64_t u64Tick = ++pTimer->u.Omni.aPerCpu[iCpu].u64Tick;

        pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);

        pTimer->u.Omni.aPerCpu[iCpu].pActiveThread = NULL;

        if (RT_LIKELY(!pTimer->fSuspendedFromTimer))
        {
            if (   !pTimer->fIntervalChanged
                || RT_UNLIKELY(pTimer->hCyclicId == CYCLIC_NONE))
                return;

            /*
             * The interval was changed, we need to set the expiration time
             * ourselves before returning.  This comes at a slight cost,
             * which is why we don't do it all the time.
             *
             * Note! The cyclic_reprogram call only affects the omni cyclic
             *       component for this CPU.
             */
            if (pTimer->u.Omni.aPerCpu[iCpu].nsNextTick)
                pTimer->u.Omni.aPerCpu[iCpu].nsNextTick += ASMAtomicUoReadU64(&pTimer->cNsInterval);
            else
                pTimer->u.Omni.aPerCpu[iCpu].nsNextTick = RTTimeSystemNanoTS() + ASMAtomicUoReadU64(&pTimer->cNsInterval);
            cyclic_reprogram(pTimer->hCyclicId, pTimer->u.Omni.aPerCpu[iCpu].nsNextTick);
            return;
        }

        /*
         * The timer has been suspended (from the callback), set expiration
         * time to infinity so it does not fire again.
         */
    }
    if (RT_LIKELY(pTimer->hCyclicId != CYCLIC_NONE))
        cyclic_reprogram(pTimer->hCyclicId, CY_INFINITY);
}
340
341
/**
 * Omni-CPU cyclic online event.  This is called before the omni cycle begins to
 * fire on the specified CPU.
 *
 * @param   pvArg           Opaque pointer to the timer.
 * @param   pCpu            Pointer to the CPU on which it will fire.
 * @param   pCyclicHandler  Pointer to a cyclic handler to add to the CPU
 *                          specified in @a pCpu.
 * @param   pCyclicTime     Pointer to the cyclic time and interval object.
 *
 * @remarks We -CANNOT- call back into the cyclic subsystem here, we can however
 *          block (sleep).
 */
static void rtTimerSolOmniCpuOnline(void *pvArg, cpu_t *pCpu, cyc_handler_t *pCyclicHandler, cyc_time_t *pCyclicTime)
{
    PRTTIMER pTimer = (PRTTIMER)pvArg;
    AssertPtrReturnVoid(pTimer);
    AssertPtrReturnVoid(pCpu);
    AssertPtrReturnVoid(pCyclicHandler);
    AssertPtrReturnVoid(pCyclicTime);
    uint32_t const iCpu = pCpu->cpu_id; /* Note! CPU is not necessarily the same as pCpu. */

    /* Reset the per-CPU state for this (re-)onlined CPU. */
    pTimer->u.Omni.aPerCpu[iCpu].u64Tick = 0;
    pTimer->u.Omni.aPerCpu[iCpu].nsNextTick = 0;

    pCyclicHandler->cyh_func  = (cyc_func_t)rtTimerSolOmniCallbackWrapper;
    pCyclicHandler->cyh_arg   = pTimer;
    pCyclicHandler->cyh_level = CY_LOCK_LEVEL;

    /* If the configured first-fire time already lies in the past (e.g. a CPU
       coming online after the timer was started), fire half an interval from
       now instead; otherwise honor the original absolute deadline. */
    uint64_t u64Now = RTTimeSystemNanoTS();
    if (pTimer->u.Omni.u64When < u64Now)
        pCyclicTime->cyt_when = u64Now + pTimer->cNsInterval / 2;
    else
        pCyclicTime->cyt_when = pTimer->u.Omni.u64When;

    pCyclicTime->cyt_interval = pTimer->cNsInterval;
}
379
380
381RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
382{
383 RT_ASSERT_PREEMPTIBLE();
384 *ppTimer = NULL;
385
386 /*
387 * Validate flags.
388 */
389 if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
390 return VERR_INVALID_PARAMETER;
391
392 if ( (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
393 && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
394 && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
395 return VERR_CPU_NOT_FOUND;
396
397 /* One-shot omni timers are not supported by the cyclic system. */
398 if ( (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL
399 && u64NanoInterval == 0)
400 return VERR_NOT_SUPPORTED;
401
402 /*
403 * Allocate and initialize the timer handle. The omni variant has a
404 * variable sized array of ticks counts, thus the size calculation.
405 */
406 PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ( (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL
407 ? RT_OFFSETOF(RTTIMER, u.Omni.aPerCpu[RTMpGetCount()])
408 : sizeof(RTTIMER));
409 if (!pTimer)
410 return VERR_NO_MEMORY;
411
412 pTimer->u32Magic = RTTIMER_MAGIC;
413 pTimer->cRefs = 1;
414 pTimer->fSuspended = true;
415 pTimer->fSuspendedFromTimer = false;
416 pTimer->fIntervalChanged = false;
417 if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
418 {
419 pTimer->fAllCpus = true;
420 pTimer->fSpecificCpu = false;
421 pTimer->iCpu = UINT32_MAX;
422 }
423 else if (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
424 {
425 pTimer->fAllCpus = false;
426 pTimer->fSpecificCpu = true;
427 pTimer->iCpu = fFlags & RTTIMER_FLAGS_CPU_MASK; /* ASSUMES: index == cpuid */
428 }
429 else
430 {
431 pTimer->fAllCpus = false;
432 pTimer->fSpecificCpu = false;
433 pTimer->iCpu = UINT32_MAX;
434 }
435 pTimer->cNsInterval = u64NanoInterval;
436 pTimer->pfnTimer = pfnTimer;
437 pTimer->pvUser = pvUser;
438 pTimer->hCyclicId = CYCLIC_NONE;
439
440 *ppTimer = pTimer;
441 return VINF_SUCCESS;
442}
443
444
445/**
446 * Checks if the calling thread is currently executing the timer proceduce for
447 * the given timer.
448 *
449 * @returns true if it is, false if it isn't.
450 * @param pTimer The timer in question.
451 */
452DECLINLINE(bool) rtTimerSolIsCallingFromTimerProc(PRTTIMER pTimer)
453{
454 kthread_t *pCurThread = curthread;
455 AssertReturn(pCurThread, false); /* serious paranoia */
456
457 if (!pTimer->fAllCpus)
458 return pTimer->u.Single.pActiveThread == pCurThread;
459 return pTimer->u.Omni.aPerCpu[CPU->cpu_id].pActiveThread == pCurThread;
460}
461
462
463RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
464{
465 if (pTimer == NULL)
466 return VINF_SUCCESS;
467 RTTIMER_ASSERT_VALID_RET(pTimer);
468 RT_ASSERT_INTS_ON();
469
470 /*
471 * It is not possible to destroy a timer from it's callback function.
472 * Cyclic makes that impossible (or at least extremely risky).
473 */
474 AssertReturn(!rtTimerSolIsCallingFromTimerProc(pTimer), VERR_INVALID_CONTEXT);
475
476 /*
477 * Invalidate the handle, make sure it's stopped nad free the associated resources.
478 */
479 ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
480
481 if ( !pTimer->fSuspended
482 || pTimer->hCyclicId != CYCLIC_NONE) /* 2nd check shouldn't happen */
483 rtTimerSolStopIt(pTimer);
484
485 rtTimerSolRelease(pTimer);
486 return VINF_SUCCESS;
487}
488
489
/**
 * Starts a timer, arming either an omni (all-CPU) cyclic or a single-CPU
 * cyclic depending on how the timer was created.
 *
 * @returns IPRT status code (VERR_TIMER_ACTIVE if already running,
 *          VERR_CPU_OFFLINE if a specific target CPU isn't online).
 * @param   pTimer      The timer to start.
 * @param   u64First    Relative nanosecond delay before the first firing;
 *                      0 means "one interval from now".
 */
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    /*
     * It's not possible to restart a one-shot timer from its own callback
     * function, at least not at the moment.
     */
    AssertReturn(!rtTimerSolIsCallingFromTimerProc(pTimer), VERR_INVALID_CONTEXT);


    /* cpu_lock is required by cyclic_add/cyclic_add_omni/cyclic_bind. */
    mutex_enter(&cpu_lock);

    /*
     * Make sure it's not active already.  If it was suspended from a timer
     * callback function, we need to do some cleanup work here before we can
     * restart the timer.
     */
    if (!pTimer->fSuspended)
    {
        if (!pTimer->fSuspendedFromTimer)
        {
            mutex_exit(&cpu_lock);
            return VERR_TIMER_ACTIVE;
        }
        /* The callback only flagged suspension; the cyclic is still installed
           and must be removed before re-adding. */
        cyclic_remove(pTimer->hCyclicId);
        pTimer->hCyclicId = CYCLIC_NONE;
    }

    pTimer->fSuspended = false;
    pTimer->fSuspendedFromTimer = false;
    pTimer->fIntervalChanged = false;
    if (pTimer->fAllCpus)
    {
        /*
         * Setup omni (all CPU) timer.  The Omni-CPU online event will fire
         * and from there we setup periodic timers per CPU.
         */
        pTimer->u.Omni.u64When = RTTimeSystemNanoTS() + (u64First ? u64First : pTimer->cNsInterval);

        cyc_omni_handler_t HandlerOmni;
        HandlerOmni.cyo_online  = rtTimerSolOmniCpuOnline;
        HandlerOmni.cyo_offline = NULL;
        HandlerOmni.cyo_arg     = pTimer;

        pTimer->hCyclicId = cyclic_add_omni(&HandlerOmni);
    }
    else
    {
        /*
         * Setup a single CPU timer.  If a specific CPU was requested, it
         * must be online or the timer cannot start.
         */
        if (   pTimer->fSpecificCpu
            && !RTMpIsCpuOnline(pTimer->iCpu)) /* ASSUMES: index == cpuid */
        {
            pTimer->fSuspended = true;

            mutex_exit(&cpu_lock);
            return VERR_CPU_OFFLINE;
        }

        /** @todo we probably don't need to have cyc_handler_t and cyc_time_t in the
         *        timer structure... */
        pTimer->u.Single.Handler.cyh_func  = (cyc_func_t)rtTimerSolSingleCallbackWrapper;
        pTimer->u.Single.Handler.cyh_arg   = pTimer;
        pTimer->u.Single.Handler.cyh_level = CY_LOCK_LEVEL;

        pTimer->u.Single.FireTime.cyt_when = RTTimeSystemNanoTS() + u64First;
        pTimer->u.Single.FireTime.cyt_interval = pTimer->cNsInterval != 0
                                               ? pTimer->cNsInterval
                                               : CY_INFINITY /* Special value, see cyclic_fire. */;
        pTimer->u.Single.u64Tick = 0;
        pTimer->u.Single.nsNextTick = 0;

        /* Note: the cyclic may fire before cyclic_bind() below gets a chance
           to run; the callback wrapper redirects such shots to the right CPU. */
        pTimer->hCyclicId = cyclic_add(&pTimer->u.Single.Handler, &pTimer->u.Single.FireTime);
        if (pTimer->fSpecificCpu)
            cyclic_bind(pTimer->hCyclicId, cpu[pTimer->iCpu], NULL /* cpupart */);
    }

    mutex_exit(&cpu_lock);
    return VINF_SUCCESS;
}
574
575
/**
 * Worker common for RTTimerStop and RTTimerDestroy.
 *
 * Marks the timer suspended and removes the cyclic (if any) while holding
 * cpu_lock, as required by cyclic_remove.  Must not be called from the timer
 * callback itself (deadlock risk - see RTTimerStop).
 *
 * @param   pTimer      The timer to stop.
 */
static void rtTimerSolStopIt(PRTTIMER pTimer)
{
    mutex_enter(&cpu_lock);

    /* Set fSuspended before removing the cyclic so a concurrent callout sees
       the suspension. */
    pTimer->fSuspended = true;
    if (pTimer->hCyclicId != CYCLIC_NONE)
    {
        cyclic_remove(pTimer->hCyclicId);
        pTimer->hCyclicId = CYCLIC_NONE;
    }
    pTimer->fSuspendedFromTimer = false;

    mutex_exit(&cpu_lock);
}
595
596
597RTDECL(int) RTTimerStop(PRTTIMER pTimer)
598{
599 RTTIMER_ASSERT_VALID_RET(pTimer);
600 RT_ASSERT_INTS_ON();
601
602 if (pTimer->fSuspended)
603 return VERR_TIMER_SUSPENDED;
604
605 /* Trying the cpu_lock stuff and calling cyclic_remove may deadlock
606 the system, so just mark the timer as suspened and deal with it in
607 the callback wrapper function above. */
608 if (rtTimerSolIsCallingFromTimerProc(pTimer))
609 pTimer->fSuspendedFromTimer = true;
610 else
611 rtTimerSolStopIt(pTimer);
612
613 return VINF_SUCCESS;
614}
615
616
/**
 * Changes the interval of a periodic timer.
 *
 * @returns IPRT status code.
 * @param   pTimer          The timer (must be a periodic one - interval
 *                          timers only, see the cNsInterval assertion).
 * @param   u64NanoInterval The new interval in nanoseconds; must be non-zero
 *                          and well below UINT64_MAX/8.
 */
RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
{
    /*
     * Validate.
     */
    RTTIMER_ASSERT_VALID_RET(pTimer);
    AssertReturn(u64NanoInterval > 0, VERR_INVALID_PARAMETER);
    AssertReturn(u64NanoInterval < UINT64_MAX / 8, VERR_INVALID_PARAMETER);
    AssertReturn(pTimer->cNsInterval, VERR_INVALID_STATE);

    if (pTimer->fSuspended || pTimer->fSuspendedFromTimer)
        /* Not firing: a plain store is fine, the value is picked up on restart. */
        pTimer->cNsInterval = u64NanoInterval;
    else
    {
        /* Running: publish the new interval first, then raise the flag that
           makes the callback wrappers reprogram each expiration manually. */
        ASMAtomicWriteU64(&pTimer->cNsInterval, u64NanoInterval);
        ASMAtomicWriteBool(&pTimer->fIntervalChanged, true);

        /* When called from the single-CPU timer callback itself, seed
           nsNextTick so the wrapper's reprogramming starts from "now"
           rather than from an interval computed off the old schedule. */
        if (   !pTimer->fAllCpus
            && !pTimer->u.Single.nsNextTick
            && pTimer->hCyclicId != CYCLIC_NONE
            && rtTimerSolIsCallingFromTimerProc(pTimer))
            pTimer->u.Single.nsNextTick = RTTimeSystemNanoTS();
    }

    return VINF_SUCCESS;
}
643
644
/**
 * Gets the system timer granularity.
 *
 * @returns The kernel tick length in nanoseconds (Solaris nsec_per_tick).
 */
RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
{
    return nsec_per_tick;
}
649
650
/**
 * Requests a finer system timer granularity - not supported on Solaris.
 *
 * @returns VERR_NOT_SUPPORTED.
 */
RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
{
    return VERR_NOT_SUPPORTED;
}
655
656
/**
 * Releases a previously granted system timer granularity - not supported on
 * Solaris (nothing is ever granted, see RTTimerRequestSystemGranularity).
 *
 * @returns VERR_NOT_SUPPORTED.
 */
RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
{
    return VERR_NOT_SUPPORTED;
}
661
662
/**
 * Checks whether high resolution timers are supported by this implementation.
 *
 * @returns false for now (the cyclic backend could support it, but the
 *          remaining bits haven't been implemented/tested - see todo).
 */
RTDECL(bool) RTTimerCanDoHighResolution(void)
{
    /** @todo return true; - when missing bits have been implemented and tested*/
    return false;
}
668
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette