VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/timer-r0drv-solaris.c@ 54183

Last change on this file since 54183 was 54183, checked in by vboxsync, 10 years ago

timer-r0drv-solaris.c: Cleanups. Got rid of 2-3 allocations in RTTimerStart by embedding the data into the timer structure itself and allocating it all in one go in RTTimerCreate.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 15.5 KB
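The change described above replaces the per-start allocations with a single allocation that embeds a variable-sized trailing array, the same pattern RTTimerCreateEx uses below via RT_OFFSETOF(RTTIMER, u.Omni.au64Ticks[RTMpGetCount()]). A minimal standalone sketch of that pattern, using hypothetical names and plain libc instead of IPRT:

#include <stddef.h>   /* offsetof */
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical structure with a variable-sized trailing array, mirroring how
   RTTIMER embeds u.Omni.au64Ticks[]. */
typedef struct EXAMPLETIMER
{
    uint32_t cCpus;        /* Number of valid entries in aTicks[]. */
    uint64_t aTicks[1];    /* Variable size; actual length is cCpus. */
} EXAMPLETIMER;

/* A single allocation covers the fixed part plus all array entries, so starting
   the timer later needs no further allocations. */
static EXAMPLETIMER *exampleTimerCreate(uint32_t cCpus)
{
    size_t cb = offsetof(EXAMPLETIMER, aTicks) + cCpus * sizeof(uint64_t);
    EXAMPLETIMER *pTimer = (EXAMPLETIMER *)calloc(1, cb);
    if (pTimer)
        pTimer->cCpus = cCpus;
    return pTimer;
}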
1/* $Id: timer-r0drv-solaris.c 54183 2015-02-12 20:50:29Z vboxsync $ */
2/** @file
3 * IPRT - Timer, Ring-0 Driver, Solaris.
4 */
5
6/*
7 * Copyright (C) 2006-2014 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#include "the-solaris-kernel.h"
32#include "internal/iprt.h"
33#include <iprt/timer.h>
34
35#include <iprt/asm.h>
36#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
37# include <iprt/asm-amd64-x86.h>
38#endif
39#include <iprt/assert.h>
40#include <iprt/err.h>
41#include <iprt/mem.h>
42#include <iprt/mp.h>
43#include <iprt/spinlock.h>
44#include <iprt/time.h>
45#include <iprt/thread.h>
46#include "internal/magics.h"
47
48#define SOL_TIMER_ANY_CPU (-1)
49
50/*******************************************************************************
51* Structures and Typedefs *
52*******************************************************************************/
53/**
54 * The internal representation of a Solaris timer handle.
55 */
56typedef struct RTTIMER
57{
58 /** Magic.
59 * This is RTTIMER_MAGIC, but changes to something else before the timer
60 * is destroyed to indicate clearly that the thread should exit. */
61 uint32_t volatile u32Magic;
62 /** Reference counter. */
63 uint32_t volatile cRefs;
64 /** Flag indicating that the timer is suspended. */
65 uint8_t volatile fSuspended;
66 /** Whether the timer must run on all CPUs or not. */
67 uint8_t fAllCpus;
68 /** Whether the timer must run on a specific CPU or not. */
69 uint8_t fSpecificCpu;
70 /** The CPU it must run on if fSpecificCpu is set. */
71 uint8_t iCpu;
72 /** The nanosecond interval for repeating timers. */
73 uint64_t cNsInterval;
74 /** Cyclic timer Id. */
75 cyclic_id_t hCyclicId;
76 /** The user callback. */
77 PFNRTTIMER pfnTimer;
78 /** The argument for the user callback. */
79 void *pvUser;
80 /** Union with timer type specific data. */
81 union
82 {
83 /** Single timer (fAllCpus == false). */
84 struct
85 {
86 /** Cyclic handler. */
87 cyc_handler_t hHandler;
88 /** Cyclic time and interval representation. */
89 cyc_time_t hFireTime;
90 /** Timer ticks. */
91 uint64_t u64Tick;
92 } Single;
93
94 /** Omni timer (fAllCpus == true). */
95 struct
96 {
97 /** Absolute timestamp of when the timer should fire next. */
98 uint64_t u64When;
99 /** Array of timer ticks per CPU. Reinitialized when a CPU is online'd
100 * (variable size). */
101 uint64_t au64Ticks[1];
102 } Omni;
103 } u;
104} RTTIMER;
105
106
107/*******************************************************************************
108* Defined Constants And Macros *
109*******************************************************************************/
110/** Validates that the timer is valid. */
111#define RTTIMER_ASSERT_VALID_RET(pTimer) \
112 do \
113 { \
114 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE); \
115 AssertMsgReturn((pTimer)->u32Magic == RTTIMER_MAGIC, ("pTimer=%p u32Magic=%x expected %x\n", (pTimer), (pTimer)->u32Magic, RTTIMER_MAGIC), \
116 VERR_INVALID_HANDLE); \
117 } while (0)
118
119
120
121/**
122 * Retains a reference to the timer.
123 *
124 * @returns New reference counter value.
125 * @param pTimer The timer.
126 */
127DECLINLINE(uint32_t) rtTimerSolRetain(PRTTIMER pTimer)
128{
129 return ASMAtomicIncU32(&pTimer->cRefs);
130}
131
132
133/**
134 * Destroys the timer when the reference counter has reached zero.
135 *
136 * @returns 0 (new reference counter value).
137 * @param pTimer The timer.
138 */
139static uint32_t rtTimeSolReleaseCleanup(PRTTIMER pTimer)
140{
141 Assert(pTimer->hCyclicId == CYCLIC_NONE);
142 ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
143 RTMemFree(pTimer);
    return 0;
144}
145
146
147/**
148 * Releases a reference to the timer.
149 *
150 * @returns New reference counter value.
151 * @param pTimer The timer.
152 */
153DECLINLINE(uint32_t) rtTimerSolRelease(PRTTIMER pTimer)
154{
155 uint32_t cRefs = ASMAtomicDecU32(&pTimer->cRefs);
156 if (!cRefs)
157 return rtTimeSolReleaseCleanup(pTimer);
158 return cRefs;
159}
160
161
162/**
163 * RTMpOnSpecific callback used by rtTimerSolCallbackWrapper() to deal with
164 * callouts on the wrong CPU (race with cyclic_bind).
165 *
166 * @param idCpu The CPU this is fired on.
167 * @param pvUser1 Opaque pointer to the timer.
168 * @param pvUser2 Not used, NULL.
169 */
170static void rtTimerSolMpCallbackWrapper(RTCPUID idCpu, void *pvUser1, void *pvUser2)
171{
172 PRTTIMER pTimer = (PRTTIMER)pvUser1;
173 AssertPtrReturnVoid(pTimer);
174 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
175 Assert(pTimer->iCpu == RTMpCpuId()); /* ASSUMES: index == cpuid */
176 Assert(!pTimer->fAllCpus);
177 NOREF(pvUser2);
178
179 /* Make sure one-shots do not fire another time. */
180 Assert( !pTimer->fSuspended
181 || pTimer->cNsInterval != 0);
182
183 /* For one-shot specific timers, allow RTTimer to restart them. */
184 if (pTimer->cNsInterval == 0)
185 pTimer->fSuspended = true;
186
187 uint64_t u64Tick = ++pTimer->u.Single.u64Tick;
188 pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);
189}
190
191
192/**
193 * Callback wrapper for single-CPU timers.
194 *
195 * @param pvArg Opaque pointer to the timer.
196 *
197 * @remarks This will be executed in interrupt context but only at the specified
198 * level i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
199 * cyclic subsystem here, neither should pfnTimer().
200 */
201static void rtTimerSolSingleCallbackWrapper(void *pvArg)
202{
203 PRTTIMER pTimer = (PRTTIMER)pvArg;
204 AssertPtrReturnVoid(pTimer);
205 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
206 Assert(!pTimer->fAllCpus);
207
208 /* Make sure one-shots do not fire another time. */
209 Assert( !pTimer->fSuspended
210 || pTimer->cNsInterval != 0);
211
212 /* For specific timers, we might fire on the wrong CPU between cyclic_add() and cyclic_bind().
213 Redirect such shots to the intended CPU, since the binding may not have taken effect yet. */
214 if ( pTimer->fSpecificCpu
215 && pTimer->iCpu != RTMpCpuId()) /* ASSUMES: index == cpuid */
216 {
217 RTMpOnSpecific(pTimer->iCpu, rtTimerSolMpCallbackWrapper, pTimer, NULL);
218 return;
219 }
220
221 /* For one-shot any-cpu timers, allow RTTimer to restart them. */
222 if (pTimer->cNsInterval == 0)
223 pTimer->fSuspended = true;
224
225 uint64_t u64Tick = ++pTimer->u.Single.u64Tick;
226 pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);
227}
228
229
230/**
231 * Callback wrapper for Omni-CPU timers.
232 *
233 * @param pvArg Opaque pointer to the timer.
234 *
235 * @remarks This will be executed in interrupt context but only at the specified
236 * level i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
237 * cyclic subsystem here, neither should pfnTimer().
238 */
239static void rtTimerSolOmniCallbackWrapper(void *pvArg)
240{
241 PRTTIMER pTimer = (PRTTIMER)pvArg;
242 AssertPtrReturnVoid(pTimer);
243 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
244 Assert(pTimer->fAllCpus);
245
246 uint64_t u64Tick = ++pTimer->u.Omni.au64Ticks[CPU->cpu_id];
247 pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);
248}
249
250
251/**
252 * Omni-CPU cyclic online event. This is called before the omni cyclic begins to
253 * fire on the specified CPU.
254 *
255 * @param pvArg Opaque pointer to the timer.
256 * @param pCpu Pointer to the CPU on which it will fire.
257 * @param pCyclicHandler Pointer to a cyclic handler to add to the CPU
258 * specified in @a pCpu.
259 * @param pCyclicTime Pointer to the cyclic time and interval object.
260 *
261 * @remarks We -CANNOT- call back into the cyclic subsystem here, we can however
262 * block (sleep).
263 */
264static void rtTimerSolOmniCpuOnline(void *pvArg, cpu_t *pCpu, cyc_handler_t *pCyclicHandler, cyc_time_t *pCyclicTime)
265{
266 PRTTIMER pTimer = (PRTTIMER)pvArg;
267 AssertPtrReturnVoid(pTimer);
268 AssertPtrReturnVoid(pCpu);
269 AssertPtrReturnVoid(pCyclicHandler);
270 AssertPtrReturnVoid(pCyclicTime);
271
272 pTimer->u.Omni.au64Ticks[pCpu->cpu_id] = 0;
273 pCyclicHandler->cyh_func = (cyc_func_t)rtTimerSolOmniCallbackWrapper;
274 pCyclicHandler->cyh_arg = pTimer;
275 pCyclicHandler->cyh_level = CY_LOCK_LEVEL;
276
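    /* Late-online case: if the scheduled start time has already passed on this CPU,
       program the first expiry half an interval from now instead of using the stale
       timestamp. */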
277 uint64_t u64Now = RTTimeSystemNanoTS();
278 if (pTimer->u.Omni.u64When < u64Now)
279 pCyclicTime->cyt_when = u64Now + pTimer->cNsInterval / 2;
280 else
281 pCyclicTime->cyt_when = pTimer->u.Omni.u64When;
282
283 pCyclicTime->cyt_interval = pTimer->cNsInterval;
284}
285
286
287RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
288{
289 RT_ASSERT_PREEMPTIBLE();
290 *ppTimer = NULL;
291
292 /*
293 * Validate flags.
294 */
295 if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
296 return VERR_INVALID_PARAMETER;
297
298 if ( (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
299 && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
300 && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
301 return VERR_CPU_NOT_FOUND;
302
303 /* One-shot omni timers are not supported by the cyclic system. */
304 if ( (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL
305 && u64NanoInterval == 0)
306 return VERR_NOT_SUPPORTED;
307
308 /*
309 * Allocate and initialize the timer handle. The omni variant has a
310 * variable sized array of ticks counts, thus the size calculation.
311 */
312 PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ( (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL
313 ? RT_OFFSETOF(RTTIMER, u.Omni.au64Ticks[RTMpGetCount()])
314 : sizeof(RTTIMER));
315 if (!pTimer)
316 return VERR_NO_MEMORY;
317
318 pTimer->u32Magic = RTTIMER_MAGIC;
319 pTimer->cRefs = 1;
320 pTimer->fSuspended = true;
321 if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
322 {
323 pTimer->fAllCpus = true;
324 pTimer->fSpecificCpu = false;
325 pTimer->iCpu = 255;
326 }
327 else if (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
328 {
329 pTimer->fAllCpus = false;
330 pTimer->fSpecificCpu = true;
331 pTimer->iCpu = fFlags & RTTIMER_FLAGS_CPU_MASK; /* ASSUMES: index == cpuid */
332 }
333 else
334 {
335 pTimer->fAllCpus = false;
336 pTimer->fSpecificCpu = false;
337 pTimer->iCpu = 255;
338 }
339 pTimer->cNsInterval = u64NanoInterval;
340 pTimer->pfnTimer = pfnTimer;
341 pTimer->pvUser = pvUser;
342 pTimer->hCyclicId = CYCLIC_NONE;
343
344 *ppTimer = pTimer;
345 return VINF_SUCCESS;
346}
347
348
349RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
350{
351 if (pTimer == NULL)
352 return VINF_SUCCESS;
353 RTTIMER_ASSERT_VALID_RET(pTimer);
354 RT_ASSERT_INTS_ON();
355
356 /*
357 * Free the associated resources.
358 */
359 RTTimerStop(pTimer);
360 ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
361
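    /* Drop the creation reference; rtTimerSolRelease() frees the structure once the
       count reaches zero. */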
362 rtTimerSolRelease(pTimer);
363 return VINF_SUCCESS;
364}
365
366
367RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
368{
369 RTTIMER_ASSERT_VALID_RET(pTimer);
370 RT_ASSERT_INTS_ON();
371
372 if (!pTimer->fSuspended)
373 return VERR_TIMER_ACTIVE;
374
375 pTimer->fSuspended = false;
376 if (pTimer->fAllCpus)
377 {
378 /*
379 * Setup omni (all CPU) timer. The Omni-CPU online event will fire
380 * and from there we setup periodic timers per CPU.
381 */
382 pTimer->u.Omni.u64When = pTimer->cNsInterval + RTTimeSystemNanoTS();
383
384 cyc_omni_handler_t HandlerOmni;
385 HandlerOmni.cyo_online = rtTimerSolOmniCpuOnline;
386 HandlerOmni.cyo_offline = NULL;
387 HandlerOmni.cyo_arg = pTimer;
388
389 mutex_enter(&cpu_lock);
390 pTimer->hCyclicId = cyclic_add_omni(&HandlerOmni);
391 mutex_exit(&cpu_lock);
392 }
393 else
394 {
395 int iCpu = SOL_TIMER_ANY_CPU;
396 if (pTimer->fSpecificCpu)
397 {
398 iCpu = pTimer->iCpu;
399 if (!RTMpIsCpuOnline(iCpu)) /* ASSUMES: index == cpuid */
400 return VERR_CPU_OFFLINE;
401 }
402
403 pTimer->u.Single.hHandler.cyh_func = (cyc_func_t)rtTimerSolSingleCallbackWrapper;
404 pTimer->u.Single.hHandler.cyh_arg = pTimer;
405 pTimer->u.Single.hHandler.cyh_level = CY_LOCK_LEVEL;
406
407 mutex_enter(&cpu_lock);
408 if (RT_UNLIKELY( iCpu != SOL_TIMER_ANY_CPU
409 && !cpu_is_online(cpu[iCpu])))
410 {
411 mutex_exit(&cpu_lock);
412 return VERR_CPU_OFFLINE;
413 }
414
415 pTimer->u.Single.hFireTime.cyt_when = u64First + RTTimeSystemNanoTS();
416 if (pTimer->cNsInterval == 0)
417 {
418 /*
419 * cyclic_add() comment: "The caller is responsible for assuring that cyt_when + cyt_interval <= INT64_MAX"
420 * but it contradicts itself because cyclic_reprogram() updates only the interval and accepts CY_INFINITY as
421 * a valid, special value. See cyclic_fire().
422 */
423 pTimer->u.Single.hFireTime.cyt_interval = CY_INFINITY;
424 }
425 else
426 pTimer->u.Single.hFireTime.cyt_interval = pTimer->cNsInterval;
427
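    /* cyclic_add() arms the timer; for CPU-specific timers the cyclic_bind() below
       pins it to the requested CPU, and until that completes a shot may land on the
       wrong CPU (handled in rtTimerSolSingleCallbackWrapper). */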
428 pTimer->hCyclicId = cyclic_add(&pTimer->u.Single.hHandler, &pTimer->u.Single.hFireTime);
429 if (iCpu != SOL_TIMER_ANY_CPU)
430 cyclic_bind(pTimer->hCyclicId, cpu[iCpu], NULL /* cpupart */);
431
432 mutex_exit(&cpu_lock);
433 }
434
435 return VINF_SUCCESS;
436}
437
438
439RTDECL(int) RTTimerStop(PRTTIMER pTimer)
440{
441 RTTIMER_ASSERT_VALID_RET(pTimer);
442 RT_ASSERT_INTS_ON();
443
444 if (pTimer->fSuspended)
445 return VERR_TIMER_SUSPENDED;
446
447 /** @remarks Do -not- call this function from a timer callback,
448 * cyclic_remove() will deadlock the system. */
449 mutex_enter(&cpu_lock);
450
451 pTimer->fSuspended = true;
452 cyclic_remove(pTimer->hCyclicId);
453 pTimer->hCyclicId = CYCLIC_NONE;
454
455 mutex_exit(&cpu_lock);
456
457 return VINF_SUCCESS;
458}
459
460
461RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
462{
463 /*
464 * Validate.
465 */
466 RTTIMER_ASSERT_VALID_RET(pTimer);
467 AssertReturn(u64NanoInterval, VERR_INVALID_PARAMETER);
468
469 if (pTimer->fSuspended)
470 {
471 pTimer->cNsInterval = u64NanoInterval;
472 return VINF_SUCCESS;
473 }
474
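    /* Reprogramming a live cyclic (cyclic_reprogram()) is not attempted here, so
       interval changes are only honoured while the timer is suspended. */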
475 return VERR_NOT_SUPPORTED;
476}
477
478
479RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
480{
481 return nsec_per_tick;
482}
483
484
485RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
486{
487 return VERR_NOT_SUPPORTED;
488}
489
490
491RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
492{
493 return VERR_NOT_SUPPORTED;
494}
495
496
497RTDECL(bool) RTTimerCanDoHighResolution(void)
498{
499 /** @todo return true; - when missing bits have been implemented and tested */
500 return false;
501}
502
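For context, a ring-0 caller drives the interface implemented above roughly as follows. This is an illustrative sketch only: the callback body, the names exampleTimerCallback/exampleUseTimer and the 10 ms interval are made up, and it relies only on the IPRT declarations from iprt/timer.h and iprt/err.h already used in this file.

#include <iprt/timer.h>
#include <iprt/err.h>

/* Example callback: invoked from the cyclic handler wrappers above with the current
   tick count. It must not call RTTimerStop()/RTTimerDestroy() on its own timer, see
   the cyclic_remove() deadlock remark in RTTimerStop(). */
static DECLCALLBACK(void) exampleTimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
{
    NOREF(pTimer); NOREF(pvUser); NOREF(iTick);
}

static int exampleUseTimer(void)
{
    PRTTIMER pTimer;
    /* Periodic 10 ms timer with no CPU affinity; RTTIMER_FLAGS_CPU_ALL would create
       the omni (all-CPU) variant instead. */
    int rc = RTTimerCreateEx(&pTimer, UINT64_C(10000000) /* ns */, 0 /* fFlags */,
                             exampleTimerCallback, NULL /* pvUser */);
    if (RT_SUCCESS(rc))
    {
        rc = RTTimerStart(pTimer, 0 /* u64First: first shot as soon as possible */);
        /* ... do work while the timer fires ... */
        RTTimerStop(pTimer);
        RTTimerDestroy(pTimer);
    }
    return rc;
}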