VirtualBox

Changeset 9371 in vbox


Timestamp: Jun 3, 2008 10:46:03 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 31594
Message: quick reorg.

File: 1 edited

Legend: lines prefixed with '-' were removed in r9371, lines prefixed with '+' were added, and lines prefixed with a single space are unchanged context.
  • trunk/src/VBox/Runtime/r0drv/linux/timer-r0drv-linux.c

--- trunk/src/VBox/Runtime/r0drv/linux/timer-r0drv-linux.c (r9370)
+++ trunk/src/VBox/Runtime/r0drv/linux/timer-r0drv-linux.c (r9371)
@@ -166,186 +166,4 @@
 
 
-/*******************************************************************************
-*   Internal Functions                                                         *
-*******************************************************************************/
-#ifdef RT_USE_LINUX_HRTIMER
-static enum hrtimer_restart rtTimerLinuxCallback(struct hrtimer *pHrTimer);
-#else
-static void rtTimerLinuxCallback(unsigned long ulUser);
-#endif
-#ifdef CONFIG_SMP
-static int rtTimerLnxStartAll(PRTTIMER pTimer, PRTTIMERLINUXSTARTONCPUARGS pArgs);
-static int rtTimerLnxStopAll(PRTTIMER pTimer);
-static DECLCALLBACK(void) rtTimerLinuxMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser);
-#endif
-
-
-RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
-{
-#ifdef RT_USE_LINUX_HRTIMER
-    /** @todo later... */
-    return 1000000000 / HZ; /* ns */
-#else
-    return 1000000000 / HZ; /* ns */
-#endif
-}
-
-
-RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
-{
-    return VERR_NOT_SUPPORTED;
-}
-
-
-RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
-{
-    return VERR_NOT_SUPPORTED;
-}
-
-
-RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, unsigned fFlags, PFNRTTIMER pfnTimer, void *pvUser)
-{
-    PRTTIMER pTimer;
-    RTCPUID  iCpu;
-    unsigned cCpus;
-
-    *ppTimer = NULL;
-
-    /*
-     * Validate flags.
-     */
-    if (!RTTIMER_FLAGS_IS_VALID(fFlags))
-        return VERR_INVALID_PARAMETER;
-    if (    (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
-        &&  (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
-        &&  !RTMpIsCpuOnline(fFlags & RTTIMER_FLAGS_CPU_MASK))
-        return (fFlags & RTTIMER_FLAGS_CPU_MASK) > RTMpGetMaxCpuId()
-             ? VERR_CPU_NOT_FOUND
-             : VERR_CPU_OFFLINE;
-
-    /*
-     * Allocate the timer handler.
-     */
-    cCpus = 1;
-#ifdef CONFIG_SMP
-    if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
-    {
-        cCpus = RTMpGetMaxCpuId() + 1;
-        Assert(cCpus <= RTCPUSET_MAX_CPUS); /* On linux we have a 1:1 relationship between cpuid and set index. */
-        AssertReturn(u64NanoInterval, VERR_NOT_IMPLEMENTED); /* We don't implement single shot on all cpus, sorry. */
-    }
-#endif
-
-    pTimer = (PRTTIMER)RTMemAllocZ(RT_OFFSETOF(RTTIMER, aSubTimers[cCpus]));
-    if (!pTimer)
-        return VERR_NO_MEMORY;
-
-    /*
-     * Initialize it.
-     */
-    pTimer->u32Magic = RTTIMER_MAGIC;
-    pTimer->hSpinlock = NIL_RTSPINLOCK;
-    pTimer->fSuspended = true;
-#ifdef CONFIG_SMP
-    pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
-    pTimer->fAllCpus = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
-    pTimer->idCpu = fFlags & RTTIMER_FLAGS_CPU_MASK;
-#else
-    pTimer->fSpecificCpu = !!(fFlags & RTTIMER_FLAGS_CPU_SPECIFIC);
-    pTimer->idCpu = RTMpCpuId();
-#endif
-    pTimer->cCpus = cCpus;
-    pTimer->pfnTimer = pfnTimer;
-    pTimer->pvUser = pvUser;
-    pTimer->u64NanoInterval = u64NanoInterval;
-
-    for (iCpu = 0; iCpu < cCpus; iCpu++)
-    {
-#ifdef RT_USE_LINUX_HRTIMER
-        hrtimer_init(&pTimer->aSubTimers[iCpu].LnxTimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-        pTimer->aSubTimers[iCpu].LnxTimer.function = rtTimerLinuxCallback;
-#else
-        init_timer(&pTimer->aSubTimers[iCpu].LnxTimer);
-        pTimer->aSubTimers[iCpu].LnxTimer.data     = (unsigned long)pTimer;
-        pTimer->aSubTimers[iCpu].LnxTimer.function = rtTimerLinuxCallback;
-        pTimer->aSubTimers[iCpu].LnxTimer.expires  = jiffies;
-#endif
-        pTimer->aSubTimers[iCpu].u64StartTS = 0;
-        pTimer->aSubTimers[iCpu].u64NextTS = 0;
-        pTimer->aSubTimers[iCpu].iTick = 0;
-        pTimer->aSubTimers[iCpu].pParent = pTimer;
-        pTimer->aSubTimers[iCpu].enmState = RTTIMERLNXSTATE_STOPPED;
-    }
-
-#ifdef CONFIG_SMP
-    /*
-     * If this is running on ALL cpus, we'll have to register a callback
-     * for MP events (so timers can be started/stopped on cpus going
-     * online/offline). We also create the spinlock for syncrhonizing
-     * stop/start/mp-event.
-     */
-    if (cCpus > 1)
-    {
-        int rc = RTSpinlockCreate(&pTimer->hSpinlock);
-        if (RT_SUCCESS(rc))
-            rc = RTMpNotificationRegister(rtTimerLinuxMpEvent, pTimer);
-        else
-            pTimer->hSpinlock = NIL_RTSPINLOCK;
-        if (RT_FAILURE(rc))
-        {
-            RTTimerDestroy(pTimer);
-            return rc;
-        }
-    }
-#endif /* CONFIG_SMP */
-
-    *ppTimer = pTimer;
-    return VINF_SUCCESS;
-}
-
-
-RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
-{
-    RTSPINLOCK hSpinlock;
-
-    /* It's ok to pass NULL pointer. */
-    if (pTimer == /*NIL_RTTIMER*/ NULL)
-        return VINF_SUCCESS;
-    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
-    AssertPtrReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
-
-    /*
-     * Remove the MP notifications first because it'll reduce the risk of
-     * us overtaking any MP event that might theoretically be racing us here.
-     */
-    hSpinlock = pTimer->hSpinlock;
-#ifdef CONFIG_SMP
-    if (    pTimer->cCpus > 1
-        &&  hSpinlock != NIL_RTSPINLOCK)
-    {
-        int rc = RTMpNotificationDeregister(rtTimerLinuxMpEvent, pTimer);
-        AssertRC(rc);
-    }
-#endif /* CONFIG_SMP */
-
-    /*
-     * Stop the timer if it's running.
-     */
-    if (!ASMAtomicUoReadBool(&pTimer->fSuspended)) /* serious paranoia */
-        RTTimerStop(pTimer);
-
-    /*
-     * Uninitialize the structure and free the associated resources.
-     * The spinlock goes last.
-     */
-    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
-    RTMemFree(pTimer);
-    if (hSpinlock != NIL_RTSPINLOCK)
-        RTSpinlockDestroy(hSpinlock);
-
-    return VINF_SUCCESS;
-}
-
-
 /**
  * Sets the state.
@@ -430,93 +248,4 @@
     rtTimerLnxSetState(&pSubTimer->enmState, RTTIMERLNXSTATE_STOPPED);
 }
-
-
-/**
- * Callback function use by RTTimerStart via RTMpOnSpecific to start
- * a timer running on a specific CPU.
- *
- * @param   idCpu       The current CPU.
- * @param   pvUser1     Pointer to the timer.
- * @param   pvUser2     Pointer to the argument structure.
- */
-static DECLCALLBACK(void) rtTimerLnxStartOnSpecificCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
-{
-    PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
-    PRTTIMER pTimer = (PRTTIMER)pvUser1;
-    rtTimerLnxStartSubTimer(&pTimer->aSubTimers[0], pArgs->u64Now, pArgs->u64First);
-}
-
-
-RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
-{
-    RTTIMERLINUXSTARTONCPUARGS Args;
-    int rc2;
-
-    /*
-     * Validate.
-     */
-    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
-    AssertPtrReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
-
-    if (!pTimer->fSuspended)
-        return VERR_TIMER_ACTIVE;
-
-    Args.u64First = u64First;
-#ifdef CONFIG_SMP
-    if (pTimer->fAllCpus)
-        return rtTimerLnxStartAll(pTimer, &Args);
-#endif
-
-    /*
-     * This is pretty straight forwards.
-     */
-    Args.u64Now = RTTimeNanoTS();
-    rtTimerLnxSetState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STARTING);
-    ASMAtomicWriteBool(&pTimer->fSuspended, false);
-    if (!pTimer->fSpecificCpu)
-        rtTimerLnxStartSubTimer(&pTimer->aSubTimers[0], Args.u64Now, Args.u64First);
-    else
-    {
-        rc2 = RTMpOnSpecific(pTimer->idCpu, rtTimerLnxStartOnSpecificCpu, pTimer, &Args);
-        if (RT_FAILURE(rc2))
-        {
-            /* Suspend it, the cpu id is probably invalid or offline. */
-            ASMAtomicWriteBool(&pTimer->fSuspended, true);
-            rtTimerLnxSetState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STOPPED);
-            return rc2;
-        }
-    }
-
-    return VINF_SUCCESS;
-}
-
-
-RTDECL(int) RTTimerStop(PRTTIMER pTimer)
-{
-
-    /*
-     * Validate.
-     */
-    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
-    AssertPtrReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
-
-    if (pTimer->fSuspended)
-        return VERR_TIMER_SUSPENDED;
-
-#ifdef CONFIG_SMP
-    if (pTimer->fAllCpus)
-        return rtTimerLnxStopAll(pTimer);
-#endif
-
-    /*
-     * Cancel the timer.
-     */
-    ASMAtomicWriteBool(&pTimer->fSuspended, true); /* just to be on the safe side. */
-    rtTimerLnxSetState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STOPPING);
-    rtTimerLnxStopSubTimer(&pTimer->aSubTimers[0]);
-
-    return VINF_SUCCESS;
-}
-
 
 
@@ -633,17 +362,5 @@
 
 
-
 #ifdef CONFIG_SMP
-
-/* */
-/* */
-/* The ALL CPUs stuff */
-/* The ALL CPUs stuff */
-/* The ALL CPUs stuff */
-/* */
-/* */
-
-
-
 
 /**
@@ -898,2 +615,258 @@
 
 #endif /* CONFIG_SMP */
+
+
+/**
+ * Callback function use by RTTimerStart via RTMpOnSpecific to start
+ * a timer running on a specific CPU.
+ *
+ * @param   idCpu       The current CPU.
+ * @param   pvUser1     Pointer to the timer.
+ * @param   pvUser2     Pointer to the argument structure.
+ */
+static DECLCALLBACK(void) rtTimerLnxStartOnSpecificCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+{
+    PRTTIMERLINUXSTARTONCPUARGS pArgs = (PRTTIMERLINUXSTARTONCPUARGS)pvUser2;
+    PRTTIMER pTimer = (PRTTIMER)pvUser1;
+    rtTimerLnxStartSubTimer(&pTimer->aSubTimers[0], pArgs->u64Now, pArgs->u64First);
+}
+
+
+RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
+{
+    RTTIMERLINUXSTARTONCPUARGS Args;
+    int rc2;
+
+    /*
+     * Validate.
+     */
+    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
+    AssertPtrReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
+
+    if (!pTimer->fSuspended)
+        return VERR_TIMER_ACTIVE;
+
+    Args.u64First = u64First;
+#ifdef CONFIG_SMP
+    if (pTimer->fAllCpus)
+        return rtTimerLnxStartAll(pTimer, &Args);
+#endif
+
+    /*
+     * This is pretty straight forwards.
+     */
+    Args.u64Now = RTTimeNanoTS();
+    rtTimerLnxSetState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STARTING);
+    ASMAtomicWriteBool(&pTimer->fSuspended, false);
+    if (!pTimer->fSpecificCpu)
+        rtTimerLnxStartSubTimer(&pTimer->aSubTimers[0], Args.u64Now, Args.u64First);
+    else
+    {
+        rc2 = RTMpOnSpecific(pTimer->idCpu, rtTimerLnxStartOnSpecificCpu, pTimer, &Args);
+        if (RT_FAILURE(rc2))
+        {
+            /* Suspend it, the cpu id is probably invalid or offline. */
+            ASMAtomicWriteBool(&pTimer->fSuspended, true);
+            rtTimerLnxSetState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STOPPED);
+            return rc2;
+        }
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTTimerStop(PRTTIMER pTimer)
+{
+
+    /*
+     * Validate.
+     */
+    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
+    AssertPtrReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
+
+    if (pTimer->fSuspended)
+        return VERR_TIMER_SUSPENDED;
+
+#ifdef CONFIG_SMP
+    if (pTimer->fAllCpus)
+        return rtTimerLnxStopAll(pTimer);
+#endif
+
+    /*
+     * Cancel the timer.
+     */
+    ASMAtomicWriteBool(&pTimer->fSuspended, true); /* just to be on the safe side. */
+    rtTimerLnxSetState(&pTimer->aSubTimers[0].enmState, RTTIMERLNXSTATE_STOPPING);
+    rtTimerLnxStopSubTimer(&pTimer->aSubTimers[0]);
+
+    return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
+{
+    RTSPINLOCK hSpinlock;
+
+    /* It's ok to pass NULL pointer. */
+    if (pTimer == /*NIL_RTTIMER*/ NULL)
+        return VINF_SUCCESS;
+    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
+    AssertPtrReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
+
+    /*
+     * Remove the MP notifications first because it'll reduce the risk of
+     * us overtaking any MP event that might theoretically be racing us here.
+     */
+    hSpinlock = pTimer->hSpinlock;
+#ifdef CONFIG_SMP
+    if (    pTimer->cCpus > 1
+        &&  hSpinlock != NIL_RTSPINLOCK)
+    {
+        int rc = RTMpNotificationDeregister(rtTimerLinuxMpEvent, pTimer);
+        AssertRC(rc);
+    }
+#endif /* CONFIG_SMP */
+
+    /*
+     * Stop the timer if it's running.
+     */
+    if (!ASMAtomicUoReadBool(&pTimer->fSuspended)) /* serious paranoia */
+        RTTimerStop(pTimer);
+
+    /*
+     * Uninitialize the structure and free the associated resources.
+     * The spinlock goes last.
+     */
+    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
+    RTMemFree(pTimer);
+    if (hSpinlock != NIL_RTSPINLOCK)
+        RTSpinlockDestroy(hSpinlock);
+
+    return VINF_SUCCESS;
+}
+
+
+RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, unsigned fFlags, PFNRTTIMER pfnTimer, void *pvUser)
+{
+    PRTTIMER pTimer;
+    RTCPUID  iCpu;
+    unsigned cCpus;
+
+    *ppTimer = NULL;
+
+    /*
+     * Validate flags.
+     */
+    if (!RTTIMER_FLAGS_IS_VALID(fFlags))
+        return VERR_INVALID_PARAMETER;
+    if (    (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
+        &&  (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
+        &&  !RTMpIsCpuOnline(fFlags & RTTIMER_FLAGS_CPU_MASK))
+        return (fFlags & RTTIMER_FLAGS_CPU_MASK) > RTMpGetMaxCpuId()
+             ? VERR_CPU_NOT_FOUND
+             : VERR_CPU_OFFLINE;
+
+    /*
+     * Allocate the timer handler.
+     */
+    cCpus = 1;
+#ifdef CONFIG_SMP
+    if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
+    {
+        cCpus = RTMpGetMaxCpuId() + 1;
+        Assert(cCpus <= RTCPUSET_MAX_CPUS); /* On linux we have a 1:1 relationship between cpuid and set index. */
+        AssertReturn(u64NanoInterval, VERR_NOT_IMPLEMENTED); /* We don't implement single shot on all cpus, sorry. */
+    }
+#endif
+
+    pTimer = (PRTTIMER)RTMemAllocZ(RT_OFFSETOF(RTTIMER, aSubTimers[cCpus]));
+    if (!pTimer)
+        return VERR_NO_MEMORY;
+
+    /*
+     * Initialize it.
+     */
+    pTimer->u32Magic = RTTIMER_MAGIC;
+    pTimer->hSpinlock = NIL_RTSPINLOCK;
+    pTimer->fSuspended = true;
+#ifdef CONFIG_SMP
+    pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
+    pTimer->fAllCpus = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
+    pTimer->idCpu = fFlags & RTTIMER_FLAGS_CPU_MASK;
+#else
+    pTimer->fSpecificCpu = !!(fFlags & RTTIMER_FLAGS_CPU_SPECIFIC);
+    pTimer->idCpu = RTMpCpuId();
+#endif
+    pTimer->cCpus = cCpus;
+    pTimer->pfnTimer = pfnTimer;
+    pTimer->pvUser = pvUser;
+    pTimer->u64NanoInterval = u64NanoInterval;
+
+    for (iCpu = 0; iCpu < cCpus; iCpu++)
+    {
+#ifdef RT_USE_LINUX_HRTIMER
+        hrtimer_init(&pTimer->aSubTimers[iCpu].LnxTimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+        pTimer->aSubTimers[iCpu].LnxTimer.function = rtTimerLinuxCallback;
+#else
+        init_timer(&pTimer->aSubTimers[iCpu].LnxTimer);
+        pTimer->aSubTimers[iCpu].LnxTimer.data     = (unsigned long)pTimer;
+        pTimer->aSubTimers[iCpu].LnxTimer.function = rtTimerLinuxCallback;
+        pTimer->aSubTimers[iCpu].LnxTimer.expires  = jiffies;
+#endif
+        pTimer->aSubTimers[iCpu].u64StartTS = 0;
+        pTimer->aSubTimers[iCpu].u64NextTS = 0;
+        pTimer->aSubTimers[iCpu].iTick = 0;
+        pTimer->aSubTimers[iCpu].pParent = pTimer;
+        pTimer->aSubTimers[iCpu].enmState = RTTIMERLNXSTATE_STOPPED;
+    }
+
+#ifdef CONFIG_SMP
+    /*
+     * If this is running on ALL cpus, we'll have to register a callback
+     * for MP events (so timers can be started/stopped on cpus going
+     * online/offline). We also create the spinlock for syncrhonizing
+     * stop/start/mp-event.
+     */
+    if (cCpus > 1)
+    {
+        int rc = RTSpinlockCreate(&pTimer->hSpinlock);
+        if (RT_SUCCESS(rc))
+            rc = RTMpNotificationRegister(rtTimerLinuxMpEvent, pTimer);
+        else
+            pTimer->hSpinlock = NIL_RTSPINLOCK;
+        if (RT_FAILURE(rc))
+        {
+            RTTimerDestroy(pTimer);
+            return rc;
+        }
+    }
+#endif /* CONFIG_SMP */
+
+    *ppTimer = pTimer;
+    return VINF_SUCCESS;
+}
+
+
+RTDECL(uint32_t) RTTimerGetSystemGranularity(void)
+{
+#ifdef RT_USE_LINUX_HRTIMER
+    /** @todo later... */
+    return 1000000000 / HZ; /* ns */
+#else
+    return 1000000000 / HZ; /* ns */
+#endif
+}
+
+
+RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
+{
+    return VERR_NOT_SUPPORTED;
+}
+
+
+RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
+{
+    return VERR_NOT_SUPPORTED;
+}
+
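The functions shuffled around by this reorg make up the ring-0 RTTimer API that other VirtualBox modules consume. As a rough, hypothetical usage sketch only (not part of the changeset): the names exampleTimerCallback, exampleTimerDemo and g_cTicks are invented for illustration, the 10 ms interval and fFlags = 0 (no CPU affinity) are arbitrary choices, and the three-argument PFNRTTIMER callback shape (pTimer, pvUser, iTick) is assumed from later iprt/timer.h revisions and may not match the header as of r9371.

    #include <iprt/timer.h>
    #include <iprt/asm.h>
    #include <iprt/cdefs.h>
    #include <iprt/err.h>
    #include <iprt/types.h>

    static uint64_t volatile g_cTicks = 0;   /* invented for this example */

    /* Assumed PFNRTTIMER shape: timer handle, user argument, tick number. */
    static DECLCALLBACK(void) exampleTimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
    {
        NOREF(pTimer); NOREF(pvUser); NOREF(iTick);
        ASMAtomicIncU64(&g_cTicks);          /* keep it short: this runs in timer (softirq/hrtimer) context */
    }

    static int exampleTimerDemo(void)
    {
        PRTTIMER pTimer;
        /* Periodic 10 ms timer with no CPU affinity (fFlags = 0); passing RTTIMER_FLAGS_CPU_ALL
           would instead create one sub-timer per CPU, as RTTimerCreateEx above shows. */
        int rc = RTTimerCreateEx(&pTimer, UINT64_C(10000000) /* ns */, 0 /* fFlags */,
                                 exampleTimerCallback, NULL /* pvUser */);
        if (RT_SUCCESS(rc))
        {
            rc = RTTimerStart(pTimer, 0 /* u64First: fire as soon as possible */);
            if (RT_SUCCESS(rc))
            {
                /* ... do work while the timer ticks ... */
                RTTimerStop(pTimer);
            }
            RTTimerDestroy(pTimer);          /* NULL would also be accepted, see RTTimerDestroy above */
        }
        return rc;
    }

On the plain timer_list path, RTTimerGetSystemGranularity() reports 1000000000 / HZ nanoseconds, so with HZ = 250 the effective resolution is 4,000,000 ns (4 ms); a 10 ms interval as in the sketch is the kind of period that path can actually honour.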