VirtualBox

Changeset 87633 in vbox for trunk/src/VBox/VMM


Ignore:
Timestamp:
Feb 5, 2021 9:37:09 PM (4 years ago)
Author:
vboxsync
Message:

VMM/TM,VMM/HMVMX: Try to avoid calling TMCpuTickGetDeadlineAndTscOffset as it is expensive. The current approach is a bit erratic wrt CPUID benchmark results, but it's generally better than before.

Location:
trunk/src/VBox/VMM
Files:
6 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp

    r87292 r87633  
    272272
    273273
     274#ifdef IN_RING0 /* Only used in ring-0 at present (AMD-V and VT-x). */
    274275/**
    275276 * Checks if AMD-V / VT-x can use an offsetted hardware TSC or not.
     
    308309        /** @todo We should negate both deltas!  It's soo weird that we do the
    309310         *        exact opposite of what the hardware implements. */
    310 #ifdef IN_RING3
     311# ifdef IN_RING3
    311312        *poffRealTsc = (uint64_t)0 - pVCpu->tm.s.offTSCRawSrc - (uint64_t)SUPGetTscDelta();
    312 #else
     313# else
    313314        *poffRealTsc = (uint64_t)0 - pVCpu->tm.s.offTSCRawSrc - (uint64_t)SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
    314 #endif
     315# endif
    315316        return true;
    316317    }
     
    331332    {
    332333        /* The source is the timer synchronous virtual clock. */
    333         uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
     334        uint64_t uTscNow;
     335        uint64_t u64Now = tmCpuTickCalcFromVirtual(pVM, TMVirtualSyncGetNoCheckWithTsc(pVM, &uTscNow))
    334336                        - pVCpu->tm.s.offTSCRawSrc;
    335337        /** @todo When we start collecting statistics on how much time we spend executing
     
    339341        if (u64Now >= pVCpu->tm.s.u64TSCLastSeen)
    340342        {
    341             *poffRealTsc = u64Now - ASMReadTSC();
     343# ifdef IN_RING3
     344            *poffRealTsc = u64Now - (uTscNow + (uint64_t)SUPGetTscDelta());
     345# else
     346            *poffRealTsc = u64Now - (uTscNow + (uint64_t)SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet));
     347# endif
    342348            return true;    /** @todo count this? */
    343349        }
    344350    }
    345351
    346 #ifdef VBOX_WITH_STATISTICS
     352# ifdef VBOX_WITH_STATISTICS
    347353    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
    348 #endif
     354# endif
    349355    return false;
    350356}
    351 
     357#endif /* IN_RING0 - at the moment */
    352358
    353359/**
     
    383389
    384390
     391#ifdef IN_RING0 /* Only used in ring-0 from VT-x code at the moment. */
    385392/**
    386393 * Gets the next deadline in host CPU clock ticks and the TSC offset if we can
     
    388395 *
    389396 * @returns The number of host CPU clock ticks to the next timer deadline.
    390  * @param   pVM             The cross context VM structure.
    391  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    392  * @param   poffRealTsc     The offset against the TSC of the current host CPU,
    393  *                          if pfOffsettedTsc is set to true.
    394  * @param   pfOffsettedTsc  Where to return whether TSC offsetting can be used.
    395  * @param   pfParavirtTsc   Where to return whether paravirt TSC is enabled.
     397 * @param   pVM                 The cross context VM structure.
     398 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
     399 * @param   poffRealTsc         The offset against the TSC of the current host CPU,
     400 *                              if pfOffsettedTsc is set to true.
     401 * @param   pfOffsettedTsc      Where to return whether TSC offsetting can be used.
     402 * @param   pfParavirtTsc       Where to return whether paravirt TSC is enabled.
     403 * @param   puTscNow            Where to return the TSC value that the return
     404 *                              value is relative to.   This is delta adjusted.
     405 * @param   puDeadlineVersion   Where to return the deadline "version" number.
     406 *                              Use with TMVirtualSyncIsCurrentDeadlineVersion()
     407 *                              to check if the absolute deadline is still up to
     408 *                              date and the caller can skip calling this
     409 *                              function.
    396410 *
    397411 * @thread  EMT(pVCpu).
     
    399413 */
    400414VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *poffRealTsc,
    401                                                         bool *pfOffsettedTsc, bool *pfParavirtTsc)
     415                                                        bool *pfOffsettedTsc, bool *pfParavirtTsc,
     416                                                        uint64_t *puTscNow, uint64_t *puDeadlineVersion)
    402417{
    403418    Assert(pVCpu->tm.s.fTSCTicking || DBGFIsStepping(pVCpu));
     
    412427        /** @todo We should negate both deltas!  It's soo weird that we do the
    413428         *        exact opposite of what the hardware implements. */
    414 #ifdef IN_RING3
     429# ifdef IN_RING3
    415430        *poffRealTsc     = (uint64_t)0 - pVCpu->tm.s.offTSCRawSrc - (uint64_t)SUPGetTscDelta();
    416 #else
     431# else
    417432        *poffRealTsc     = (uint64_t)0 - pVCpu->tm.s.offTSCRawSrc - (uint64_t)SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
    418 #endif
     433# endif
    419434        *pfOffsettedTsc  = true;
    420         return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM));
     435        return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM, puDeadlineVersion, puTscNow));
    421436    }
    422437
     
    431446        /* The source is the timer synchronous virtual clock. */
    432447        uint64_t cNsToDeadline;
    433         uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
     448        uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline, puDeadlineVersion, puTscNow);
    434449        uint64_t u64Now = tmCpuTickCalcFromVirtual(pVM, u64NowVirtSync);
    435450        u64Now -= pVCpu->tm.s.offTSCRawSrc;
    436         *poffRealTsc     = u64Now - ASMReadTSC();
     451
     452# ifdef IN_RING3
     453        *poffRealTsc     = u64Now - (*puTscNow + (uint64_t)SUPGetTscDelta()); /* undoing delta */
     454# else
     455        *poffRealTsc     = u64Now - (*puTscNow + (uint64_t)SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet)); /* undoing delta */
     456# endif
    437457        *pfOffsettedTsc  = u64Now >= pVCpu->tm.s.u64TSCLastSeen;
    438458        return tmCpuCalcTicksToDeadline(pVCpu, cNsToDeadline);
    439459    }
    440460
    441 #ifdef VBOX_WITH_STATISTICS
     461# ifdef VBOX_WITH_STATISTICS
    442462    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
    443 #endif
     463# endif
    444464    *pfOffsettedTsc  = false;
    445465    *poffRealTsc     = 0;
    446     return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM));
    447 }
     466    return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM, puDeadlineVersion, puTscNow));
     467}
     468#endif /* IN_RING0 - at the moment */
    448469
    449470
  • trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp

    r87626 r87633  
    180180
    181181/**
     182 * Wrapper around the IPRT GIP time methods, extended version.
     183 */
     184DECLINLINE(uint64_t) tmVirtualGetRawNanoTSEx(PVMCC pVM, uint64_t *puTscNow)
     185{
     186    RTITMENANOTSEXTRA Extra;
     187# ifdef IN_RING3
     188    uint64_t u64 = CTXALLSUFF(pVM->tm.s.pfnVirtualGetRaw)(&CTXALLSUFF(pVM->tm.s.VirtualGetRawData), &Extra);
     189# else  /* !IN_RING3 */
     190    uint32_t cPrevSteps = pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps;
     191    uint64_t u64 = pVM->tm.s.CTX_SUFF(pfnVirtualGetRaw)(&pVM->tm.s.CTX_SUFF(VirtualGetRawData), &Extra);
     192    if (cPrevSteps != pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps)
     193        VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
     194# endif /* !IN_RING3 */
     195    if (puTscNow)
     196        *puTscNow = Extra.uTSCValue;
     197    /*DBGFTRACE_POS_U64(pVM, u64);*/
     198    return u64;
     199}
     200
     201
     202/**
    182203 * Get the time when we're not running at 100%
    183204 *
    184205 * @returns The timestamp.
    185  * @param   pVM     The cross context VM structure.
    186  */
    187 static uint64_t tmVirtualGetRawNonNormal(PVMCC pVM)
     206 * @param   pVM         The cross context VM structure.
     207 * @param   puTscNow    Where to return the TSC corresponding to the returned
     208 *                      timestamp (delta adjusted). Optional.
     209 */
     210static uint64_t tmVirtualGetRawNonNormal(PVMCC pVM, uint64_t *puTscNow)
    188211{
    189212    /*
     
    191214     * warp drive has been enabled.
    192215     */
    193     uint64_t u64 = tmVirtualGetRawNanoTS(pVM);
     216    uint64_t u64 = tmVirtualGetRawNanoTSEx(pVM, puTscNow);
    194217    u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
    195218    u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
     
    211234 *
    212235 * @returns The current time stamp.
    213  * @param   pVM     The cross context VM structure.
     236 * @param   pVM         The cross context VM structure.
    214237 */
    215238DECLINLINE(uint64_t) tmVirtualGetRaw(PVMCC pVM)
     
    217240    if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
    218241        return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
    219     return tmVirtualGetRawNonNormal(pVM);
     242    return tmVirtualGetRawNonNormal(pVM, NULL /*puTscNow*/);
     243}
     244
     245
     246/**
     247 * Get the raw virtual time, extended version.
     248 *
     249 * @returns The current time stamp.
     250 * @param   pVM         The cross context VM structure.
     251 * @param   puTscNow    Where to return the TSC corresponding to the returned
     252 *                      timestamp (delta adjusted). Optional.
     253 */
     254DECLINLINE(uint64_t) tmVirtualGetRawEx(PVMCC pVM, uint64_t *puTscNow)
     255{
     256    if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
     257        return tmVirtualGetRawNanoTSEx(pVM, puTscNow) - pVM->tm.s.u64VirtualOffset;
     258    return tmVirtualGetRawNonNormal(pVM, puTscNow);
    220259}
    221260
     
    322361 *                              the next virtual sync timer deadline. Can be
    323362 *                              NULL.
    324  */
    325 DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVMCC pVM, uint64_t u64, uint64_t off, uint64_t *pcNsToDeadline)
     363 * @param   pnsAbsDeadline      Where to return the absolute deadline.
     364 *                              Optional.
     365 */
     366DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVMCC pVM, uint64_t u64, uint64_t off,
     367                                                         uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
    326368{
    327369    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
     
    375417
    376418    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
     419    if (pnsAbsDeadline)
     420        *pnsAbsDeadline = u64Expire; /* Always return the unadjusted absolute deadline, or HM will waste time going
     421                                        thru this code over and over again even if there aren't any timer changes. */
    377422    if (u64 < u64Expire)
    378423    {
     
    432477 *                              the next virtual sync timer deadline.  Can be
    433478 *                              NULL.
    434  */
    435 DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVMCC pVM, uint64_t u64, uint64_t *pcNsToDeadline)
     479 * @param   pnsAbsDeadline      Where to return the absolute deadline.
     480 *                              Optional.
     481 */
     482DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVMCC pVM, uint64_t u64, uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
    436483{
    437484    /*
     
    444491        if (pcNsToDeadline)
    445492            *pcNsToDeadline = 0;
     493        if (pnsAbsDeadline)
     494            *pnsAbsDeadline = u64;
    446495        STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
    447496        Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
     
    455504    uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
    456505    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
    457         return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off, pcNsToDeadline);
     506        return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off, pcNsToDeadline, pnsAbsDeadline);
    458507
    459508    /*
     
    472521
    473522    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
     523    if (pnsAbsDeadline)
     524        *pnsAbsDeadline = u64Expire;
    474525    if (u64 < u64Expire)
    475526    {
     
    516567 *                              the next virtual sync timer deadline.  Can be
    517568 *                              NULL.
     569 * @param   pnsAbsDeadline      Where to return the absolute deadline.
     570 *                              Optional.
     571 * @param   puTscNow            Where to return the TSC corresponding to the
     572 *                              returned timestamp (delta adjusted). Optional.
    518573 * @thread  EMT.
    519574 */
    520 DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers, uint64_t *pcNsToDeadline)
     575DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers, uint64_t *pcNsToDeadline,
     576                                        uint64_t *pnsAbsDeadline, uint64_t *puTscNow)
    521577{
    522578    STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);
     
    536592     */
    537593    Assert(pVM->tm.s.cVirtualTicking);
    538     u64 = tmVirtualGetRaw(pVM);
     594    u64 = tmVirtualGetRawEx(pVM, puTscNow);
    539595    if (fCheckTimers)
    540596    {
     
    559615     *       which is less picky or hasn't been adjusted yet
    560616     */
     617    /** @todo switch this around, have the tmVirtualSyncGetLocked code inlined
     618     *        here and the remainder of this function in a static worker. */
    561619    if (PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
    562         return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline);
     620        return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);
    563621
    564622    /*
     
    580638                if (off < u64Expire)
    581639                {
     640                    if (pnsAbsDeadline)
     641                        *pnsAbsDeadline = u64Expire;
    582642                    if (pcNsToDeadline)
    583643                        *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - off);
     
    597657            if (pcNsToDeadline)
    598658                *pcNsToDeadline = 0;
     659            if (pnsAbsDeadline)
     660                *pnsAbsDeadline = off;
    599661            STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
    600662            Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
     
    629691        int rcLock = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
    630692        if (RT_SUCCESS_NP(rcLock))
    631             return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline);
     693            return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);
    632694
    633695        /* Re-check the ticking flag. */
     
    640702            if (pcNsToDeadline)
    641703                *pcNsToDeadline = 0;
     704            if (pnsAbsDeadline)
     705                *pnsAbsDeadline = off;
    642706            Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
    643707            DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped3");
     
    704768/** @todo u64VirtualSyncLast */
    705769    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
     770    if (pnsAbsDeadline)
     771        *pnsAbsDeadline = u64Expire;
    706772    if (u64 >= u64Expire)
    707773    {
     
    749815VMM_INT_DECL(uint64_t) TMVirtualSyncGet(PVMCC pVM)
    750816{
    751     return tmVirtualSyncGetEx(pVM, true /*fCheckTimers*/, NULL /*pcNsToDeadline*/);
     817    return tmVirtualSyncGetEx(pVM, true /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
    752818}
    753819
     
    764830VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheck(PVMCC pVM)
    765831{
    766     return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/);
     832    return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
     833}
     834
     835
     836/**
     837 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
     838 * TMCLOCK_VIRTUAL, also returning corresponding TSC value.
     839 *
     840 * @returns The timestamp.
     841 * @param   pVM             The cross context VM structure.
     842 * @param   puTscNow        Where to return the TSC value that the return
     843 *                          value is relative to.   This is delta adjusted.
     844 * @thread  EMT.
     845 * @remarks May set the timer and virtual sync FFs.
     846 */
     847VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheckWithTsc(PVMCC pVM, uint64_t *puTscNow)
     848{
     849    return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, puTscNow);
    767850}
    768851
     
    779862VMM_INT_DECL(uint64_t) TMVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers)
    780863{
    781     return tmVirtualSyncGetEx(pVM, fCheckTimers, NULL /*pcNsToDeadline*/);
     864    return tmVirtualSyncGetEx(pVM, fCheckTimers, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
    782865}
    783866
     
    791874 * @param   pcNsToDeadline      Where to return the number of nano seconds to
    792875 *                              the next virtual sync timer deadline.
     876 * @param   puTscNow            Where to return the TSC value that the return
     877 *                              value is relative to.   This is delta adjusted.
     878 * @param   puDeadlineVersion   Where to return the deadline "version" number.
     879 *                              Use with TMVirtualSyncIsCurrentDeadlineVersion()
     880 *                              to check if the absolute deadline is still up to
     881 *                              date and the caller can skip calling this
     882 *                              function.
    793883 * @thread  EMT.
    794884 * @remarks May set the timer and virtual sync FFs.
    795885 */
    796 VMM_INT_DECL(uint64_t) TMVirtualSyncGetWithDeadlineNoCheck(PVMCC pVM, uint64_t *pcNsToDeadline)
     886VMM_INT_DECL(uint64_t) TMVirtualSyncGetWithDeadlineNoCheck(PVMCC pVM, uint64_t *pcNsToDeadline,
     887                                                           uint64_t *puDeadlineVersion, uint64_t *puTscNow)
    797888{
    798889    uint64_t cNsToDeadlineTmp;       /* try convince the compiler to skip the if tests. */
    799     uint64_t u64Now = tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadlineTmp);
     890    uint64_t u64Now = tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadlineTmp, puDeadlineVersion, puTscNow);
    800891    *pcNsToDeadline = cNsToDeadlineTmp;
    801892    return u64Now;
     
    808899 * @returns The number of TMCLOCK_VIRTUAL ticks.
    809900 * @param   pVM                 The cross context VM structure.
     901 * @param   puTscNow            Where to return the TSC value that the return
     902 *                              value is relative to.   This is delta adjusted.
     903 * @param   puDeadlineVersion   Where to return the deadline "version" number.
     904 *                              Use with TMVirtualSyncIsCurrentDeadlineVersion()
     905 *                              to check if the absolute deadline is still up to
     906 *                              date and the caller can skip calling this
     907 *                              function.
    810908 * @thread  EMT.
    811909 * @remarks May set the timer and virtual sync FFs.
    812910 */
    813 VMMDECL(uint64_t) TMVirtualSyncGetNsToDeadline(PVMCC pVM)
     911VMMDECL(uint64_t) TMVirtualSyncGetNsToDeadline(PVMCC pVM, uint64_t *puDeadlineVersion, uint64_t *puTscNow)
    814912{
    815913    uint64_t cNsToDeadline;
    816     tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadline);
     914    tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadline, puDeadlineVersion, puTscNow);
    817915    return cNsToDeadline;
     916}
     917
     918
     919/**
     920 * Checks if the given deadline is still current.
     921 *
     922 * @retval  true if the deadline is still current.
     923 * @retval  false if the deadline is outdated.
     924 * @param   pVM                 The cross context VM structure.
     925 * @param   uDeadlineVersion    The deadline version to check.
     926 */
     927VMM_INT_DECL(bool) TMVirtualSyncIsCurrentDeadlineVersion(PVMCC pVM, uint64_t uDeadlineVersion)
     928{
     929    /** @todo Try use ASMAtomicUoReadU64 instead. */
     930    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
     931    return u64Expire == uDeadlineVersion;
    818932}
    819933
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r87625 r87633  
    22842284    Assert(pGuestMsrLoad);
    22852285
     2286#ifndef DEBUG_bird
    22862287    LogFlowFunc(("pVCpu=%p idMsr=%#RX32 uGuestMsrValue=%#RX64\n", pVCpu, idMsr, uGuestMsrValue));
     2288#endif
    22872289
    22882290    /* Check if the MSR already exists in the VM-entry MSR-load area. */
     
    23692371    uint32_t        cMsrs         = pVmcsInfo->cEntryMsrLoad;
    23702372
     2373#ifndef DEBUG_bird
    23712374    LogFlowFunc(("pVCpu=%p idMsr=%#RX32\n", pVCpu, idMsr));
     2375#endif
    23722376
    23732377    for (uint32_t i = 0; i < cMsrs; i++)
     
    71147118 * @param   pVCpu           The cross context virtual CPU structure.
    71157119 * @param   pVmxTransient   The VMX-transient structure.
     7120 * @param   idCurrentCpu    The current CPU number.
    71167121 *
    71177122 * @remarks No-long-jump zone!!!
    71187123 */
    7119 static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
     7124static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, RTCPUID idCurrentCpu)
    71207125{
    71217126    bool         fOffsettedTsc;
    71227127    bool         fParavirtTsc;
    71237128    uint64_t     uTscOffset;
    7124     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
     7129    PVMCC        pVM      = pVCpu->CTX_SUFF(pVM);
    71257130    PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    71267131
    71277132    if (pVM->hmr0.s.vmx.fUsePreemptTimer)
    71287133    {
    7129         uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &uTscOffset, &fOffsettedTsc, &fParavirtTsc);
     7134
     7135        /* The TMCpuTickGetDeadlineAndTscOffset function is expensive (calling it on
     7136           every entry slowed down the bs2-test1 CPUID testcase by ~33% (on an 10980xe). */
     7137        uint64_t cTicksToDeadline;
     7138        if (   idCurrentCpu == pVCpu->hmr0.s.idLastCpu
     7139            && TMVirtualSyncIsCurrentDeadlineVersion(pVM, pVCpu->hmr0.s.vmx.uTscDeadlineVersion))
     7140        {
     7141            STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatVmxPreemptionReusingDeadline);
     7142            fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
     7143            cTicksToDeadline = pVCpu->hmr0.s.vmx.uTscDeadline - SUPReadTsc();
     7144            if ((int64_t)cTicksToDeadline > 0)
     7145            { /* hopefully */ }
     7146            else
     7147            {
     7148                STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatVmxPreemptionReusingDeadlineExpired);
     7149                cTicksToDeadline = 0;
     7150            }
     7151        }
     7152        else
     7153        {
     7154            STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatVmxPreemptionRecalcingDeadline);
     7155            cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &uTscOffset, &fOffsettedTsc, &fParavirtTsc,
     7156                                                                &pVCpu->hmr0.s.vmx.uTscDeadline,
     7157                                                                &pVCpu->hmr0.s.vmx.uTscDeadlineVersion);
     7158            pVCpu->hmr0.s.vmx.uTscDeadline += cTicksToDeadline;
     7159            if (cTicksToDeadline >= 128)
     7160            { /* hopefully */ }
     7161            else
     7162                STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatVmxPreemptionRecalcingDeadlineExpired);
     7163        }
    71307164
    71317165        /* Make sure the returned values have sane upper and lower boundaries. */
    7132         uint64_t u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
     7166        uint64_t const u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
    71337167        cTicksToDeadline   = RT_MIN(cTicksToDeadline, u64CpuHz / 64);      /* 1/64th of a second */ /** @todo r=bird: Once real+virtual timers move to separate thread, we can raise the upper limit (16ms isn't much). ASSUMES working poke cpu function. */
    71347168        cTicksToDeadline   = RT_MAX(cTicksToDeadline, u64CpuHz / 2048);    /* 1/2048th of a second */
     
    1104211076        || idCurrentCpu != pVCpu->hmr0.s.idLastCpu)
    1104311077    {
    11044         hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pVmxTransient);
     11078        hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pVmxTransient, idCurrentCpu);
    1104511079        pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = true;
    1104611080    }
     
    1500115035    /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
    1500215036    pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
     15037Log12(("hmR0VmxExitPreemptTimer:\n"));
    1500315038
    1500415039    /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
    1500515040    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    1500615041    bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
    15007     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
     15042    STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
    1500815043    return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
    1500915044}
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r87563 r87633  
    704704#endif
    705705
    706 #ifdef VBOX_WITH_STATISTICS
    707706    bool const fCpuSupportsVmx = ASMIsIntelCpu() || ASMIsViaCentaurCpu() || ASMIsShanghaiCpu();
    708 #endif
    709707    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    710708    {
     
    792790        HM_REG_COUNTER(&pHmCpu->StatExitHostNmiInGC,        "/HM/CPU%u/Exit/HostNmiInGC", "Host NMI received while in guest context.");
    793791        HM_REG_COUNTER(&pHmCpu->StatExitHostNmiInGCIpi,     "/HM/CPU%u/Exit/HostNmiInGCIpi", "Host NMI received while in guest context dispatched using IPIs.");
     792        HM_REG_COUNTER(&pHmCpu->StatExitPreemptTimer,       "/HM/CPU%u/Exit/PreemptTimer", "VMX-preemption timer expired.");
    794793#ifdef VBOX_WITH_STATISTICS
    795         HM_REG_COUNTER(&pHmCpu->StatExitPreemptTimer,       "/HM/CPU%u/Exit/PreemptTimer", "VMX-preemption timer expired.");
    796794        HM_REG_COUNTER(&pHmCpu->StatExitTprBelowThreshold,  "/HM/CPU%u/Exit/TprBelowThreshold", "TPR lowered below threshold by the guest.");
    797795        HM_REG_COUNTER(&pHmCpu->StatExitTaskSwitch,         "/HM/CPU%u/Exit/TaskSwitch", "Task switch caused through task gate in IDT.");
     
    871869        HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRpl,         "/HM/CPU%u/VMXCheck/RPL", "Could not use VMX due to unsuitable RPL.");
    872870        HM_REG_COUNTER(&pHmCpu->StatVmxCheckPmOk,           "/HM/CPU%u/VMXCheck/VMX_PM", "VMX execution in protected mode OK.");
    873 
     871#endif
     872        if (fCpuSupportsVmx)
     873        {
     874            HM_REG_COUNTER(&pHmCpu->StatExitPreemptTimer,                      "/HM/CPU%u/PreemptTimer",                          "VMX-preemption timer fired.");
     875            HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionReusingDeadline,          "/HM/CPU%u/PreemptTimer/ReusingDeadline",          "VMX-preemption timer arming logic using previously calculated deadline");
     876            HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionReusingDeadlineExpired,   "/HM/CPU%u/PreemptTimer/ReusingDeadlineExpired",   "VMX-preemption timer arming logic found previous deadline already expired (ignored)");
     877            HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionRecalcingDeadline,        "/HM/CPU%u/PreemptTimer/RecalcingDeadline",        "VMX-preemption timer arming logic recalculating the deadline (slightly expensive)");
     878            HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionRecalcingDeadlineExpired, "/HM/CPU%u/PreemptTimer/RecalcingDeadlineExpired", "VMX-preemption timer arming logic found recalculated deadline expired (ignored)");
     879        }
     880#ifdef VBOX_WITH_STATISTICS
    874881        /*
    875882         * Guest Exit reason stats.
  • trunk/src/VBox/VMM/include/HMInternal.h

    r87606 r87633  
    13211321    STAMCOUNTER             StatVmxCheckPmOk;
    13221322
     1323    STAMCOUNTER             StatVmxPreemptionRecalcingDeadline;
     1324    STAMCOUNTER             StatVmxPreemptionRecalcingDeadlineExpired;
     1325    STAMCOUNTER             StatVmxPreemptionReusingDeadline;
     1326    STAMCOUNTER             StatVmxPreemptionReusingDeadlineExpired;
     1327
    13231328#ifdef VBOX_WITH_STATISTICS
    13241329    R3PTRTYPE(PSTAMCOUNTER) paStatExitReason;
     
    13891394        /** Ring-0 pointer to the hardware-assisted VMX execution function. */
    13901395        PFNHMVMXSTARTVM             pfnStartVm;
     1396        /** Absolute TSC deadline. */
     1397        uint64_t                    uTscDeadline;
     1398        /** The deadline version number. */
     1399        uint64_t                    uTscDeadlineVersion;
    13911400
    13921401        /** @name Guest information.
  • trunk/src/VBox/VMM/include/HMInternal.mac

    r87522 r87633  
    182182struc HMR0CPUVMX
    183183    .pfnStartVm                     RTR0PTR_RES  1
     184    .uTscDeadline                   resq    1
     185    .uTscDeadlineVersion            resq    1
     186
    184187
    185188    .VmcsInfo                       resb    VMXVMCSINFO_size
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette