VirtualBox

Changeset 54308 in vbox for trunk/src/VBox/VMM


Timestamp: Feb 19, 2015 7:43:51 PM (10 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 98383
Message:

VMM,SUP: Apply the tsc delta where it matters. Made sense out of the paravirt-tsc-mode enable/disable code.

Location: trunk/src/VBox/VMM
Files: 9 edited

Legend: unchanged context lines are shown with a leading space, removed lines with a leading '-', added lines with a leading '+'.
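
The gist of the change: guest TSC reads now go through SUPReadTsc(), which subtracts the per-host-CPU TSC delta recorded in the GIP, so every place that programs a hardware TSC offset (or converts between TSC sources) has to fold that delta in as well. The standalone C sketch below restates the arithmetic spelled out in the new TMCpuTickCanUseRealTSC comment; it is not VBox code, and the host TSC, delta and offset values are made up for illustration.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t uHostTsc     = 100000000; /* stand-in for ASMReadTSC() on the current host CPU */
        int64_t  i64TscDelta  = 120;       /* per-CPU TSC delta from the GIP (made-up value)     */
        uint64_t offTSCRawSrc = 1000000;   /* TM's raw-source offset (made-up value)             */

        /* tmCpuTickGetInternal computes: SUPReadTsc() - offTSCRawSrc,
           where SUPReadTsc() = ASMReadTSC() - i64TscDelta.            */
        uint64_t uGuestTsc  = uHostTsc - (uint64_t)i64TscDelta - offTSCRawSrc;

        /* Hence the offset the CPU must ADD to RDTSC[P] to show the guest the same value:
           offRealTsc = -(i64TscDelta + offTSCRawSrc).                                     */
        uint64_t offRealTsc = 0 - (uint64_t)i64TscDelta - offTSCRawSrc;

        printf("guest=%llu  rdtsc+offset=%llu\n", (unsigned long long)uGuestTsc,
               (unsigned long long)(uHostTsc + offRealTsc));
        return 0; /* both values print the same */
    }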
  • trunk/src/VBox/VMM/Makefile.kmk  (r53781 → r54308)

@@ -404 +404 @@
 VMMRC_SYSSUFF   = .gc

-VMMRC_DEFS      = IN_VMM_RC IN_RT_RC IN_DIS DIS_CORE_ONLY VBOX_WITH_RAW_MODE VBOX_WITH_RAW_MODE_NOT_R0 $(VMM_COMMON_DEFS)
+VMMRC_DEFS      = IN_VMM_RC IN_RT_RC IN_DIS DIS_CORE_ONLY VBOX_WITH_RAW_MODE VBOX_WITH_RAW_MODE_NOT_R0 IN_SUP_RC \
+       $(VMM_COMMON_DEFS)
 ifdef VBOX_WITH_VMM_R0_SWITCH_STACK
  VMMRC_DEFS    += VMM_R0_SWITCH_STACK

@@ -422 +423 @@
 VMMRC_LIBS      = \
        $(PATH_STAGE_LIB)/DisasmRC$(VBOX_SUFF_LIB) \
-       $(PATH_STAGE_LIB)/RuntimeRC$(VBOX_SUFF_LIB)
+       $(PATH_STAGE_LIB)/RuntimeRC$(VBOX_SUFF_LIB) \
+       $(PATH_STAGE_LIB)/SUPRC$(VBOX_SUFF_LIB)
 ifneq ($(filter pe lx,$(VBOX_LDR_FMT32)),)
  VMMRC_LIBS    += \
  • trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp  (r54065 → r54308)

@@ -51 +51 @@


+#ifdef IN_RING3
+/**
+ * Used by tmR3CpuTickParavirtEnable and tmR3CpuTickParavirtDisable.
+ */
+uint64_t tmR3CpuTickGetRawVirtualNoCheck(PVM pVM)
+{
+    return tmCpuTickGetRawVirtual(pVM, false /*fCheckTimers*/);
+}
+#endif
+
+
 /**
  * Resumes the CPU timestamp counter ticking.

@@ -68 +79 @@
          *        unpaused before the virtual time and stopped after it. */
         if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
-            pVCpu->tm.s.offTSCRawSrc = ASMReadTSC() - pVCpu->tm.s.u64TSC;
+            pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVCpu->tm.s.u64TSC;
         else
             pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)

@@ -103 +114 @@
             /* When resuming, use the TSC value of the last stopped VCPU to avoid the TSC going back. */
             if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
-                pVCpu->tm.s.offTSCRawSrc = ASMReadTSC() - pVM->tm.s.u64LastPausedTSC;
+                pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVM->tm.s.u64LastPausedTSC;
             else
                 pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)

@@ -211 +222 @@
  *
  * @returns true/false accordingly.
+ * @param   pVM             Pointer to the cross context VM structure.
  * @param   pVCpu           Pointer to the VMCPU.
- * @param   poffRealTSC     The offset against the TSC of the current CPU.
- * @param   pfParavirtTsc   Where to store whether paravirt. TSC is enabled.
+ * @param   poffRealTsc     The offset against the TSC of the current host CPU,
+ *                          if pfOffsettedTsc is set to true.
+ * @param   pfParavirtTsc   Where to return whether paravirt TSC is enabled.
  *
  * @thread  EMT(pVCpu).
  * @see     TMCpuTickGetDeadlineAndTscOffset().
  */
-VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfParavirtTsc)
-{
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    bool fOffsettedTsc = false;
+VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVM pVM, PVMCPU pVCpu, uint64_t *poffRealTsc, bool *pfParavirtTsc)
+{
+    Assert(pVCpu->tm.s.fTSCTicking);
+
+    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;
+
+    /*
+     * In real TSC mode it's easy, we just need the delta & offTscRawSrc and
+     * the CPU will add them to RDTSC and RDTSCP at runtime.
+     *
+     * In tmCpuTickGetInternal we do:
+     *          SUPReadTsc() - pVCpu->tm.s.offTSCRawSrc;
+     * Where SUPReadTsc() does:
+     *          ASMReadTSC() - pGipCpu->i64TscDelta;
+     * Which means tmCpuTickGetInternal actually does:
+     *          ASMReadTSC() - pGipCpu->i64TscDelta - pVCpu->tm.s.offTSCRawSrc;
+     * So, the offset to be ADDED to RDTSC[P] is:
+     *          offRealTsc = -(pGipCpu->i64TscDelta + pVCpu->tm.s.offTSCRawSrc)
+     */
+    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
+    {
+        /** @todo We should negate both deltas!  It's soo weird that we do the
+         *        exact opposite of what the hardware implements. */
+#ifdef IN_RING3
+        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDelta();
+#else
+        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
+#endif
+        return true;
+    }

     /*

@@ -232 +271 @@
      *          c) we're not using warp drive (accelerated virtual guest time).
      */
-    Assert(pVCpu->tm.s.fTSCTicking);
-    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;
-
-    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
-    {
-        /* The source is the real TSC. */
-        *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc;
-        return true;    /** @todo count this? */
-    }
-
     if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
         && !pVM->tm.s.fVirtualSyncCatchUp

@@ -254 +283 @@
          * timer timeout. If it's lower than the avg. length, we should trap rdtsc to increase
          * the chance that we'll get interrupted right after the timer expired. */
-        uint64_t u64TSC = ASMReadTSC();     /** @todo should be replaced with SUPReadTSC() eventually. */
-        *poffRealTSC = u64Now - u64TSC;
-        fOffsettedTsc = u64Now >= pVCpu->tm.s.u64TSCLastSeen;
-        return true;    /** @todo count this? */
+        if (u64Now >= pVCpu->tm.s.u64TSCLastSeen)
+        {
+            *poffRealTsc = u64Now - ASMReadTSC();
+            return true;    /** @todo count this? */
+        }
     }

@@ -274 +304 @@
  *
  * @returns The number of host cpu ticks to the next deadline.  Max one second.
- * @param   cNsToDeadline       The number of nano seconds to the next virtual
- *                              sync deadline.
- */
-DECLINLINE(uint64_t) tmCpuCalcTicksToDeadline(uint64_t cNsToDeadline)
+ * @param   pVCpu           The current CPU.
+ * @param   cNsToDeadline   The number of nano seconds to the next virtual
+ *                          sync deadline.
+ */
+DECLINLINE(uint64_t) tmCpuCalcTicksToDeadline(PVMCPU pVCpu, uint64_t cNsToDeadline)
 {
     AssertCompile(TMCLOCK_FREQ_VIRTUAL <= _4G);
+#ifdef IN_RING3
+    uint64_t uCpuHz = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
+#else
+    uint64_t uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
+#endif
     if (RT_UNLIKELY(cNsToDeadline >= TMCLOCK_FREQ_VIRTUAL))
-        return SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
-    uint64_t cTicks = ASMMultU64ByU32DivByU32(SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage),
-                                              cNsToDeadline,
-                                              TMCLOCK_FREQ_VIRTUAL);
+        return uCpuHz;
+    uint64_t cTicks = ASMMultU64ByU32DivByU32(uCpuHz, cNsToDeadline, TMCLOCK_FREQ_VIRTUAL);
     if (cTicks > 4000)
         cTicks -= 4000; /* fudge to account for overhead */

@@ -298 +332 @@
  *
  * @returns The number of host CPU clock ticks to the next timer deadline.
+ * @param   pVM             Pointer to the cross context VM structure.
  * @param   pVCpu           The current CPU.
- * @param   poffRealTSC     The offset against the TSC of the current CPU.
- * @param   pfOffsettedTsc  Where to store whether TSC offsetting can be used.
- * @param   pfParavirtTsc   Where to store whether paravirt. TSC is enabled.
+ * @param   poffRealTsc     The offset against the TSC of the current host CPU,
+ *                          if pfOffsettedTsc is set to true.
+ * @param   pfOffsettedTsc  Where to return whether TSC offsetting can be used.
+ * @param   pfParavirtTsc   Where to return whether paravirt TSC is enabled.
  *
  * @thread  EMT(pVCpu).
  * @see     TMCpuTickCanUseRealTSC().
  */
-VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfOffsettedTsc,
-                                                        bool *pfParavirtTsc)
-{
-    PVM      pVM = pVCpu->CTX_SUFF(pVM);
-    uint64_t cTicksToDeadline;
+VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVM pVM, PVMCPU pVCpu, uint64_t *poffRealTsc,
+                                                        bool *pfOffsettedTsc, bool *pfParavirtTsc)
+{
+    Assert(pVCpu->tm.s.fTSCTicking);
+
+    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;

     /*
-     * We require:
-     *     1. A fixed TSC, this is checked at init time.
-     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
-     *     3. Either that we're using the real TSC as time source or
-     *          a) we don't have any lag to catch up, and
-     *          b) the virtual sync clock hasn't been halted by an expired timer, and
-     *          c) we're not using warp drive (accelerated virtual guest time).
+     * Same logic as in TMCpuTickCanUseRealTSC.
      */
-    Assert(pVCpu->tm.s.fTSCTicking);
-    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;
-
     if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
     {
-        /* The source is the real TSC. */
-        *poffRealTSC    = 0 - pVCpu->tm.s.offTSCRawSrc;
-        *pfOffsettedTsc = true;
-        cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
-        return cTicksToDeadline;
-    }
-
+        /** @todo We should negate both deltas!  It's soo weird that we do the
+         *        exact opposite of what the hardware implements. */
+#ifdef IN_RING3
+        *poffRealTsc     = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDelta();
+#else
+        *poffRealTsc     = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
+#endif
+        *pfOffsettedTsc  = true;
+        return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM));
+    }
+
+    /*
+     * Same logic as in TMCpuTickCanUseRealTSC.
+     */
     if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
         && !pVM->tm.s.fVirtualSyncCatchUp

@@ -345 +380 @@
                         : u64NowVirtSync;
         u64Now -= pVCpu->tm.s.offTSCRawSrc;
-        *poffRealTSC     = u64Now - ASMReadTSC();        /** @todo replace with SUPReadTSC() eventually. */
+        *poffRealTsc     = u64Now - ASMReadTSC();
         *pfOffsettedTsc  = u64Now >= pVCpu->tm.s.u64TSCLastSeen;
-        cTicksToDeadline = tmCpuCalcTicksToDeadline(cNsToDeadline);
-        return cTicksToDeadline;
+        return tmCpuCalcTicksToDeadline(pVCpu, cNsToDeadline);
     }

@@ -355 +389 @@
 #endif
     *pfOffsettedTsc  = false;
-    *poffRealTSC     = 0;
-    cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
-    return cTicksToDeadline;
+    *poffRealTsc     = 0;
+    return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM));
 }

@@ -375 +408 @@
         PVM pVM = pVCpu->CTX_SUFF(pVM);
         if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
-            u64 = ASMReadTSC();
+            u64 = SUPReadTsc();
         else
             u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);

@@ -497 +530 @@
         && g_pSUPGlobalInfoPage->u32Mode != SUPGIPMODE_INVARIANT_TSC)
     {
-        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
+#ifdef IN_RING3
+        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
+#elif defined(IN_RING0)
+        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, RTMpCpuIdToSetIndex(RTMpCpuId()));
+#else
+        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, VMMGetCpu(pVM)->iHostCpuSet);
+#endif
         if (RT_LIKELY(cTSCTicksPerSecond != ~(uint64_t)0))
             return cTSCTicksPerSecond;
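
tmCpuCalcTicksToDeadline above now takes the VCPU so it can look up the host CPU's frequency by CPU-set index, then converts a nanosecond deadline into host TSC ticks, capped at one second and reduced by a small fudge for overhead. Below is a standalone sketch of that conversion, assuming TMCLOCK_FREQ_VIRTUAL is 10^9 (nanoseconds per second) and using a 128-bit intermediate (gcc/clang) in place of ASMMultU64ByU32DivByU32; it is not VBox code.

    #include <stdint.h>
    #include <stdio.h>

    #define FREQ_VIRTUAL UINT64_C(1000000000) /* stand-in for TMCLOCK_FREQ_VIRTUAL (ns per second) */

    static uint64_t ticks_to_deadline(uint64_t uCpuHz, uint64_t cNsToDeadline)
    {
        if (cNsToDeadline >= FREQ_VIRTUAL)          /* cap at one second's worth of ticks */
            return uCpuHz;
        uint64_t cTicks = (uint64_t)((unsigned __int128)uCpuHz * cNsToDeadline / FREQ_VIRTUAL);
        if (cTicks > 4000)
            cTicks -= 4000;                         /* same overhead fudge as the real code */
        return cTicks;
    }

    int main(void)
    {
        /* 2.8 GHz host CPU and 250 microseconds to the deadline -> about 700000 ticks. */
        printf("%llu\n", (unsigned long long)ticks_to_deadline(2800000000ULL, 250000));
        return 0;
    }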
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp  (r54277 → r54308)

@@ -1403 +1403 @@

     /* Clear the VCPU <-> host CPU mapping as we've left HM context. */
-    ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
+    ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID); /** @todo r=bird: This is VMMR0.cpp's job, isn't it? */

     return VINF_SUCCESS;
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp  (r54196 → r54308)

@@ -2251 +2251 @@
  * intercepts.
  *
+ * @param   pVM         The shared VM handle.
  * @param   pVCpu       Pointer to the VMCPU.
  *
  * @remarks No-long-jump zone!!!
  */
-static void hmR0SvmUpdateTscOffsetting(PVMCPU pVCpu)
-{
-    bool fParavirtTsc;
-    bool fCanUseRealTsc;
+static void hmR0SvmUpdateTscOffsetting(PVM pVM, PVMCPU pVCpu)
+{
+    bool     fParavirtTsc;
     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
-    fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVmcb->ctrl.u64TSCOffset, &fParavirtTsc);
+    bool fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &pVmcb->ctrl.u64TSCOffset, &fParavirtTsc);
     if (fCanUseRealTsc)
     {

@@ -2279 +2279 @@
     if (fParavirtTsc)
     {
-        int rc = GIMR0UpdateParavirtTsc(pVCpu->CTX_SUFF(pVM), 0 /* u64Offset */);
+        int rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
         AssertRC(rc);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);

@@ -3068 +3068 @@
         || idCurrentCpu != pVCpu->hm.s.idLastCpu)
     {
-        hmR0SvmUpdateTscOffsetting(pVCpu);
+        hmR0SvmUpdateTscOffsetting(pVM, pVCpu);
         pSvmTransient->fUpdateTscOffsetting = false;
     }

@@ -3190 +3190 @@

     if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC))
-        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset);     /** @todo use SUPReadTSC() eventually. */
+        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset);

     STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp  (r54196 → r54308)

@@ -5603 +5603 @@
  *
  * @returns VBox status code.
+ * @param   pVM             Pointer to the cross context VM structure.
  * @param   pVCpu           Pointer to the VMCPU.
  *
  * @remarks No-long-jump zone!!!
  */
-static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu)
-{
-    int  rc            = VERR_INTERNAL_ERROR_5;
-    bool fOffsettedTsc = false;
-    bool fParavirtTsc  = false;
-    PVM pVM            = pVCpu->CTX_SUFF(pVM);
+static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVM pVM, PVMCPU pVCpu)
+{
+    int  rc;
+    bool fOffsettedTsc;
+    bool fParavirtTsc;
     if (pVM->hm.s.vmx.fUsePreemptTimer)
     {
-        uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fOffsettedTsc,
-                                                                     &fParavirtTsc);
+        uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset,
+                                                                     &fOffsettedTsc, &fParavirtTsc);

         /* Make sure the returned values have sane upper and lower boundaries. */
-        uint64_t u64CpuHz  = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
+        uint64_t u64CpuHz  = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
         cTicksToDeadline   = RT_MIN(cTicksToDeadline, u64CpuHz / 64);      /* 1/64th of a second */
         cTicksToDeadline   = RT_MAX(cTicksToDeadline, u64CpuHz / 2048);    /* 1/2048th of a second */

@@ -5628 +5628 @@
     }
     else
-        fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc);
+        fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc);

     /** @todo later optimize this to be done elsewhere and not before every

@@ -8637 +8637 @@
         || idCurrentCpu != pVCpu->hm.s.idLastCpu)
     {
-        hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu);
+        hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVM, pVCpu);
         pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
     }

@@ -8718 +8718 @@

     if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
-        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset);     /** @todo use SUPReadTSC() eventually. */
+        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset);

     STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
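
Note how the VMX path now uses the per-CPU frequency (SUPGetCpuHzFromGipBySetIndex) when sanity-clamping the preemption-timer deadline to between 1/2048th and 1/64th of a second of host ticks. A minimal sketch of that clamp, with plain comparisons standing in for RT_MIN/RT_MAX and a made-up host frequency (not VBox code):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t clamp_deadline(uint64_t cTicksToDeadline, uint64_t u64CpuHz)
    {
        uint64_t uMax = u64CpuHz / 64;   /* no more than 1/64th of a second   */
        uint64_t uMin = u64CpuHz / 2048; /* no less than 1/2048th of a second */
        if (cTicksToDeadline > uMax) cTicksToDeadline = uMax;
        if (cTicksToDeadline < uMin) cTicksToDeadline = uMin;
        return cTicksToDeadline;
    }

    int main(void)
    {
        uint64_t uCpuHz = 3000000000ULL; /* 3 GHz host, made-up value */
        /* A deadline several seconds away gets clamped down to uCpuHz / 64 ticks. */
        printf("%llu\n", (unsigned long long)clamp_deadline(5 * uCpuHz, uCpuHz));
        return 0;
    }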
  • trunk/src/VBox/VMM/VMMR3/TM.cpp  (r54292 → r54308)

@@ -182 +182 @@
 static DECLCALLBACK(void)   tmR3TimerInfoActive(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
 static DECLCALLBACK(void)   tmR3InfoClocks(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
-static DECLCALLBACK(VBOXSTRICTRC) tmR3CpuTickParavirtToggle(PVM pVM, PVMCPU pVCpu, void *pvData);
+static DECLCALLBACK(VBOXSTRICTRC) tmR3CpuTickParavirtDisable(PVM pVM, PVMCPU pVCpu, void *pvData);


@@ -934 +934 @@
      * Use GIP when available.
      */
-    uint64_t u64Hz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
+    uint64_t u64Hz = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
     if (g_pSUPGlobalInfoPage->u32Mode == SUPGIPMODE_INVARIANT_TSC)
     {

@@ -954 +954 @@
         }

-        u64Hz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
+        u64Hz = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
         if (u64Hz != UINT64_MAX)
             return u64Hz;

@@ -1182 +1182 @@
     {
         bool fParavirtTSC = false;
-        tmR3CpuTickParavirtToggle(pVM, NULL /* pVCpuEmt */, &fParavirtTSC);
+        tmR3CpuTickParavirtDisable(pVM, NULL, NULL);
     }
     Assert(!GIMIsParavirtTscEnabled(pVM));

@@ -3093 +3093 @@

 /**
- * Switch TM TSC mode to the most appropriate/efficient one.
- *
- * @returns strict VBox status code.
- * @param   pVM         Pointer to the VM.
- * @param   pVCpuEmt    Pointer to the VMCPU it's called on, can be NULL.
- * @param   pvData      Opaque pointer to whether usage of paravirt. TSC is
- *                      enabled or disabled by the guest OS.
- *
- * @thread  EMT.
- * @remarks Must only be called during an EMTs rendezvous.
- */
-static DECLCALLBACK(VBOXSTRICTRC) tmR3CpuTickParavirtToggle(PVM pVM, PVMCPU pVCpuEmt, void *pvData)
-{
-    Assert(pVM);
-    Assert(pvData);
-    Assert(pVM->tm.s.fTSCModeSwitchAllowed);
-    NOREF(pVCpuEmt);
-
-    bool *pfEnable = (bool *)pvData;
-    if (*pfEnable)
-    {
-        if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET)
+ * @callback_method_impl{PFNVMMEMTRENDEZVOUS,
+ *      Worker for TMR3CpuTickParavirtEnable}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) tmR3CpuTickParavirtEnable(PVM pVM, PVMCPU pVCpuEmt, void *pvData)
+{
+    AssertPtr(pVM); Assert(pVM->tm.s.fTSCModeSwitchAllowed); NOREF(pVCpuEmt); NOREF(pvData);
+    Assert(pVCpuEmt->tm.s.fTSCTicking);
+
+    if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET)
+    {
+        if (tmR3HasFixedTSC(pVM))
         {
-            if (tmR3HasFixedTSC(pVM))
-            {
-                uint64_t u64NowVirtSync = TMVirtualSyncGetNoCheck(pVM);
-                uint64_t u64Now = ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL);
-                uint32_t cCpus  = pVM->cCpus;
-                uint64_t u64RealTSC = ASMReadTSC();     /** @todo should use SUPReadTsc() */
-                for (uint32_t i = 0; i < cCpus; i++)
-                {
-                    PVMCPU   pVCpu = &pVM->aCpus[i];
-                    uint64_t u64TickOld = u64Now - pVCpu->tm.s.offTSCRawSrc;
-
-                    /*
-                     * The return value of TMCpuTickGet() and the guest's TSC value (u64Tick) must
-                     * remain constant across the TM TSC mode-switch.
-                     * OldTick = VrSync - CurOff
-                     * NewTick = RealTsc - NewOff
-                     * NewTick = OldTick
-                     *  => RealTsc - NewOff = VrSync - CurOff
-                     *  => NewOff = CurOff + RealTsc - VrSync
-                     */
-                    pVCpu->tm.s.offTSCRawSrc = pVCpu->tm.s.offTSCRawSrc + u64RealTSC  - u64Now;
-
-                    /* If the new offset results in the TSC going backwards, re-adjust the offset. */
-                    if (u64RealTSC - pVCpu->tm.s.offTSCRawSrc < u64TickOld)
-                        pVCpu->tm.s.offTSCRawSrc += u64TickOld - u64RealTSC;
-                    Assert(u64RealTSC - pVCpu->tm.s.offTSCRawSrc >= u64TickOld);
-                }
-                pVM->tm.s.enmTSCMode = TMTSCMODE_REAL_TSC_OFFSET;
-                LogRel(("TM: Switched TSC mode. New enmTSCMode=%d (%s)\n", pVM->tm.s.enmTSCMode, tmR3GetTSCModeName(pVM)));
-            }
-            else
-                LogRel(("TM: Host is not suitable for using TSC mode (%d - %s). Request to change TSC mode ignored.\n",
-                        TMTSCMODE_REAL_TSC_OFFSET, tmR3GetTSCModeNameEx(TMTSCMODE_REAL_TSC_OFFSET)));
-        }
-    }
-    else
-    {
-        if (   pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
-            && pVM->tm.s.enmTSCMode != pVM->tm.s.enmOriginalTSCMode)
-        {
-            uint64_t u64NowVirtSync = TMVirtualSyncGetNoCheck(pVM);
-            uint64_t u64Now     = ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL);
-            uint64_t u64RealTSC = ASMReadTSC();          /** @todo replace with SUPReadTSC() eventually. */
-            uint32_t cCpus      = pVM->cCpus;
+            /*
+             * The return value of TMCpuTickGet() and the guest's TSC value for each
+             * CPU must remain constant across the TM TSC mode-switch.  Thus we have
+             * the following equation (new/old signifies the new/old tsc modes):
+             *      uNewTsc = uOldTsc
+             *
+             * Where (see tmCpuTickGetInternal):
+             *      uOldTsc = uRawOldTsc - offTscRawSrcOld
+             *      uNewTsc = uRawNewTsc - offTscRawSrcNew
+             *
+             * Solve it for offTscRawSrcNew without replacing uOldTsc:
+             *     uRawNewTsc - offTscRawSrcNew = uOldTsc
+             *  => -offTscRawSrcNew = uOldTsc - uRawNewTsc
+             *  => offTscRawSrcNew  = uRawNewTsc - uOldTsc
+             */
+            uint64_t uRawOldTsc = tmR3CpuTickGetRawVirtualNoCheck(pVM);
+            uint64_t uRawNewTsc = SUPReadTsc();
+            uint32_t cCpus = pVM->cCpus;
             for (uint32_t i = 0; i < cCpus; i++)
             {
-                PVMCPU   pVCpu      = &pVM->aCpus[i];
-                uint64_t u64TickOld = u64RealTSC - pVCpu->tm.s.offTSCRawSrc;
-
-                /* Update the last-seen tick here as we havent't been updating it (as we don't
-                   need it) while in pure TSC-offsetting mode. */
-                pVCpu->tm.s.u64TSCLastSeen = pVCpu->tm.s.u64TSC;
-
-                /*
-                 * The return value of TMCpuTickGet() and the guest's TSC value (u64Tick) must
-                 * remain constant across the TM TSC mode-switch.
-                 * OldTick = RealTsc - CurOff
-                 * NewTick = VrSync - NewOff
-                 * NewTick = OldTick
-                 *  => VrSync - NewOff = RealTsc - CurOff
-                 *  => NewOff = CurOff + VrSync - RealTsc
-                 */
-                pVCpu->tm.s.offTSCRawSrc = pVCpu->tm.s.offTSCRawSrc + u64Now - u64RealTSC;
-
-                /* If the new offset results in the TSC going backwards, re-adjust the offset. */
-                if (u64Now - pVCpu->tm.s.offTSCRawSrc < u64TickOld)
-                    pVCpu->tm.s.offTSCRawSrc += u64TickOld - u64Now;
-                Assert(u64Now - pVCpu->tm.s.offTSCRawSrc >= u64TickOld);
+                PVMCPU   pVCpu   = &pVM->aCpus[i];
+                uint64_t uOldTsc = uRawOldTsc - pVCpu->tm.s.offTSCRawSrc;
+                pVCpu->tm.s.offTSCRawSrc = uRawNewTsc - uOldTsc;
+                Assert(uRawNewTsc - pVCpu->tm.s.offTSCRawSrc >= uOldTsc); /* paranoia^256 */
             }
-            pVM->tm.s.enmTSCMode = pVM->tm.s.enmOriginalTSCMode;
+
+            pVM->tm.s.enmTSCMode = TMTSCMODE_REAL_TSC_OFFSET;
             LogRel(("TM: Switched TSC mode. New enmTSCMode=%d (%s)\n", pVM->tm.s.enmTSCMode, tmR3GetTSCModeName(pVM)));
         }
+        else
+            LogRel(("TM: Host is not suitable for using TSC mode (%d - %s). Request to change TSC mode ignored.\n",
+                    TMTSCMODE_REAL_TSC_OFFSET, tmR3GetTSCModeNameEx(TMTSCMODE_REAL_TSC_OFFSET)));
     }
     return VINF_SUCCESS;

@@ -3195 +3144 @@
 /**
  * Notify TM that the guest has enabled usage of a paravirtualized TSC.
+ *
+ * This may perform a EMT rendezvous and change the TSC virtualization mode.
  *
  * @returns VBox status code.

@@ -3203 +3154 @@
     int rc = VINF_SUCCESS;
     if (pVM->tm.s.fTSCModeSwitchAllowed)
-    {
-        bool fEnable = true;
-        rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, tmR3CpuTickParavirtToggle, (void *)&fEnable);
-    }
+        rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, tmR3CpuTickParavirtEnable, NULL);
     pVM->tm.s.fParavirtTscEnabled = true;
     return rc;

@@ -3213 +3161 @@

 /**
+ * @callback_method_impl{PFNVMMEMTRENDEZVOUS,
+ *      Worker for TMR3CpuTickParavirtDisable}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) tmR3CpuTickParavirtDisable(PVM pVM, PVMCPU pVCpuEmt, void *pvData)
+{
+    AssertPtr(pVM); Assert(pVM->tm.s.fTSCModeSwitchAllowed); NOREF(pVCpuEmt);
+    Assert(pVCpuEmt->tm.s.fTSCTicking);
+
+    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
+        && pVM->tm.s.enmTSCMode != pVM->tm.s.enmOriginalTSCMode)
+    {
+        /*
+         * See tmR3CpuTickParavirtEnable for an explanation of the conversion math.
+         */
+        uint64_t uRawOldTsc = SUPReadTsc();
+        uint64_t uRawNewTsc = tmR3CpuTickGetRawVirtualNoCheck(pVM);
+        uint32_t cCpus = pVM->cCpus;
+        for (uint32_t i = 0; i < cCpus; i++)
+        {
+            PVMCPU   pVCpu   = &pVM->aCpus[i];
+            uint64_t uOldTsc = uRawOldTsc - pVCpu->tm.s.offTSCRawSrc;
+            pVCpu->tm.s.offTSCRawSrc = uRawNewTsc - uOldTsc;
+            Assert(uRawNewTsc - pVCpu->tm.s.offTSCRawSrc >= uOldTsc); /* paranoia^256 */
+
+            /* Update the last-seen tick here as we havent't been updating it (as we don't
+               need it) while in pure TSC-offsetting mode. */
+#if 0 /** @todo r=bird: Why use the TSC value from the last time we paused the TSC? Makes more sense to use uOldTsc doesn't it? */
+            pVCpu->tm.s.u64TSCLastSeen = pVCpu->tm.s.u64TSC;
+#else
+            pVCpu->tm.s.u64TSCLastSeen = uOldTsc;
+#endif
+        }
+        pVM->tm.s.enmTSCMode = pVM->tm.s.enmOriginalTSCMode;
+        LogRel(("TM: Switched TSC mode. New enmTSCMode=%d (%s)\n", pVM->tm.s.enmTSCMode, tmR3GetTSCModeName(pVM)));
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
  * Notify TM that the guest has disabled usage of a paravirtualized TSC.
+ *
+ * If TMR3CpuTickParavirtEnable changed the TSC virtualization mode, this will
+ * perform an EMT  rendezvous to revert those changes.
  *
  * @returns VBox status code.

@@ -3222 +3213 @@
     int rc = VINF_SUCCESS;
     if (pVM->tm.s.fTSCModeSwitchAllowed)
-    {
-        bool fEnable = false;
-        rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, tmR3CpuTickParavirtToggle, (void *)&fEnable);
-    }
+        rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, tmR3CpuTickParavirtDisable, NULL);
     pVM->tm.s.fParavirtTscEnabled = false;
     return rc;
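
Both rendezvous workers above recompute each VCPU's offTSCRawSrc as uRawNewTsc - uOldTsc, which by construction keeps the guest-visible TSC constant across the mode switch. A standalone sketch with made-up numbers to show the identity (not VBox code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t uRawOldTsc      = 5000000; /* raw reading of the OLD tsc source (made-up) */
        uint64_t uRawNewTsc      = 9000000; /* raw reading of the NEW tsc source (made-up) */
        uint64_t offTscRawSrcOld = 1000000;

        uint64_t uOldTsc         = uRawOldTsc - offTscRawSrcOld; /* guest TSC before: 4000000 */
        uint64_t offTscRawSrcNew = uRawNewTsc - uOldTsc;         /* new offset:       5000000 */
        uint64_t uNewTsc         = uRawNewTsc - offTscRawSrcNew; /* guest TSC after:  4000000 */

        printf("before=%llu after=%llu\n",
               (unsigned long long)uOldTsc, (unsigned long long)uNewTsc);
        return 0; /* prints equal values: the switch is invisible to the guest */
    }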
  • trunk/src/VBox/VMM/VMMR3/VMMTests.cpp  (r50115 → r54308)

@@ -495 +495 @@
             }
             uint64_t Ticks = ASMReadTSC() - StartTick;
-            if (Ticks < (SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage) / 10000))
-                RTPrintf("Warning: Ticks=%RU64 (< %RU64)\n", Ticks, SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage) / 10000);
+            if (Ticks < (SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage) / 10000))
+                RTPrintf("Warning: Ticks=%RU64 (< %RU64)\n", Ticks, SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage) / 10000);
         }
  • trunk/src/VBox/VMM/VMMRC/VMMRC.cpp  (r49893 → r54308)

@@ -136 +136 @@
         case VMMGC_DO_TESTCASE_INTERRUPT_MASKING:
         {
-            uint64_t u64MaxTicks = (SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage) != ~(uint64_t)0
-                                    ? SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage)
+            uint64_t u64MaxTicks = (SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage) != ~(uint64_t)0
+                                    ? SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage)
                                     : _2G)
                                    / 10000;
  • trunk/src/VBox/VMM/include/TMInternal.h  (r54270 → r54308)

@@ -759 +759 @@
 #endif

+uint64_t                tmR3CpuTickGetRawVirtualNoCheck(PVM pVM);
 int                     tmCpuTickPause(PVMCPU pVCpu);
 int                     tmCpuTickPauseLocked(PVM pVM, PVMCPU pVCpu);