VirtualBox

Ignore:
Timestamp:
Feb 3, 2015 10:45:39 AM (10 years ago)
Author:
vboxsync
Message:

VMM: Implemented TM TSC-mode switching with paravirtualized guests.

Files changed:
1 file edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp

    Diff between revisions r53441 (old) and r54065 (new)
    55
    66/*
    7  * Copyright (C) 2006-2014 Oracle Corporation
     7 * Copyright (C) 2006-2015 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    174174 * Record why we refused to use offsetted TSC.
    175175 *
    176  * Used by TMCpuTickCanUseRealTSC and TMCpuTickGetDeadlineAndTscOffset.
     176 * Used by TMCpuTickCanUseRealTSC() and TMCpuTickGetDeadlineAndTscOffset().
    177177 *
    178178 * @param   pVM         Pointer to the VM.
     
    213213 * @param   pVCpu           Pointer to the VMCPU.
    214214 * @param   poffRealTSC     The offset against the TSC of the current CPU.
    215  *                          Can be NULL.
    216  * @param   pfParavirtTsc   Where to store whether paravirt. TSC can be used or
    217  *                          not.
    218  * @thread EMT(pVCpu).
     215 * @param   pfParavirtTsc   Where to store whether paravirt. TSC is enabled.
     216 *
     217 * @thread  EMT(pVCpu).
     218 * @see     TMCpuTickGetDeadlineAndTscOffset().
    219219 */
    220220VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfParavirtTsc)
    221221{
    222222    PVM pVM = pVCpu->CTX_SUFF(pVM);
    223     bool fParavirtTsc = false;
     223    bool fOffsettedTsc = false;
    224224
    225225    /*
    226226     * We require:
    227      *     1. Use of a paravirtualized TSC is enabled by the guest.
    228      *     (OR)
    229227     *     1. A fixed TSC, this is checked at init time.
    230228     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
     
    234232     *          c) we're not using warp drive (accelerated virtual guest time).
    235233     */
    236     *pfParavirtTsc = GIMIsParavirtTscEnabled(pVM);
    237     if (    pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
    238         &&  RT_LIKELY(pVCpu->tm.s.fTSCTicking)
    239         &&  (   pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
    240              || (   !pVM->tm.s.fVirtualSyncCatchUp
    241                  && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
    242                  && !pVM->tm.s.fVirtualWarpDrive)))
    243     {
    244         if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET)
    245         {
    246             /* The source is the timer synchronous virtual clock. */
    247             if (poffRealTSC)
    248             {
    249                 uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
    250                                 - pVCpu->tm.s.offTSCRawSrc;
    251                 /** @todo When we start collecting statistics on how much time we spend executing
    252                  * guest code before exiting, we should check this against the next virtual sync
    253                  * timer timeout. If it's lower than the avg. length, we should trap rdtsc to increase
    254                  * the chance that we'll get interrupted right after the timer expired. */
    255                 *poffRealTSC = u64Now - ASMReadTSC();
    256             }
    257         }
    258         else if (poffRealTSC)
    259         {
    260             /* The source is the real TSC. */
    261             *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc;
    262         }
    263         /** @todo count this? */
    264         return true;
     234    Assert(pVCpu->tm.s.fTSCTicking);
     235    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;
     236
     237    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
     238    {
     239        /* The source is the real TSC. */
     240        *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc;
     241        return true;    /** @todo count this? */
     242    }
     243
     244    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
     245        && !pVM->tm.s.fVirtualSyncCatchUp
     246        && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
     247        && !pVM->tm.s.fVirtualWarpDrive)
     248    {
     249        /* The source is the timer synchronous virtual clock. */
     250        uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
     251                        - pVCpu->tm.s.offTSCRawSrc;
     252        /** @todo When we start collecting statistics on how much time we spend executing
     253         * guest code before exiting, we should check this against the next virtual sync
     254         * timer timeout. If it's lower than the avg. length, we should trap rdtsc to increase
     255         * the chance that we'll get interrupted right after the timer expired. */
     256        uint64_t u64TSC = ASMReadTSC();     /** @todo should be replaced with SUPReadTSC() eventually. */
     257        *poffRealTSC = u64Now - u64TSC;
     258        fOffsettedTsc = u64Now >= pVCpu->tm.s.u64TSCLastSeen;
     259        return true;    /** @todo count this? */
    265260    }
    266261
     
    304299 * @returns The number of host CPU clock ticks to the next timer deadline.
    305300 * @param   pVCpu           The current CPU.
    306  * @param   pfParavirtTsc   Where to store whether paravirt. TSC can be used or
    307  *                          not.
    308301 * @param   poffRealTSC     The offset against the TSC of the current CPU.
     302 * @param   pfOffsettedTsc  Where to store whether TSC offsetting can be used.
     303 * @param   pfParavirtTsc   Where to store whether paravirt. TSC is enabled.
    309304 *
    310305 * @thread  EMT(pVCpu).
    311  * @remarks Superset of TMCpuTickCanUseRealTSC().
    312  */
    313 VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, bool *pfParavirtTsc,
    314                                                         uint64_t *poffRealTSC)
    315 {
    316     PVM         pVM = pVCpu->CTX_SUFF(pVM);
    317     uint64_t    cTicksToDeadline;
     306 * @see    TMCpuTickCanUseRealTSC().
     307 */
     308VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfOffsettedTsc,
     309                                                        bool *pfParavirtTsc)
     310{
     311    PVM      pVM = pVCpu->CTX_SUFF(pVM);
     312    uint64_t cTicksToDeadline;
    318313
    319314    /*
    320315     * We require:
    321      *     1. Use of a paravirtualized TSC is enabled by the guest.
    322      *     (OR)
    323316     *     1. A fixed TSC, this is checked at init time.
    324317     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
     
    328321     *          c) we're not using warp drive (accelerated virtual guest time).
    329322     */
    330     *pfParavirtTsc = GIMIsParavirtTscEnabled(pVM);
    331     if (    pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
    332         &&  RT_LIKELY(pVCpu->tm.s.fTSCTicking)
    333         &&  (   pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
    334              || (   !pVM->tm.s.fVirtualSyncCatchUp
    335                  && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
    336                  && !pVM->tm.s.fVirtualWarpDrive)))
    337     {
     323    Assert(pVCpu->tm.s.fTSCTicking);
     324    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;
     325
     326    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
     327    {
     328        /* The source is the real TSC. */
     329        *poffRealTSC    = 0 - pVCpu->tm.s.offTSCRawSrc;
    338330        *pfOffsettedTsc = true;
    339         if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET)
    340         {
    341             /* The source is the timer synchronous virtual clock. */
    342             uint64_t cNsToDeadline;
    343             uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
    344             uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */
    345                             ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL)
    346                             : u64NowVirtSync;
    347             u64Now -= pVCpu->tm.s.offTSCRawSrc;
    348             *poffRealTSC = u64Now - ASMReadTSC();
    349             cTicksToDeadline = tmCpuCalcTicksToDeadline(cNsToDeadline);
    350         }
    351         else
    352         {
    353             /* The source is the real TSC. */
    354             *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc;
    355             cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
    356         }
    357     }
    358     else
    359     {
     331        cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
     332        return cTicksToDeadline;
     333    }
     334
     335    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
     336        && !pVM->tm.s.fVirtualSyncCatchUp
     337        && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
     338        && !pVM->tm.s.fVirtualWarpDrive)
     339    {
     340        /* The source is the timer synchronous virtual clock. */
     341        uint64_t cNsToDeadline;
     342        uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
     343        uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */
     344                        ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL)
     345                        : u64NowVirtSync;
     346        u64Now -= pVCpu->tm.s.offTSCRawSrc;
     347        *poffRealTSC     = u64Now - ASMReadTSC();        /** @todo replace with SUPReadTSC() eventually. */
     348        *pfOffsettedTsc  = u64Now >= pVCpu->tm.s.u64TSCLastSeen;
     349        cTicksToDeadline = tmCpuCalcTicksToDeadline(cNsToDeadline);
     350        return cTicksToDeadline;
     351    }
     352
    360353#ifdef VBOX_WITH_STATISTICS
    361         tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
     354    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
    362355#endif
    363         *pfOffsettedTsc  = false;
    364         *poffRealTSC     = 0;
    365         cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
    366     }
    367 
     356    *pfOffsettedTsc  = false;
     357    *poffRealTSC     = 0;
     358    cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
    368359    return cTicksToDeadline;
    369360}
     
    395386        {
    396387            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
    397             pVCpu->tm.s.u64TSCLastSeen += 64;   /* @todo choose a good increment here */
     388            pVCpu->tm.s.u64TSCLastSeen += 64;   /** @todo choose a good increment here */
    398389            u64 = pVCpu->tm.s.u64TSCLastSeen;
    399390        }
     
    503494VMMDECL(uint64_t) TMCpuTicksPerSecond(PVM pVM)
    504495{
    505     /** @todo revisit this, not sure why we need to get the rate from GIP for
    506      *        real-tsc-offset. */
    507     if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
     496    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
     497        && g_pSUPGlobalInfoPage->u32Mode != SUPGIPMODE_INVARIANT_TSC)
    508498    {
    509499        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette