VirtualBox

Changeset 105698 in vbox


Ignore:
Timestamp:
Aug 15, 2024 11:33:49 PM (7 months ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
164398
Message:

VMM/IEM,TM: Adaptive timer polling and running of the timer queues from the IEM recompiler execution loop. bugref:10656

Location:
trunk
Files:
9 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/vmm/iem.h

    r104991 r105698  
    159159VMMDECL(VBOXSTRICTRC)       IEMExecOneIgnoreLock(PVMCPUCC pVCpu);
    160160VMMDECL(VBOXSTRICTRC)       IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions);
    161 VMM_INT_DECL(VBOXSTRICTRC)  IEMExecRecompiler(PVMCC pVM, PVMCPUCC pVCpu);
     161VMM_INT_DECL(VBOXSTRICTRC)  IEMExecRecompiler(PVMCC pVM, PVMCPUCC pVCpu, bool fWasHalted);
    162162/** Statistics returned by IEMExecForExits. */
    163163typedef struct IEMEXECFOREXITSTATS
  • trunk/include/VBox/vmm/tm.h

    r105673 r105698  
    273273VMMDECL(bool)           TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu);
    274274VMM_INT_DECL(bool)      TMTimerPollBoolWith32BitMilliTS(PVMCC pVM, PVMCPUCC pVCpu, uint32_t *pmsNow);
    275 VMM_INT_DECL(bool)      TMTimerPollBoolWithNanoTS(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pnsNow);
     275VMM_INT_DECL(uint64_t)  TMTimerPollBoolWithNanoTS(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pnsNow);
    276276VMM_INT_DECL(void)      TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu);
    277277VMM_INT_DECL(uint64_t)  TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta);
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompBltIn.cpp

    r105673 r105698  
    221221    {
    222222# ifdef RT_ARCH_AMD64
    223         /* dec  [rbx + cIrqChecksTillNextPoll] */
     223        /* dec  [rbx + cTbsTillNextTimerPoll] */
    224224        pCodeBuf[off++] = 0xff;
    225         off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 1, RT_UOFFSETOF(VMCPU, iem.s.cIrqChecksTillNextPoll));
     225        off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 1, RT_UOFFSETOF(VMCPU, iem.s.cTbsTillNextTimerPoll));
    226226
    227227        /* jz   ReturnBreakFF */
     
    229229
    230230# elif defined(RT_ARCH_ARM64)
    231         AssertCompile(RTASSERT_OFFSET_OF(VMCPU, iem.s.cIrqChecksTillNextPoll) < _4K * sizeof(uint32_t));
    232         off = iemNativeEmitLoadGprFromVCpuU32Ex(pCodeBuf, off, idxTmpReg1, RT_UOFFSETOF(VMCPU, iem.s.cIrqChecksTillNextPoll));
     231        AssertCompile(RTASSERT_OFFSET_OF(VMCPU, iem.s.cTbsTillNextTimerPoll) < _4K * sizeof(uint32_t));
     232        off = iemNativeEmitLoadGprFromVCpuU32Ex(pCodeBuf, off, idxTmpReg1, RT_UOFFSETOF(VMCPU, iem.s.cTbsTillNextTimerPoll));
    233233        pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxTmpReg1, idxTmpReg1, 1, false /*f64Bit*/);
    234         off = iemNativeEmitStoreGprToVCpuU32Ex(pCodeBuf, off, idxTmpReg1, RT_UOFFSETOF(VMCPU, iem.s.cIrqChecksTillNextPoll));
     234        off = iemNativeEmitStoreGprToVCpuU32Ex(pCodeBuf, off, idxTmpReg1, RT_UOFFSETOF(VMCPU, iem.s.cTbsTillNextTimerPoll));
    235235
    236236        /* cbz reg1, ReturnBreakFF */
     
    398398
    399399/**
    400  * Built-in function that works the cIrqChecksTillNextPoll counter on direct TB
     400 * Built-in function that works the cTbsTillNextTimerPoll counter on direct TB
    401401 * linking, like loop-jumps.
    402402 */
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp

    r105673 r105698  
    199199
    200200/**
    201  * Built-in function that works the cIrqChecksTillNextPoll counter on direct TB
     201 * Built-in function that works the cTbsTillNextTimerPoll counter on direct TB
    202202 * linking, like loop-jumps.
    203203 */
    204204IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckTimers)
    205205{
    206     if (RT_LIKELY(--pVCpu->iem.s.cIrqChecksTillNextPoll > 0))
     206    if (RT_LIKELY(--pVCpu->iem.s.cTbsTillNextTimerPoll > 0))
    207207        return VINF_SUCCESS;
    208208
     
    219219IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckTimersAndIrq)
    220220{
    221     if (RT_LIKELY(--pVCpu->iem.s.cIrqChecksTillNextPoll > 0))
     221    if (RT_LIKELY(--pVCpu->iem.s.cTbsTillNextTimerPoll > 0))
    222222        return iemThreadedFunc_BltIn_CheckIrqCommon(pVCpu);
    223223
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdRecompiler.cpp

    r105673 r105698  
    29202920*********************************************************************************************************************************/
    29212921
     2922/** Default TB factor.
     2923 * This is basically the number of nanoseconds we guess executing a TB takes
     2924 * on average.  We estimate it high if we can.
     2925 * @note Best if this is a power of two so it can be translated to a shift.  */
     2926#define IEM_TIMER_POLL_DEFAULT_FACTOR   UINT32_C(64)
     2927/** The minimum number of nanoseconds we can allow between timer pollings.
     2928 * This must take the cost of TMTimerPollBoolWithNanoTS into mind.  We put that
     2929 * cost at 104 ns now, thus this constant is at 256 ns. */
     2930#define IEM_TIMER_POLL_MIN_NS           UINT32_C(256)
     2931/** The IEM_TIMER_POLL_MIN_NS value roughly translated to TBs, with some grains
     2932 * of salt thrown in.
     2933 * The idea is that we will be able to make progress with guest code execution
     2934 * before polling timers and between running timers. */
     2935#define IEM_TIMER_POLL_MIN_ITER         UINT32_C(12)
     2936/** The maximum number of nanoseconds we can allow between timer pollings.
     2937 * This probably shouldn't be too high, as we don't have any timer
    2938 * reprogramming feedback in the polling code.  So, when a device reschedules a
     2939 * timer for an earlier delivery, we won't know about it.  */
     2940#define IEM_TIMER_POLL_MAX_NS           UINT32_C(8388608) /* 0x800000 ns = 8.4 ms */
     2941/** The IEM_TIMER_POLL_MAX_NS value roughly translated to TBs, with some grains
     2942 * of salt thrown in.
     2943 * This helps control fluctuations in the NU benchmark. */
     2944#define IEM_TIMER_POLL_MAX_ITER         _512K
     2945
     2946
     2947DECL_FORCE_INLINE(uint32_t) iemPollTimersCalcDefaultCountdown(uint64_t cNsDelta)
     2948{
     2949    if (cNsDelta >= IEM_TIMER_POLL_MAX_NS)
     2950        return RT_MIN(IEM_TIMER_POLL_MAX_NS / IEM_TIMER_POLL_DEFAULT_FACTOR, IEM_TIMER_POLL_MAX_ITER);
     2951
     2952    cNsDelta = RT_BIT_64(ASMBitFirstSetU32(cNsDelta) - 1); /* round down to power of 2 */
     2953    uint32_t const cRet = cNsDelta / IEM_TIMER_POLL_DEFAULT_FACTOR;
     2954    if (cRet >= IEM_TIMER_POLL_MIN_ITER)
     2955    {
     2956        if (cRet <= IEM_TIMER_POLL_MAX_ITER)
     2957            return cRet;
     2958        return IEM_TIMER_POLL_MAX_ITER;
     2959    }
     2960    return IEM_TIMER_POLL_MIN_ITER;
     2961}
     2962
     2963
    29222964/**
    29232965 * Helper for polling timers.
     
    29252967DECLHIDDEN(int) iemPollTimers(PVMCC pVM, PVMCPUCC pVCpu) RT_NOEXCEPT
    29262968{
    2927     /*
    2928      * Do the polling and calculate the time since the last time.
    2929      */
    2930     uint64_t       nsNow        = 0;
    2931     bool const     fExpired     = TMTimerPollBoolWithNanoTS(pVM, pVCpu, &nsNow);
     2969    STAM_PROFILE_START(&pVCpu->iem.s.StatTimerPoll, a);
     2970
     2971    /*
     2972     * Check for VM_FF_TM_VIRTUAL_SYNC and call TMR3VirtualSyncFF if set.
     2973     * This is something all EMTs can do.
     2974     */
     2975    /* If the virtual sync FF is set, respond to it. */
     2976    bool fRanTimers = VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
     2977    if (!fRanTimers)
     2978    { /* likely */ }
     2979    else
     2980    {
     2981        STAM_PROFILE_START(&pVCpu->iem.s.StatTimerPollRun, b);
     2982        TMR3VirtualSyncFF(pVM, pVCpu);
     2983        STAM_PROFILE_STOP(&pVCpu->iem.s.StatTimerPollRun, b);
     2984    }
     2985
     2986    /*
     2987     * Poll timers.
     2988     *
     2989     * On the 10980xe the polling averages 314 ticks, with a min of 201, while
     2990     * running a norton utilities DOS benchmark program. TSC runs at 3GHz,
     2991     * translating that to 104 ns and 67 ns respectively. (An M2 booting win11
     2992     * has an average of 2 ticks / 84 ns.)
     2993     *
     2994     * With the same setup the TMR3VirtualSyncFF and else branch here profiles
     2995     * to 79751 ticks / 26583 ns on average, with a min of 1194 ticks / 398 ns.
     2996     * (An M2 booting win11 has an average of 24 ticks / 1008 ns, with a min of
     2997     * 8 ticks / 336 ns.)
     2998     *
     2999     * If we get a zero return value we run timers.  Non-timer EMTs shouldn't
     3000     * ever see a zero value here, so we just call TMR3TimerQueuesDo.  However,
     3001     * we do not re-run timers if we already called TMR3VirtualSyncFF above, we
     3002     * try to make sure some code is executed first.
     3003     */
     3004    uint64_t nsNow    = 0;
     3005    uint64_t cNsDelta = TMTimerPollBoolWithNanoTS(pVM, pVCpu, &nsNow);
     3006    if (cNsDelta >= 1) /* It is okay to run virtual sync timers a little early. */
     3007    { /* likely */ }
     3008    else if (!fRanTimers || VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
     3009    {
     3010        STAM_PROFILE_START(&pVCpu->iem.s.StatTimerPollRun, b);
     3011        TMR3TimerQueuesDo(pVM);
     3012        fRanTimers = true;
     3013        nsNow = 0;
     3014        cNsDelta = TMTimerPollBoolWithNanoTS(pVM, pVCpu, &nsNow);
     3015        STAM_PROFILE_STOP(&pVCpu->iem.s.StatTimerPollRun, b);
     3016    }
     3017    else
     3018        cNsDelta = 33;
     3019
     3020    /*
     3021     * Calc interval and update the timestamps.
     3022     */
    29323023    uint64_t const cNsSinceLast = nsNow - pVCpu->iem.s.nsRecompilerPollNow;
    2933 
    2934     /* Store the new timstamps.  */
    29353024    pVCpu->iem.s.nsRecompilerPollNow = nsNow;
    29363025    pVCpu->iem.s.msRecompilerPollNow = (uint32_t)(nsNow / RT_NS_1MS);
     
    29443033     * on the guest code.
    29453034     */
    2946 /** @todo can we make this even more adaptive based on current timer config as well? */
    2947     uint32_t       cIrqChecksTillNextPoll = pVCpu->iem.s.cIrqChecksTillNextPollPrev;
    2948     uint32_t const cNsIdealPollInterval   = pVCpu->iem.s.cNsIdealPollInterval;
    2949     int64_t const  nsFromIdeal            = cNsSinceLast - cNsIdealPollInterval;
     3035#ifdef IEM_WITH_ADAPTIVE_TIMER_POLLING
     3036    uint32_t cItersTillNextPoll = pVCpu->iem.s.cTbsTillNextTimerPollPrev;
     3037    if (cNsDelta >= RT_NS_1SEC / 4)
     3038    {
     3039        /*
     3040         * Non-timer EMTs should end up here with a fixed 500ms delta, just return
     3041         * the max and keep the polling overhead to the dedicated timer EMT.
     3042         */
     3043        AssertCompile(IEM_TIMER_POLL_MAX_ITER * IEM_TIMER_POLL_DEFAULT_FACTOR <= RT_NS_100MS);
     3044        cItersTillNextPoll = IEM_TIMER_POLL_MAX_ITER;
     3045    }
     3046    else
     3047    {
     3048        /*
     3049         * This is the timer EMT.
     3050         */
     3051        if (cNsDelta <= IEM_TIMER_POLL_MIN_NS)
     3052        {
     3053            STAM_COUNTER_INC(&pVCpu->iem.s.StatTimerPollTiny);
     3054            cItersTillNextPoll = IEM_TIMER_POLL_MIN_ITER;
     3055            IEMTLBTRACE_USER0(pVCpu, TMVirtualSyncGetLag(pVM), RT_MAKE_U64(cNsSinceLast, cNsDelta), cItersTillNextPoll, 0);
     3056        }
     3057        else
     3058        {
     3059            uint32_t const cNsDeltaAdj   = cNsDelta >= IEM_TIMER_POLL_MAX_NS ? IEM_TIMER_POLL_MAX_NS     : (uint32_t)cNsDelta;
     3060            uint32_t const cNsDeltaSlack = cNsDelta >= IEM_TIMER_POLL_MAX_NS ? IEM_TIMER_POLL_MAX_NS / 2 : cNsDeltaAdj / 4;
     3061            if (   cNsSinceLast            < RT_MAX(IEM_TIMER_POLL_MIN_NS, 64)
     3062                || cItersTillNextPoll < IEM_TIMER_POLL_MIN_ITER /* paranoia */)
     3063            {
     3064                STAM_COUNTER_INC(&pVCpu->iem.s.StatTimerPollDefaultCalc);
     3065                cItersTillNextPoll = iemPollTimersCalcDefaultCountdown(cNsDeltaAdj);
     3066                IEMTLBTRACE_USER1(pVCpu, TMVirtualSyncGetLag(pVM), RT_MAKE_U64(cNsSinceLast, cNsDelta), cItersTillNextPoll, 3);
     3067            }
     3068            else if (   cNsSinceLast >= cNsDeltaAdj + cNsDeltaSlack
     3069                     || cNsSinceLast <= cNsDeltaAdj - cNsDeltaSlack)
     3070            {
     3071                if (cNsSinceLast >= cItersTillNextPoll)
     3072                {
     3073                    uint32_t uFactor = (uint32_t)(cNsSinceLast + cItersTillNextPoll - 1) / cItersTillNextPoll;
     3074                    cItersTillNextPoll = cNsDeltaAdj / uFactor;
     3075                    STAM_PROFILE_ADD_PERIOD(&pVCpu->iem.s.StatTimerPollFactorDivision, uFactor);
     3076                    IEMTLBTRACE_USER1(pVCpu, TMVirtualSyncGetLag(pVM), RT_MAKE_U64(cNsSinceLast, cNsDelta), cItersTillNextPoll, 1);
     3077                }
     3078                else
     3079                {
     3080                    uint32_t uFactor = cItersTillNextPoll / (uint32_t)cNsSinceLast;
     3081                    cItersTillNextPoll = cNsDeltaAdj * uFactor;
     3082                    STAM_PROFILE_ADD_PERIOD(&pVCpu->iem.s.StatTimerPollFactorMultiplication, uFactor);
     3083                    IEMTLBTRACE_USER1(pVCpu, TMVirtualSyncGetLag(pVM), RT_MAKE_U64(cNsSinceLast, cNsDelta), cItersTillNextPoll, 2);
     3084                }
     3085
     3086                if (cItersTillNextPoll >= IEM_TIMER_POLL_MIN_ITER)
     3087                {
     3088                    if (cItersTillNextPoll <= IEM_TIMER_POLL_MAX_ITER)
     3089                    { /* likely */ }
     3090                    else
     3091                    {
     3092                        STAM_COUNTER_INC(&pVCpu->iem.s.StatTimerPollMax);
     3093                        cItersTillNextPoll = IEM_TIMER_POLL_MAX_ITER;
     3094                    }
     3095                }
     3096                else
     3097                    cItersTillNextPoll = IEM_TIMER_POLL_MIN_ITER;
     3098            }
     3099            else
     3100            {
     3101                STAM_COUNTER_INC(&pVCpu->iem.s.StatTimerPollUnchanged);
     3102                IEMTLBTRACE_USER3(pVCpu, TMVirtualSyncGetLag(pVM), RT_MAKE_U64(cNsSinceLast, cNsDelta), cItersTillNextPoll, 0x00);
     3103            }
     3104        }
     3105        pVCpu->iem.s.cTbsTillNextTimerPollPrev = cItersTillNextPoll;
     3106    }
     3107#else
     3108/** Poll timers every 400 us / 2500 Hz. (source: thin air) */
     3109# define IEM_TIMER_POLL_IDEAL_NS     (400U * RT_NS_1US)
     3110    uint32_t       cItersTillNextPoll   = pVCpu->iem.s.cTbsTillNextTimerPollPrev;
     3111    uint32_t const cNsIdealPollInterval = IEM_TIMER_POLL_IDEAL_NS;
     3112    int64_t const  nsFromIdeal          = cNsSinceLast - cNsIdealPollInterval;
    29503113    if (nsFromIdeal < 0)
    29513114    {
    2952         if ((uint64_t)-nsFromIdeal > cNsIdealPollInterval / 8 && cIrqChecksTillNextPoll < _64K)
    2953         {
    2954             cIrqChecksTillNextPoll += cIrqChecksTillNextPoll / 8;
    2955             pVCpu->iem.s.cIrqChecksTillNextPollPrev = cIrqChecksTillNextPoll;
     3115        if ((uint64_t)-nsFromIdeal > cNsIdealPollInterval / 8 && cItersTillNextPoll < _64K)
     3116        {
     3117            cItersTillNextPoll += cItersTillNextPoll / 8;
     3118            pVCpu->iem.s.cTbsTillNextTimerPollPrev = cItersTillNextPoll;
    29563119        }
    29573120    }
    29583121    else
    29593122    {
    2960         if ((uint64_t)nsFromIdeal > cNsIdealPollInterval / 8 && cIrqChecksTillNextPoll > 256)
    2961         {
    2962             cIrqChecksTillNextPoll -= cIrqChecksTillNextPoll / 8;
    2963             pVCpu->iem.s.cIrqChecksTillNextPollPrev = cIrqChecksTillNextPoll;
    2964         }
    2965     }
    2966     pVCpu->iem.s.cIrqChecksTillNextPoll = cIrqChecksTillNextPoll;
     3123        if ((uint64_t)nsFromIdeal > cNsIdealPollInterval / 8 && cItersTillNextPoll > 256)
     3124        {
     3125            cItersTillNextPoll -= cItersTillNextPoll / 8;
     3126            pVCpu->iem.s.cTbsTillNextTimerPollPrev = cItersTillNextPoll;
     3127        }
     3128    }
     3129#endif
     3130    pVCpu->iem.s.cTbsTillNextTimerPoll = cItersTillNextPoll;
    29673131
    29683132    /*
    29693133     * Repeat the IRQ and FF checks.
    29703134     */
    2971     if (!fExpired)
     3135    if (cNsDelta > 0)
    29723136    {
    29733137        uint32_t fCpu = pVCpu->fLocalForcedActions;
     
    29813145                                  || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)) ) )
    29823146                      && !VM_FF_IS_ANY_SET(pVCpu->CTX_SUFF(pVM), VM_FF_ALL_MASK) ))
     3147        {
     3148            STAM_PROFILE_STOP(&pVCpu->iem.s.StatTimerPoll, a);
    29833149            return VINF_SUCCESS;
    2984     }
     3150        }
     3151    }
     3152    STAM_PROFILE_STOP(&pVCpu->iem.s.StatTimerPoll, a);
    29853153    return VINF_IEM_REEXEC_BREAK_FF;
    29863154}
     
    32653433
    32663434
    3267 VMM_INT_DECL(VBOXSTRICTRC) IEMExecRecompiler(PVMCC pVM, PVMCPUCC pVCpu)
     3435VMM_INT_DECL(VBOXSTRICTRC) IEMExecRecompiler(PVMCC pVM, PVMCPUCC pVCpu, bool fWasHalted)
    32683436{
    32693437    /*
     
    32913459#endif
    32923460        iemInitExec(pVCpu, 0 /*fExecOpts*/);
    3293     if (RT_LIKELY(pVCpu->iem.s.msRecompilerPollNow != 0))
     3461
     3462    if (RT_LIKELY(!fWasHalted && pVCpu->iem.s.msRecompilerPollNow != 0))
    32943463    { }
    32953464    else
    3296         pVCpu->iem.s.msRecompilerPollNow = (uint32_t)(TMVirtualGetNoCheck(pVM) / RT_NS_1MS);
     3465    {
     3466        /* Do polling after halt and the first time we get here. */
     3467#ifdef IEM_WITH_ADAPTIVE_TIMER_POLLING
     3468        uint64_t       nsNow      = 0;
     3469        uint32_t const cItersTillPoll = iemPollTimersCalcDefaultCountdown(TMTimerPollBoolWithNanoTS(pVM, pVCpu, &nsNow));
     3470        pVCpu->iem.s.cTbsTillNextTimerPollPrev = cItersTillPoll;
     3471        pVCpu->iem.s.cTbsTillNextTimerPoll     = cItersTillPoll;
     3472#else
     3473        uint64_t const nsNow = TMVirtualGetNoCheck(pVM);
     3474#endif
     3475        pVCpu->iem.s.nsRecompilerPollNow = nsNow;
     3476        pVCpu->iem.s.msRecompilerPollNow = (uint32_t)(nsNow / RT_NS_1MS);
     3477    }
    32973478    pVCpu->iem.s.ppTbLookupEntryR3 = &pVCpu->iem.s.pTbLookupEntryDummyR3;
    32983479
     
    33523533                    {
    33533534                        /* Once in a while we need to poll timers here. */
    3354                         if ((int32_t)--pVCpu->iem.s.cIrqChecksTillNextPoll > 0)
     3535                        if ((int32_t)--pVCpu->iem.s.cTbsTillNextTimerPoll > 0)
    33553536                        { /* likely */ }
    33563537                        else
  • trunk/src/VBox/VMM/VMMAll/TMAll.cpp

    r105673 r105698  
    11551155 * free time source for recent use tracking and such.
    11561156 *
    1157  * @returns true if timers are pending, false if not.
     1157 * @returns Nanoseconds till the next event, 0 if event already pending.
    11581158 *
    11591159 * @param   pVM         The cross context VM structure.
     
    11621162 * @thread  The emulation thread.
    11631163 */
    1164 VMM_INT_DECL(bool) TMTimerPollBoolWithNanoTS(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pnsNow)
     1164VMM_INT_DECL(uint64_t) TMTimerPollBoolWithNanoTS(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pnsNow)
    11651165{
    11661166    AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
    1167     uint64_t off = 0;
    1168     tmTimerPollInternal(pVM, pVCpu, &off, pnsNow);
    1169     return off == 0;
     1167    uint64_t offDelta = 0;
     1168    tmTimerPollInternal(pVM, pVCpu, &offDelta, pnsNow);
     1169    return offDelta;
    11701170}
    11711171
  • trunk/src/VBox/VMM/VMMR3/EM.cpp

    r103415 r105698  
    10571057 * @param   pVM         The cross context VM structure.
    10581058 * @param   pVCpu       The cross context virtual CPU structure.
     1059 * @param   fWasHalted  Set if we're coming out of a CPU HALT state.
    10591060 * @param   pfFFDone    Where to store an indicator telling whether or not
    10601061 *                      FFs were done before returning.
    10611062 *
    10621063 */
    1063 static VBOXSTRICTRC emR3RecompilerExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
     1064static VBOXSTRICTRC emR3RecompilerExecute(PVM pVM, PVMCPU pVCpu, bool fWasHalted, bool *pfFFDone)
    10641065{
    10651066    STAM_REL_PROFILE_START(&pVCpu->em.s.StatREMTotal, a);
     
    10981099#ifdef VBOX_WITH_IEM_RECOMPILER
    10991100            if (pVM->em.s.fIemRecompiled)
    1100                 rcStrict = IEMExecRecompiler(pVM, pVCpu);
     1101                rcStrict = IEMExecRecompiler(pVM, pVCpu, fWasHalted);
    11011102            else
    11021103#endif
     
    11901191        }
    11911192
     1193#ifdef VBOX_WITH_IEM_RECOMPILER
     1194        fWasHalted = false;
     1195#else
     1196        RT_NOREF(fWasHalted);
     1197#endif
    11921198    } /* The Inner Loop, recompiled execution mode version. */
    11931199
     
    22532259                case VINF_EM_RESUME:
    22542260                    Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
    2255                     /* Don't reschedule in the halted or wait for SIPI case. */
     2261                    /* Don't reschedule in the halted or wait-for-SIPI cases. */
    22562262                    if (    pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
    22572263                        ||  pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
     
    25102516                 */
    25112517                case EMSTATE_RECOMPILER:
    2512                     rc = VBOXSTRICTRC_TODO(emR3RecompilerExecute(pVM, pVCpu, &fFFDone));
     2518                    rc = VBOXSTRICTRC_TODO(emR3RecompilerExecute(pVM, pVCpu, enmOldState == EMSTATE_HALTED, &fFFDone));
    25132519                    Log2(("EMR3ExecuteVM: emR3RecompilerExecute -> %Rrc\n", rc));
    25142520                    break;
  • trunk/src/VBox/VMM/VMMR3/IEMR3.cpp

    r105673 r105698  
    227227
    228228#ifndef VBOX_VMM_TARGET_ARMV8
    229         /* Poll timers every 400 us / 2500 Hz. (source: thin air) */
    230         pVCpu->iem.s.cNsIdealPollInterval       = 400U * RT_NS_1US;
    231         pVCpu->iem.s.cIrqChecksTillNextPoll     = 128;
    232         pVCpu->iem.s.cIrqChecksTillNextPollPrev = 128;
     229        pVCpu->iem.s.cTbsTillNextTimerPoll      = 128;
     230        pVCpu->iem.s.cTbsTillNextTimerPollPrev  = 128;
    233231#endif
    234232
     
    603601# endif
    604602
    605         STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cIrqChecksTillNextPollPrev, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
    606                         "Timer polling interval",                       "/IEM/CPU%u/re/cIrqChecksTillNextPollPrev", idCpu);
     603# ifdef VBOX_WITH_STATISTICS
     604        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPoll, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
     605                        "Timer polling profiling",                      "/IEM/CPU%u/re/TimerPoll/", idCpu);
     606        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollRun, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
     607                        "Timer polling profiling",                      "/IEM/CPU%u/re/TimerPoll/Running", idCpu);
     608        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollUnchanged, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     609                        "Timer polling interval unchanged",             "/IEM/CPU%u/re/TimerPoll/Unchanged", idCpu);
     610        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollTiny, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     611                        "Timer polling interval tiny",                  "/IEM/CPU%u/re/TimerPoll/Tiny", idCpu);
     612        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollDefaultCalc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     613                        "Timer polling interval calculated using defaults", "/IEM/CPU%u/re/TimerPoll/DefaultCalc", idCpu);
     614        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollMax, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     615                        "Timer polling interval maxed out",             "/IEM/CPU%u/re/TimerPoll/Max", idCpu);
     616        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollFactorDivision, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     617                        "Timer polling factor",                         "/IEM/CPU%u/re/TimerPoll/FactorDivision", idCpu);
     618        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollFactorMultiplication, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     619                        "Timer polling factor",                         "/IEM/CPU%u/re/TimerPoll/FactorMultiplication", idCpu);
     620# endif
     621        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbsTillNextTimerPollPrev, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     622                        "Timer polling interval (in TBs)",              "/IEM/CPU%u/re/TimerPollInterval", idCpu);
    607623
    608624        PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r105673 r105698  
    9292# define IEM_WITH_THROW_CATCH
    9393#endif /*ASM-NOINC-END*/
     94
     95/** @def IEM_WITH_ADAPTIVE_TIMER_POLLING
     96 * Enables the adaptive timer polling code.
     97 */
     98#if defined(DOXYGEN_RUNNING) || 0
     99# define IEM_WITH_ADAPTIVE_TIMER_POLLING
     100#endif
    94101
    95102/** @def IEM_WITH_INTRA_TB_JUMPS
     
    821828#endif /* !IEM_WITH_TLB_TRACE */
    822829
    823 #if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3)
     830#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3) && 1
    824831# define IEMTLBTRACE_INVLPG(a_pVCpu, a_GCPtr) \
    825832    iemTlbTrace(a_pVCpu, kIemTlbTraceType_InvlPg, a_GCPtr)
     
    21882195
    21892196    /** The number of IRQ/FF checks till the next timer poll call. */
    2190     uint32_t                cIrqChecksTillNextPoll;
     2197    uint32_t                cTbsTillNextTimerPoll;
    21912198    /** The virtual sync time at the last timer poll call in milliseconds. */
    21922199    uint32_t                msRecompilerPollNow;
    21932200    /** The virtual sync time at the last timer poll call in nanoseconds. */
    21942201    uint64_t                nsRecompilerPollNow;
    2195     /** The previous cIrqChecksTillNextPoll value. */
    2196     uint32_t                cIrqChecksTillNextPollPrev;
    2197     /** The ideal nanosecond interval between two timer polls.
    2198      * @todo make this adaptive?  */
    2199     uint32_t                cNsIdealPollInterval;
     2202    /** The previous cTbsTillNextTimerPoll value. */
     2203    uint32_t                cTbsTillNextTimerPollPrev;
    22002204
    22012205    /** The current instruction number in a native TB.
     
    22422246     *  currently not up to date in EFLAGS. */
    22432247    uint32_t                fSkippingEFlags;
    2244     /** Spaced reserved for recompiler data / alignment. */
    2245     uint32_t                u32RecompilerStuff2;
    22462248#if 0  /* unused */
    22472249    /** Previous GCPhysInstrBuf value - only valid if fTbCrossedPage is set.   */
     
    24932495    /** @} */
    24942496
     2497    /** Timer polling statistics (debug only).
     2498     * @{  */
     2499    STAMPROFILE             StatTimerPoll;
     2500    STAMPROFILE             StatTimerPollPoll;
     2501    STAMPROFILE             StatTimerPollRun;
     2502    STAMCOUNTER             StatTimerPollUnchanged;
     2503    STAMCOUNTER             StatTimerPollTiny;
     2504    STAMCOUNTER             StatTimerPollDefaultCalc;
     2505    STAMCOUNTER             StatTimerPollMax;
     2506    STAMPROFILE             StatTimerPollFactorDivision;
     2507    STAMPROFILE             StatTimerPollFactorMultiplication;
     2508    /** @} */
     2509
    24952510#ifdef IEM_WITH_TLB_TRACE
    2496     uint64_t                au64Padding[6];
     2511    uint64_t                au64Padding[7];
    24972512#else
    2498     //uint64_t                au64Padding[1];
     2513    uint64_t                au64Padding[1];
    24992514#endif
    25002515
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette