Changeset 105698 in vbox
- Timestamp: Aug 15, 2024, 11:33:49 PM
- svn:sync-xref-src-repo-rev: 164398
- Location: trunk
- Files: 9 edited
trunk/include/VBox/vmm/iem.h (r104991 → r105698)

@@ -159 +159 @@
 VMMDECL(VBOXSTRICTRC)       IEMExecOneIgnoreLock(PVMCPUCC pVCpu);
 VMMDECL(VBOXSTRICTRC)       IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions);
-VMM_INT_DECL(VBOXSTRICTRC)  IEMExecRecompiler(PVMCC pVM, PVMCPUCC pVCpu);
+VMM_INT_DECL(VBOXSTRICTRC)  IEMExecRecompiler(PVMCC pVM, PVMCPUCC pVCpu, bool fWasHalted);
 /** Statistics returned by IEMExecForExits. */
 typedef struct IEMEXECFOREXITSTATS
trunk/include/VBox/vmm/tm.h (r105673 → r105698)

@@ -273 +273 @@
 VMMDECL(bool)               TMTimerPollBool(PVMCC pVM, PVMCPUCC pVCpu);
 VMM_INT_DECL(bool)          TMTimerPollBoolWith32BitMilliTS(PVMCC pVM, PVMCPUCC pVCpu, uint32_t *pmsNow);
-VMM_INT_DECL(bool)          TMTimerPollBoolWithNanoTS(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pnsNow);
+VMM_INT_DECL(uint64_t)      TMTimerPollBoolWithNanoTS(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pnsNow);
 VMM_INT_DECL(void)          TMTimerPollVoid(PVMCC pVM, PVMCPUCC pVCpu);
 VMM_INT_DECL(uint64_t)      TMTimerPollGIP(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pu64Delta);
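
The signature change above inverts the contract: instead of a boolean "timers pending" flag, TMTimerPollBoolWithNanoTS now returns the nanosecond delta until the next timer event, with 0 meaning an event is already due. A minimal before/after caller sketch (illustrative only, not taken from the changeset):

    uint64_t nsNow = 0;

    /* Before r105698: true meant "deadline reached, run timers now". */
    bool const fExpired = TMTimerPollBoolWithNanoTS(pVM, pVCpu, &nsNow);

    /* After r105698: 0 means "deadline reached"; a positive value is the
     * headroom in nanoseconds, which callers can use to size their poll
     * interval instead of polling blindly at a fixed rate. */
    uint64_t const cNsDelta = TMTimerPollBoolWithNanoTS(pVM, pVCpu, &nsNow);
    if (cNsDelta == 0)
    {
        /* run pending timers */
    }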
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompBltIn.cpp (r105673 → r105698)

@@ -221 +221 @@
 {
 # ifdef RT_ARCH_AMD64
-    /* dec [rbx + cIrqChecksTillNextPoll] */
+    /* dec [rbx + cTbsTillNextTimerPoll] */
     pCodeBuf[off++] = 0xff;
-    off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 1, RT_UOFFSETOF(VMCPU, iem.s.cIrqChecksTillNextPoll));
+    off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 1, RT_UOFFSETOF(VMCPU, iem.s.cTbsTillNextTimerPoll));

     /* jz ReturnBreakFF */
…
@@ -231 +231 @@
 # elif defined(RT_ARCH_ARM64)
-    AssertCompile(RTASSERT_OFFSET_OF(VMCPU, iem.s.cIrqChecksTillNextPoll) < _4K * sizeof(uint32_t));
-    off = iemNativeEmitLoadGprFromVCpuU32Ex(pCodeBuf, off, idxTmpReg1, RT_UOFFSETOF(VMCPU, iem.s.cIrqChecksTillNextPoll));
+    AssertCompile(RTASSERT_OFFSET_OF(VMCPU, iem.s.cTbsTillNextTimerPoll) < _4K * sizeof(uint32_t));
+    off = iemNativeEmitLoadGprFromVCpuU32Ex(pCodeBuf, off, idxTmpReg1, RT_UOFFSETOF(VMCPU, iem.s.cTbsTillNextTimerPoll));
     pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxTmpReg1, idxTmpReg1, 1, false /*f64Bit*/);
-    off = iemNativeEmitStoreGprToVCpuU32Ex(pCodeBuf, off, idxTmpReg1, RT_UOFFSETOF(VMCPU, iem.s.cIrqChecksTillNextPoll));
+    off = iemNativeEmitStoreGprToVCpuU32Ex(pCodeBuf, off, idxTmpReg1, RT_UOFFSETOF(VMCPU, iem.s.cTbsTillNextTimerPoll));

     /* cbz reg1, ReturnBreakFF */
…
@@ -398 +398 @@
 /**
- * Built-in function that works the cIrqChecksTillNextPoll counter on direct TB
+ * Built-in function that works the cTbsTillNextTimerPoll counter on direct TB
  * linking, like loop-jumps.
  */
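
Both emitter paths generate the same counter check; in C terms the emitted fragment behaves like the sketch below (assuming, as the "dec [rbx + ...]" comment suggests, that the fixed TB register holds pVCpu — illustration only, not literal recompiler output):

    /* C equivalent of the emitted AMD64/ARM64 fragment: decrement the
     * per-VCPU countdown and take the ReturnBreakFF exit when it hits zero,
     * so the execution loop calls iemPollTimers() before continuing. */
    if (--pVCpu->iem.s.cTbsTillNextTimerPoll == 0)
        return VINF_IEM_REEXEC_BREAK_FF;  /* via the ReturnBreakFF label */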
trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp (r105673 → r105698)

@@ -199 +199 @@
 /**
- * Built-in function that works the cIrqChecksTillNextPoll counter on direct TB
+ * Built-in function that works the cTbsTillNextTimerPoll counter on direct TB
  * linking, like loop-jumps.
  */
 IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckTimers)
 {
-    if (RT_LIKELY(--pVCpu->iem.s.cIrqChecksTillNextPoll > 0))
+    if (RT_LIKELY(--pVCpu->iem.s.cTbsTillNextTimerPoll > 0))
         return VINF_SUCCESS;
…
@@ -219 +219 @@
 IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckTimersAndIrq)
 {
-    if (RT_LIKELY(--pVCpu->iem.s.cIrqChecksTillNextPoll > 0))
+    if (RT_LIKELY(--pVCpu->iem.s.cTbsTillNextTimerPoll > 0))
         return iemThreadedFunc_BltIn_CheckIrqCommon(pVCpu);
trunk/src/VBox/VMM/VMMAll/IEMAllThrdRecompiler.cpp (r105673 → r105698)

@@ -2920 +2920 @@
 *********************************************************************************************************************************/

+/** Default TB factor.
+ * This is basically the number of nanoseconds we guess executing a TB takes
+ * on average.  We estimate it high if we can.
+ * @note Best if this is a power of two so it can be translated to a shift. */
+#define IEM_TIMER_POLL_DEFAULT_FACTOR   UINT32_C(64)
+/** The minimum number of nanoseconds we can allow between timer pollings.
+ * This must take the cost of TMTimerPollBoolWithNanoTS into mind.  We put that
+ * cost at 104 ns now, thus this constant is at 256 ns. */
+#define IEM_TIMER_POLL_MIN_NS           UINT32_C(256)
+/** The IEM_TIMER_POLL_MIN_NS value roughly translated to TBs, with some grains
+ * of salt thrown in.
+ * The idea is that we will be able to make progress with guest code execution
+ * before polling timers and between running timers. */
+#define IEM_TIMER_POLL_MIN_ITER         UINT32_C(12)
+/** The maximum number of nanoseconds we can allow between timer pollings.
+ * This probably shouldn't be too high, as we don't have any timer
+ * reprogramming feedback in the polling code.  So, when a device reschedules a
+ * timer for an earlier delivery, we won't know about it. */
+#define IEM_TIMER_POLL_MAX_NS           UINT32_C(8388608) /* 0x800000 ns = 8.4 ms */
+/** The IEM_TIMER_POLL_MAX_NS value roughly translated to TBs, with some grains
+ * of salt thrown in.
+ * This helps control fluctuations in the NU benchmark. */
+#define IEM_TIMER_POLL_MAX_ITER         _512K
+
+
+DECL_FORCE_INLINE(uint32_t) iemPollTimersCalcDefaultCountdown(uint64_t cNsDelta)
+{
+    if (cNsDelta >= IEM_TIMER_POLL_MAX_NS)
+        return RT_MIN(IEM_TIMER_POLL_MAX_NS / IEM_TIMER_POLL_DEFAULT_FACTOR, IEM_TIMER_POLL_MAX_ITER);
+
+    cNsDelta = RT_BIT_64(ASMBitFirstSetU32(cNsDelta) - 1); /* round down to power of 2 */
+    uint32_t const cRet = cNsDelta / IEM_TIMER_POLL_DEFAULT_FACTOR;
+    if (cRet >= IEM_TIMER_POLL_MIN_ITER)
+    {
+        if (cRet <= IEM_TIMER_POLL_MAX_ITER)
+            return cRet;
+        return IEM_TIMER_POLL_MAX_ITER;
+    }
+    return IEM_TIMER_POLL_MIN_ITER;
+}
+
+
 /**
  * Helper for polling timers.
  */
 DECLHIDDEN(int) iemPollTimers(PVMCC pVM, PVMCPUCC pVCpu) RT_NOEXCEPT
 {
-    /*
-     * Do the polling and calculate the time since the last time.
-     */
-    uint64_t nsNow = 0;
-    bool const fExpired = TMTimerPollBoolWithNanoTS(pVM, pVCpu, &nsNow);
+    STAM_PROFILE_START(&pVCpu->iem.s.StatTimerPoll, a);
+
+    /*
+     * Check for VM_FF_TM_VIRTUAL_SYNC and call TMR3VirtualSyncFF if set.
+     * This is something all EMTs can do.
+     */
+    /* If the virtual sync FF is set, respond to it. */
+    bool fRanTimers = VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
+    if (!fRanTimers)
+    { /* likely */ }
+    else
+    {
+        STAM_PROFILE_START(&pVCpu->iem.s.StatTimerPollRun, b);
+        TMR3VirtualSyncFF(pVM, pVCpu);
+        STAM_PROFILE_STOP(&pVCpu->iem.s.StatTimerPollRun, b);
+    }
+
+    /*
+     * Poll timers.
+     *
+     * On the 10980xe the polling averages 314 ticks, with a min of 201, while
+     * running a Norton Utilities DOS benchmark program.  TSC runs at 3GHz,
+     * translating that to 104 ns and 67 ns respectively.  (An M2 booting win11
+     * has an average of 2 ticks / 84 ns.)
+     *
+     * With the same setup the TMR3VirtualSyncFF and else branch here profiles
+     * to 79751 ticks / 26583 ns on average, with a min of 1194 ticks / 398 ns.
+     * (An M2 booting win11 has an average of 24 ticks / 1008 ns, with a min of
+     * 8 ticks / 336 ns.)
+     *
+     * If we get a zero return value we run timers.  Non-timer EMTs shouldn't
+     * ever see a zero value here, so we just call TMR3TimerQueuesDo.  However,
+     * we do not re-run timers if we already called TMR3VirtualSyncFF above, we
+     * try to make sure some code is executed first.
+     */
+    uint64_t nsNow    = 0;
+    uint64_t cNsDelta = TMTimerPollBoolWithNanoTS(pVM, pVCpu, &nsNow);
+    if (cNsDelta >= 1) /* It is okay to run virtual sync timers a little early. */
+    { /* likely */ }
+    else if (!fRanTimers || VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
+    {
+        STAM_PROFILE_START(&pVCpu->iem.s.StatTimerPollRun, b);
+        TMR3TimerQueuesDo(pVM);
+        fRanTimers = true;
+        nsNow = 0;
+        cNsDelta = TMTimerPollBoolWithNanoTS(pVM, pVCpu, &nsNow);
+        STAM_PROFILE_STOP(&pVCpu->iem.s.StatTimerPollRun, b);
+    }
+    else
+        cNsDelta = 33;
+
+    /*
+     * Calc interval and update the timestamps.
+     */
     uint64_t const cNsSinceLast = nsNow - pVCpu->iem.s.nsRecompilerPollNow;
-
-    /* Store the new timestamps. */
     pVCpu->iem.s.nsRecompilerPollNow = nsNow;
     pVCpu->iem.s.msRecompilerPollNow = (uint32_t)(nsNow / RT_NS_1MS);
…
@@ -2944 +3033 @@
      * on the guest code.
      */
-    /** @todo can we make this even more adaptive based on current timer config as well? */
-    uint32_t cIrqChecksTillNextPoll = pVCpu->iem.s.cIrqChecksTillNextPollPrev;
-    uint32_t const cNsIdealPollInterval = pVCpu->iem.s.cNsIdealPollInterval;
-    int64_t const nsFromIdeal = cNsSinceLast - cNsIdealPollInterval;
+#ifdef IEM_WITH_ADAPTIVE_TIMER_POLLING
+    uint32_t cItersTillNextPoll = pVCpu->iem.s.cTbsTillNextTimerPollPrev;
+    if (cNsDelta >= RT_NS_1SEC / 4)
+    {
+        /*
+         * Non-timer EMTs should end up here with a fixed 500ms delta, just return
+         * the max and keep the polling overhead to the dedicated timer EMT.
+         */
+        AssertCompile(IEM_TIMER_POLL_MAX_ITER * IEM_TIMER_POLL_DEFAULT_FACTOR <= RT_NS_100MS);
+        cItersTillNextPoll = IEM_TIMER_POLL_MAX_ITER;
+    }
+    else
+    {
+        /*
+         * This is the timer EMT.
+         */
+        if (cNsDelta <= IEM_TIMER_POLL_MIN_NS)
+        {
+            STAM_COUNTER_INC(&pVCpu->iem.s.StatTimerPollTiny);
+            cItersTillNextPoll = IEM_TIMER_POLL_MIN_ITER;
+            IEMTLBTRACE_USER0(pVCpu, TMVirtualSyncGetLag(pVM), RT_MAKE_U64(cNsSinceLast, cNsDelta), cItersTillNextPoll, 0);
+        }
+        else
+        {
+            uint32_t const cNsDeltaAdj   = cNsDelta >= IEM_TIMER_POLL_MAX_NS ? IEM_TIMER_POLL_MAX_NS : (uint32_t)cNsDelta;
+            uint32_t const cNsDeltaSlack = cNsDelta >= IEM_TIMER_POLL_MAX_NS ? IEM_TIMER_POLL_MAX_NS / 2 : cNsDeltaAdj / 4;
+            if (   cNsSinceLast       < RT_MAX(IEM_TIMER_POLL_MIN_NS, 64)
+                || cItersTillNextPoll < IEM_TIMER_POLL_MIN_ITER /* paranoia */)
+            {
+                STAM_COUNTER_INC(&pVCpu->iem.s.StatTimerPollDefaultCalc);
+                cItersTillNextPoll = iemPollTimersCalcDefaultCountdown(cNsDeltaAdj);
+                IEMTLBTRACE_USER1(pVCpu, TMVirtualSyncGetLag(pVM), RT_MAKE_U64(cNsSinceLast, cNsDelta), cItersTillNextPoll, 3);
+            }
+            else if (   cNsSinceLast >= cNsDeltaAdj + cNsDeltaSlack
+                     || cNsSinceLast <= cNsDeltaAdj - cNsDeltaSlack)
+            {
+                if (cNsSinceLast >= cItersTillNextPoll)
+                {
+                    uint32_t uFactor = (uint32_t)(cNsSinceLast + cItersTillNextPoll - 1) / cItersTillNextPoll;
+                    cItersTillNextPoll = cNsDeltaAdj / uFactor;
+                    STAM_PROFILE_ADD_PERIOD(&pVCpu->iem.s.StatTimerPollFactorDivision, uFactor);
+                    IEMTLBTRACE_USER1(pVCpu, TMVirtualSyncGetLag(pVM), RT_MAKE_U64(cNsSinceLast, cNsDelta), cItersTillNextPoll, 1);
+                }
+                else
+                {
+                    uint32_t uFactor = cItersTillNextPoll / (uint32_t)cNsSinceLast;
+                    cItersTillNextPoll = cNsDeltaAdj * uFactor;
+                    STAM_PROFILE_ADD_PERIOD(&pVCpu->iem.s.StatTimerPollFactorMultiplication, uFactor);
+                    IEMTLBTRACE_USER1(pVCpu, TMVirtualSyncGetLag(pVM), RT_MAKE_U64(cNsSinceLast, cNsDelta), cItersTillNextPoll, 2);
+                }
+
+                if (cItersTillNextPoll >= IEM_TIMER_POLL_MIN_ITER)
+                {
+                    if (cItersTillNextPoll <= IEM_TIMER_POLL_MAX_ITER)
+                    { /* likely */ }
+                    else
+                    {
+                        STAM_COUNTER_INC(&pVCpu->iem.s.StatTimerPollMax);
+                        cItersTillNextPoll = IEM_TIMER_POLL_MAX_ITER;
+                    }
+                }
+                else
+                    cItersTillNextPoll = IEM_TIMER_POLL_MIN_ITER;
+            }
+            else
+            {
+                STAM_COUNTER_INC(&pVCpu->iem.s.StatTimerPollUnchanged);
+                IEMTLBTRACE_USER3(pVCpu, TMVirtualSyncGetLag(pVM), RT_MAKE_U64(cNsSinceLast, cNsDelta), cItersTillNextPoll, 0x00);
+            }
+        }
+        pVCpu->iem.s.cTbsTillNextTimerPollPrev = cItersTillNextPoll;
+    }
+#else
+/** Poll timers every 400 us / 2500 Hz. (source: thin air) */
+# define IEM_TIMER_POLL_IDEAL_NS    (400U * RT_NS_1US)
+    uint32_t       cItersTillNextPoll   = pVCpu->iem.s.cTbsTillNextTimerPollPrev;
+    uint32_t const cNsIdealPollInterval = IEM_TIMER_POLL_IDEAL_NS;
+    int64_t  const nsFromIdeal          = cNsSinceLast - cNsIdealPollInterval;
     if (nsFromIdeal < 0)
     {
-        if ((uint64_t)-nsFromIdeal > cNsIdealPollInterval / 8 && cIrqChecksTillNextPoll < _64K)
-        {
-            cIrqChecksTillNextPoll += cIrqChecksTillNextPoll / 8;
-            pVCpu->iem.s.cIrqChecksTillNextPollPrev = cIrqChecksTillNextPoll;
+        if ((uint64_t)-nsFromIdeal > cNsIdealPollInterval / 8 && cItersTillNextPoll < _64K)
+        {
+            cItersTillNextPoll += cItersTillNextPoll / 8;
+            pVCpu->iem.s.cTbsTillNextTimerPollPrev = cItersTillNextPoll;
         }
     }
     else
     {
-        if ((uint64_t)nsFromIdeal > cNsIdealPollInterval / 8 && cIrqChecksTillNextPoll > 256)
-        {
-            cIrqChecksTillNextPoll -= cIrqChecksTillNextPoll / 8;
-            pVCpu->iem.s.cIrqChecksTillNextPollPrev = cIrqChecksTillNextPoll;
-        }
-    }
-    pVCpu->iem.s.cIrqChecksTillNextPoll = cIrqChecksTillNextPoll;
+        if ((uint64_t)nsFromIdeal > cNsIdealPollInterval / 8 && cItersTillNextPoll > 256)
+        {
+            cItersTillNextPoll -= cItersTillNextPoll / 8;
+            pVCpu->iem.s.cTbsTillNextTimerPollPrev = cItersTillNextPoll;
+        }
+    }
+#endif
+    pVCpu->iem.s.cTbsTillNextTimerPoll = cItersTillNextPoll;

     /*
      * Repeat the IRQ and FF checks.
      */
-    if (!fExpired)
+    if (cNsDelta > 0)
     {
         uint32_t fCpu = pVCpu->fLocalForcedActions;
…
                 || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)) ) )
             && !VM_FF_IS_ANY_SET(pVCpu->CTX_SUFF(pVM), VM_FF_ALL_MASK) ))
+        {
+            STAM_PROFILE_STOP(&pVCpu->iem.s.StatTimerPoll, a);
             return VINF_SUCCESS;
-    }
+        }
+    }
+    STAM_PROFILE_STOP(&pVCpu->iem.s.StatTimerPoll, a);
     return VINF_IEM_REEXEC_BREAK_FF;
 }
…
@@ -3267 +3435 @@
-VMM_INT_DECL(VBOXSTRICTRC) IEMExecRecompiler(PVMCC pVM, PVMCPUCC pVCpu)
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecRecompiler(PVMCC pVM, PVMCPUCC pVCpu, bool fWasHalted)
 {
     /*
…
 #endif
     iemInitExec(pVCpu, 0 /*fExecOpts*/);
-    if (RT_LIKELY(pVCpu->iem.s.msRecompilerPollNow != 0))
+
+    if (RT_LIKELY(!fWasHalted && pVCpu->iem.s.msRecompilerPollNow != 0))
     { }
     else
-        pVCpu->iem.s.msRecompilerPollNow = (uint32_t)(TMVirtualGetNoCheck(pVM) / RT_NS_1MS);
+    {
+        /* Do polling after halt and the first time we get here. */
+#ifdef IEM_WITH_ADAPTIVE_TIMER_POLLING
+        uint64_t       nsNow          = 0;
+        uint32_t const cItersTillPoll = iemPollTimersCalcDefaultCountdown(TMTimerPollBoolWithNanoTS(pVM, pVCpu, &nsNow));
+        pVCpu->iem.s.cTbsTillNextTimerPollPrev = cItersTillPoll;
+        pVCpu->iem.s.cTbsTillNextTimerPoll     = cItersTillPoll;
+#else
+        uint64_t const nsNow = TMVirtualGetNoCheck(pVM);
+#endif
+        pVCpu->iem.s.nsRecompilerPollNow = nsNow;
+        pVCpu->iem.s.msRecompilerPollNow = (uint32_t)(nsNow / RT_NS_1MS);
+    }
     pVCpu->iem.s.ppTbLookupEntryR3 = &pVCpu->iem.s.pTbLookupEntryDummyR3;
…
         {
             /* Once in a while we need to poll timers here. */
-            if ((int32_t)--pVCpu->iem.s.cIrqChecksTillNextPoll > 0)
+            if ((int32_t)--pVCpu->iem.s.cTbsTillNextTimerPoll > 0)
             { /* likely */ }
             else
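
The countdown calculation is easiest to check with concrete numbers. Below is a standalone restatement of iemPollTimersCalcDefaultCountdown() using plain C in place of the RT_BIT_64/ASMBitFirstSetU32 helpers (a sketch for illustration only, not part of the changeset; the constants mirror the ones added above):

    #include <stdint.h>
    #include <stdio.h>

    #define IEM_TIMER_POLL_DEFAULT_FACTOR  UINT32_C(64)       /* guessed ns per TB */
    #define IEM_TIMER_POLL_MIN_ITER        UINT32_C(12)
    #define IEM_TIMER_POLL_MAX_NS          UINT32_C(8388608)  /* 8.4 ms */
    #define IEM_TIMER_POLL_MAX_ITER        (512U * 1024U)     /* _512K */

    /* Mirrors the changeset's helper: round the delta down to a power of two,
     * divide by the per-TB cost estimate, and clamp to [MIN_ITER, MAX_ITER]. */
    static uint32_t calcDefaultCountdown(uint64_t cNsDelta)
    {
        if (cNsDelta >= IEM_TIMER_POLL_MAX_NS)
            return IEM_TIMER_POLL_MAX_NS / IEM_TIMER_POLL_DEFAULT_FACTOR; /* 131072, below MAX_ITER */
        uint64_t uPow2 = 1;                  /* round down to a power of two */
        while (uPow2 * 2 <= cNsDelta)
            uPow2 *= 2;
        uint32_t const cRet = (uint32_t)(uPow2 / IEM_TIMER_POLL_DEFAULT_FACTOR);
        if (cRet < IEM_TIMER_POLL_MIN_ITER)  return IEM_TIMER_POLL_MIN_ITER;
        if (cRet > IEM_TIMER_POLL_MAX_ITER)  return IEM_TIMER_POLL_MAX_ITER;
        return cRet;
    }

    int main(void)
    {
        /* 100 us rounds down to 65536 ns -> 65536/64 = 1024 TBs. */
        printf("%u\n", calcDefaultCountdown(100000));              /* 1024 */
        /* 500 ns rounds down to 256 ns -> 256/64 = 4, clamped up to 12. */
        printf("%u\n", calcDefaultCountdown(500));                 /* 12 */
        /* 50 ms is past MAX_NS -> capped at 8388608/64 = 131072. */
        printf("%u\n", calcDefaultCountdown(UINT64_C(50000000)));  /* 131072 */
        return 0;
    }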
trunk/src/VBox/VMM/VMMAll/TMAll.cpp (r105673 → r105698)

@@ -1155 +1155 @@
  * free time source for recent use tracking and such.
  *
- * @returns true if timers are pending, false if not.
+ * @returns Nanoseconds till the next event, 0 if event already pending.
  *
  * @param   pVM         The cross context VM structure.
…
  * @thread  The emulation thread.
  */
-VMM_INT_DECL(bool) TMTimerPollBoolWithNanoTS(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pnsNow)
+VMM_INT_DECL(uint64_t) TMTimerPollBoolWithNanoTS(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *pnsNow)
 {
     AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
-    uint64_t off = 0;
-    tmTimerPollInternal(pVM, pVCpu, &off, pnsNow);
-    return off == 0;
+    uint64_t offDelta = 0;
+    tmTimerPollInternal(pVM, pVCpu, &offDelta, pnsNow);
+    return offDelta;
 }
trunk/src/VBox/VMM/VMMR3/EM.cpp (r103415 → r105698)

@@ -1057 +1057 @@
  * @param   pVM         The cross context VM structure.
  * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   fWasHalted  Set if we're coming out of a CPU HALT state.
  * @param   pfFFDone    Where to store an indicator telling whether or not
  *                      FFs were done before returning.
  *
  */
-static VBOXSTRICTRC emR3RecompilerExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
+static VBOXSTRICTRC emR3RecompilerExecute(PVM pVM, PVMCPU pVCpu, bool fWasHalted, bool *pfFFDone)
 {
     STAM_REL_PROFILE_START(&pVCpu->em.s.StatREMTotal, a);
…
 #ifdef VBOX_WITH_IEM_RECOMPILER
         if (pVM->em.s.fIemRecompiled)
-            rcStrict = IEMExecRecompiler(pVM, pVCpu);
+            rcStrict = IEMExecRecompiler(pVM, pVCpu, fWasHalted);
         else
 #endif
…
         }

+#ifdef VBOX_WITH_IEM_RECOMPILER
+        fWasHalted = false;
+#else
+        RT_NOREF(fWasHalted);
+#endif
     } /* The Inner Loop, recompiled execution mode version. */
…
             case VINF_EM_RESUME:
                 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
-                /* Don't reschedule in the halted or wait for SIPI case. */
+                /* Don't reschedule in the halted or wait-for-SIPI cases. */
                 if (   pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
                     || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
…
              */
             case EMSTATE_RECOMPILER:
-                rc = VBOXSTRICTRC_TODO(emR3RecompilerExecute(pVM, pVCpu, &fFFDone));
+                rc = VBOXSTRICTRC_TODO(emR3RecompilerExecute(pVM, pVCpu, enmOldState == EMSTATE_HALTED, &fFFDone));
                 Log2(("EMR3ExecuteVM: emR3RecompilerExecute -> %Rrc\n", rc));
                 break;
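
Read together with the iem.h and IEMAllThrdRecompiler.cpp hunks, the halt indicator flows top-down like this (condensed from the hunks above, not literal contiguous code):

    /* EMR3ExecuteVM() already tracks the previous scheduling state, so
     * "coming out of HLT" is simply enmOldState == EMSTATE_HALTED: */
    rc = VBOXSTRICTRC_TODO(emR3RecompilerExecute(pVM, pVCpu,
                                                 enmOldState == EMSTATE_HALTED /*fWasHalted*/,
                                                 &fFFDone));

    /* emR3RecompilerExecute() hands it to the recompiler and clears it
     * after the first inner-loop iteration: */
    rcStrict   = IEMExecRecompiler(pVM, pVCpu, fWasHalted);
    fWasHalted = false;

    /* IEMExecRecompiler() then re-seeds the poll timestamps and the TB
     * countdown, since both went stale while the CPU was halted. */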
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp (r105673 → r105698)

@@ -228 +228 @@
 #ifndef VBOX_VMM_TARGET_ARMV8
-    /* Poll timers every 400 us / 2500 Hz. (source: thin air) */
-    pVCpu->iem.s.cNsIdealPollInterval       = 400U * RT_NS_1US;
-    pVCpu->iem.s.cIrqChecksTillNextPoll     = 128;
-    pVCpu->iem.s.cIrqChecksTillNextPollPrev = 128;
+    pVCpu->iem.s.cTbsTillNextTimerPoll     = 128;
+    pVCpu->iem.s.cTbsTillNextTimerPollPrev = 128;
 #endif
…
@@ -603 +601 @@
 # endif

-        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cIrqChecksTillNextPollPrev, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
-                        "Timer polling interval", "/IEM/CPU%u/re/cIrqChecksTillNextPollPrev", idCpu);
+# ifdef VBOX_WITH_STATISTICS
+        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPoll, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
+                        "Timer polling profiling", "/IEM/CPU%u/re/TimerPoll/", idCpu);
+        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollRun, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
+                        "Timer polling profiling", "/IEM/CPU%u/re/TimerPoll/Running", idCpu);
+        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollUnchanged, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                        "Timer polling interval unchanged", "/IEM/CPU%u/re/TimerPoll/Unchanged", idCpu);
+        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollTiny, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                        "Timer polling interval tiny", "/IEM/CPU%u/re/TimerPoll/Tiny", idCpu);
+        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollDefaultCalc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                        "Timer polling interval calculated using defaults", "/IEM/CPU%u/re/TimerPoll/DefaultCalc", idCpu);
+        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollMax, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                        "Timer polling interval maxed out", "/IEM/CPU%u/re/TimerPoll/Max", idCpu);
+        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollFactorDivision, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                        "Timer polling factor", "/IEM/CPU%u/re/TimerPoll/FactorDivision", idCpu);
+        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTimerPollFactorMultiplication, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                        "Timer polling factor", "/IEM/CPU%u/re/TimerPoll/FactorMultiplication", idCpu);
+# endif
+        STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbsTillNextTimerPollPrev, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
+                        "Timer polling interval (in TBs)", "/IEM/CPU%u/re/TimerPollInterval", idCpu);

     PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
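
Once registered, these counters appear under /IEM/CPU*/re/TimerPoll* in the VM statistics tree, so in a statistics-enabled build they can be inspected at runtime with the debugvm statistics command (usage sketch; the VM name is a placeholder):

    VBoxManage debugvm "myvm" statistics --pattern "/IEM/CPU0/re/TimerPoll*"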
trunk/src/VBox/VMM/include/IEMInternal.h (r105673 → r105698)

@@ -92 +92 @@
 # define IEM_WITH_THROW_CATCH
 #endif /*ASM-NOINC-END*/
+
+/** @def IEM_WITH_ADAPTIVE_TIMER_POLLING
+ * Enables the adaptive timer polling code.
+ */
+#if defined(DOXYGEN_RUNNING) || 0
+# define IEM_WITH_ADAPTIVE_TIMER_POLLING
+#endif

 /** @def IEM_WITH_INTRA_TB_JUMPS
…
@@ -821 +828 @@
 #endif /* !IEM_WITH_TLB_TRACE */

-#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3)
+#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3) && 1
 # define IEMTLBTRACE_INVLPG(a_pVCpu, a_GCPtr) \
     iemTlbTrace(a_pVCpu, kIemTlbTraceType_InvlPg, a_GCPtr)
…
@@ -2188 +2195 @@
     /** The number of IRQ/FF checks till the next timer poll call. */
-    uint32_t                cIrqChecksTillNextPoll;
+    uint32_t                cTbsTillNextTimerPoll;
     /** The virtual sync time at the last timer poll call in milliseconds. */
     uint32_t                msRecompilerPollNow;
     /** The virtual sync time at the last timer poll call in nanoseconds. */
     uint64_t                nsRecompilerPollNow;
-    /** The previous cIrqChecksTillNextPoll value. */
-    uint32_t                cIrqChecksTillNextPollPrev;
-    /** The ideal nanosecond interval between two timer polls.
-     * @todo make this adaptive? */
-    uint32_t                cNsIdealPollInterval;
+    /** The previous cTbsTillNextTimerPoll value. */
+    uint32_t                cTbsTillNextTimerPollPrev;

     /** The current instruction number in a native TB.
…
@@ -2242 +2246 @@
      * currently not up to date in EFLAGS. */
     uint32_t                fSkippingEFlags;
-    /** Spaced reserved for recompiler data / alignment. */
-    uint32_t                u32RecompilerStuff2;
 #if 0 /* unused */
     /** Previous GCPhysInstrBuf value - only valid if fTbCrossedPage is set. */
…
@@ -2493 +2495 @@
     /** @} */

+    /** Timer polling statistics (debug only).
+     * @{ */
+    STAMPROFILE             StatTimerPoll;
+    STAMPROFILE             StatTimerPollPoll;
+    STAMPROFILE             StatTimerPollRun;
+    STAMCOUNTER             StatTimerPollUnchanged;
+    STAMCOUNTER             StatTimerPollTiny;
+    STAMCOUNTER             StatTimerPollDefaultCalc;
+    STAMCOUNTER             StatTimerPollMax;
+    STAMPROFILE             StatTimerPollFactorDivision;
+    STAMPROFILE             StatTimerPollFactorMultiplication;
+    /** @} */
+
 #ifdef IEM_WITH_TLB_TRACE
-    uint64_t                au64Padding[6];
+    uint64_t                au64Padding[7];
 #else
-    //uint64_t              au64Padding[1];
+    uint64_t                au64Padding[1];
 #endif
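
Note that the new define is compiled out by default: the `#if defined(DOXYGEN_RUNNING) || 0` idiom only makes the macro visible to Doxygen. To experiment with adaptive polling in a local build, the trailing 0 is flipped to 1 (local change for illustration, not part of this changeset):

    #if defined(DOXYGEN_RUNNING) || 1   /* local experiment: enable adaptive polling */
    # define IEM_WITH_ADAPTIVE_TIMER_POLLING
    #endif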