Changeset 54308 in vbox for trunk/src/VBox/VMM
Timestamp: Feb 19, 2015 7:43:51 PM (10 years ago)
svn:sync-xref-src-repo-rev: 98383
Location: trunk/src/VBox/VMM
Files: 9 edited
trunk/src/VBox/VMM/Makefile.kmk
r53781 → r54308

 VMMRC_SYSSUFF = .gc

-VMMRC_DEFS = IN_VMM_RC IN_RT_RC IN_DIS DIS_CORE_ONLY VBOX_WITH_RAW_MODE VBOX_WITH_RAW_MODE_NOT_R0 $(VMM_COMMON_DEFS)
+VMMRC_DEFS = IN_VMM_RC IN_RT_RC IN_DIS DIS_CORE_ONLY VBOX_WITH_RAW_MODE VBOX_WITH_RAW_MODE_NOT_R0 IN_SUP_RC \
+	$(VMM_COMMON_DEFS)
 ifdef VBOX_WITH_VMM_R0_SWITCH_STACK
 VMMRC_DEFS += VMM_R0_SWITCH_STACK
…
 VMMRC_LIBS = \
 	$(PATH_STAGE_LIB)/DisasmRC$(VBOX_SUFF_LIB) \
-	$(PATH_STAGE_LIB)/RuntimeRC$(VBOX_SUFF_LIB)
+	$(PATH_STAGE_LIB)/RuntimeRC$(VBOX_SUFF_LIB) \
+	$(PATH_STAGE_LIB)/SUPRC$(VBOX_SUFF_LIB)
 ifneq ($(filter pe lx,$(VBOX_LDR_FMT32)),)
 VMMRC_LIBS += \
trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp
r54065 → r54308

+#ifdef IN_RING3
+/**
+ * Used by tmR3CpuTickParavirtEnable and tmR3CpuTickParavirtDisable.
+ */
+uint64_t tmR3CpuTickGetRawVirtualNoCheck(PVM pVM)
+{
+    return tmCpuTickGetRawVirtual(pVM, false /*fCheckTimers*/);
+}
+#endif
+
+
 /**
  * Resumes the CPU timestamp counter ticking.
…
      * unpaused before the virtual time and stopped after it. */
     if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
-        pVCpu->tm.s.offTSCRawSrc = ASMReadTSC() - pVCpu->tm.s.u64TSC;
+        pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVCpu->tm.s.u64TSC;
     else
         pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
…
     /* When resuming, use the TSC value of the last stopped VCPU to avoid the TSC going back. */
     if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
-        pVCpu->tm.s.offTSCRawSrc = ASMReadTSC() - pVM->tm.s.u64LastPausedTSC;
+        pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVM->tm.s.u64LastPausedTSC;
     else
         pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
…
  *
  * @returns true/false accordingly.
+ * @param   pVM             Pointer to the cross context VM structure.
  * @param   pVCpu           Pointer to the VMCPU.
- * @param   poffRealTSC     The offset against the TSC of the current CPU.
- * @param   pfParavirtTsc   Where to store whether paravirt. TSC is enabled.
+ * @param   poffRealTsc     The offset against the TSC of the current host CPU,
+ *                          if pfOffsettedTsc is set to true.
+ * @param   pfParavirtTsc   Where to return whether paravirt TSC is enabled.
  *
  * @thread  EMT(pVCpu).
  * @see     TMCpuTickGetDeadlineAndTscOffset().
  */
-VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfParavirtTsc)
-{
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    bool fOffsettedTsc = false;
+VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVM pVM, PVMCPU pVCpu, uint64_t *poffRealTsc, bool *pfParavirtTsc)
+{
+    Assert(pVCpu->tm.s.fTSCTicking);
+
+    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;
+
+    /*
+     * In real TSC mode it's easy, we just need the delta & offTscRawSrc and
+     * the CPU will add them to RDTSC and RDTSCP at runtime.
+     *
+     * In tmCpuTickGetInternal we do:
+     *          SUPReadTsc() - pVCpu->tm.s.offTSCRawSrc;
+     * Where SUPReadTsc() does:
+     *          ASMReadTSC() - pGipCpu->i64TscDelta;
+     * Which means tmCpuTickGetInternal actually does:
+     *          ASMReadTSC() - pGipCpu->i64TscDelta - pVCpu->tm.s.offTSCRawSrc;
+     * So, the offset to be ADDED to RDTSC[P] is:
+     *          offRealTsc = -(pGipCpu->i64TscDelta + pVCpu->tm.s.offTSCRawSrc)
+     */
+    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
+    {
+        /** @todo We should negate both deltas!  It's soo weird that we do the
+         *        exact opposite of what the hardware implements. */
+#ifdef IN_RING3
+        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDelta();
+#else
+        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
+#endif
+        return true;
+    }

     /*
…
      *    c) we're not using warp drive (accelerated virtual guest time).
      */
-    Assert(pVCpu->tm.s.fTSCTicking);
-    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;
-
-    if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
-    {
-        /* The source is the real TSC. */
-        *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc;
-        return true;    /** @todo count this? */
-    }
-
     if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
         && !pVM->tm.s.fVirtualSyncCatchUp
…
      *    timer timeout. If it's lower than the avg. length, we should trap rdtsc to increase
      *    the chance that we'll get interrupted right after the timer expired. */
-        uint64_t u64TSC = ASMReadTSC(); /** @todo should be replaced with SUPReadTSC() eventually. */
-        *poffRealTSC = u64Now - u64TSC;
-        fOffsettedTsc = u64Now >= pVCpu->tm.s.u64TSCLastSeen;
-        return true;    /** @todo count this? */
+        if (u64Now >= pVCpu->tm.s.u64TSCLastSeen)
+        {
+            *poffRealTsc = u64Now - ASMReadTSC();
+            return true;    /** @todo count this? */
+        }
     }
…
  *
  * @returns The number of host cpu ticks to the next deadline.  Max one second.
+ * @param   pVCpu           The current CPU.
  * @param   cNsToDeadline   The number of nano seconds to the next virtual
  *                          sync deadline.
  */
-DECLINLINE(uint64_t) tmCpuCalcTicksToDeadline(uint64_t cNsToDeadline)
+DECLINLINE(uint64_t) tmCpuCalcTicksToDeadline(PVMCPU pVCpu, uint64_t cNsToDeadline)
 {
     AssertCompile(TMCLOCK_FREQ_VIRTUAL <= _4G);
+#ifdef IN_RING3
+    uint64_t uCpuHz = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
+#else
+    uint64_t uCpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
+#endif
     if (RT_UNLIKELY(cNsToDeadline >= TMCLOCK_FREQ_VIRTUAL))
-        return SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
-    uint64_t cTicks = ASMMultU64ByU32DivByU32(SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage),
-                                              cNsToDeadline,
-                                              TMCLOCK_FREQ_VIRTUAL);
+        return uCpuHz;
+    uint64_t cTicks = ASMMultU64ByU32DivByU32(uCpuHz, cNsToDeadline, TMCLOCK_FREQ_VIRTUAL);
     if (cTicks > 4000)
         cTicks -= 4000; /* fudge to account for overhead */
…
  *
  * @returns The number of host CPU clock ticks to the next timer deadline.
+ * @param   pVM             Pointer to the cross context VM structure.
  * @param   pVCpu           The current CPU.
- * @param   poffRealTSC     The offset against the TSC of the current CPU.
- * @param   pfOffsettedTsc  Where to store whether TSC offsetting can be used.
- * @param   pfParavirtTsc   Where to store whether paravirt. TSC is enabled.
+ * @param   poffRealTsc     The offset against the TSC of the current host CPU,
+ *                          if pfOffsettedTsc is set to true.
+ * @param   pfOffsettedTsc  Where to return whether TSC offsetting can be used.
+ * @param   pfParavirtTsc   Where to return whether paravirt TSC is enabled.
  *
  * @thread  EMT(pVCpu).
  * @see     TMCpuTickCanUseRealTSC().
  */
-VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, uint64_t *poffRealTSC, bool *pfOffsettedTsc,
-                                                        bool *pfParavirtTsc)
-{
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    uint64_t cTicksToDeadline;
+VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVM pVM, PVMCPU pVCpu, uint64_t *poffRealTsc,
+                                                        bool *pfOffsettedTsc, bool *pfParavirtTsc)
+{
+    Assert(pVCpu->tm.s.fTSCTicking);
+
+    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;

     /*
-     * We require:
-     *     1. A fixed TSC, this is checked at init time.
-     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
-     *     3. Either that we're using the real TSC as time source or
-     *        a) we don't have any lag to catch up, and
-     *        b) the virtual sync clock hasn't been halted by an expired timer, and
-     *        c) we're not using warp drive (accelerated virtual guest time).
+     * Same logic as in TMCpuTickCanUseRealTSC.
      */
-    Assert(pVCpu->tm.s.fTSCTicking);
-    *pfParavirtTsc = pVM->tm.s.fParavirtTscEnabled;
-
     if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
     {
-        /* The source is the real TSC. */
-        *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc;
-        *pfOffsettedTsc = true;
-        cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
-        return cTicksToDeadline;
-    }
-
+        /** @todo We should negate both deltas!  It's soo weird that we do the
+         *        exact opposite of what the hardware implements. */
+#ifdef IN_RING3
+        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDelta();
+#else
+        *poffRealTsc = 0 - pVCpu->tm.s.offTSCRawSrc - SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
+#endif
+        *pfOffsettedTsc = true;
+        return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM));
+    }
+
+    /*
+     * Same logic as in TMCpuTickCanUseRealTSC.
+     */
     if (   pVM->tm.s.enmTSCMode == TMTSCMODE_DYNAMIC
         && !pVM->tm.s.fVirtualSyncCatchUp
…
                       : u64NowVirtSync;
         u64Now -= pVCpu->tm.s.offTSCRawSrc;
-        *poffRealTSC = u64Now - ASMReadTSC(); /** @todo replace with SUPReadTSC() eventually. */
+        *poffRealTsc = u64Now - ASMReadTSC();
         *pfOffsettedTsc = u64Now >= pVCpu->tm.s.u64TSCLastSeen;
-        cTicksToDeadline = tmCpuCalcTicksToDeadline(cNsToDeadline);
-        return cTicksToDeadline;
+        return tmCpuCalcTicksToDeadline(pVCpu, cNsToDeadline);
     }
…
 #endif
     *pfOffsettedTsc = false;
-    *poffRealTSC = 0;
-    cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
-    return cTicksToDeadline;
+    *poffRealTsc = 0;
+    return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM));
 }
…
     PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
-        u64 = ASMReadTSC();
+        u64 = SUPReadTsc();
     else
         u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
…
         && g_pSUPGlobalInfoPage->u32Mode != SUPGIPMODE_INVARIANT_TSC)
     {
-        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
+#ifdef IN_RING3
+        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
+#elif defined(IN_RING0)
+        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, RTMpCpuIdToSetIndex(RTMpCpuId()));
+#else
+        uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, VMMGetCpu(pVM)->iHostCpuSet);
+#endif
         if (RT_LIKELY(cTSCTicksPerSecond != ~(uint64_t)0))
             return cTSCTicksPerSecond;
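The offset arithmetic documented in TMCpuTickCanUseRealTSC above is easy to sanity-check in isolation. The following standalone C sketch is not VirtualBox code; the variable names merely mirror the comment, and all constants are invented. It asserts that adding offRealTsc = -(i64TscDelta + offTSCRawSrc) to a raw TSC reading reproduces what tmCpuTickGetInternal computes:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t uRawTsc      = 1000000; /* stand-in for ASMReadTSC() */
    int64_t  iTscDelta    = 150;     /* per-CPU GIP delta (pGipCpu->i64TscDelta) */
    uint64_t offTscRawSrc = 400000;  /* pVCpu->tm.s.offTSCRawSrc */

    /* What TM computes when reading the guest TSC in software:
       SUPReadTsc() - offTscRawSrc, where SUPReadTsc() = ASMReadTSC() - delta. */
    uint64_t uGuestTsc = (uRawTsc - (uint64_t)iTscDelta) - offTscRawSrc;

    /* What the hardware computes with TSC offsetting enabled is RDTSC + offset,
       so the offset handed to the VMCB/VMCS must fold in both terms. */
    uint64_t offRealTsc = 0 - offTscRawSrc - (uint64_t)iTscDelta;
    assert(uRawTsc + offRealTsc == uGuestTsc); /* identities match modulo 2^64 */
    return 0;
}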
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r54277 → r54308

     /* Clear the VCPU <-> host CPU mapping as we've left HM context. */
-    ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
+    ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID); /** @todo r=bird: This is VMMR0.cpp's job, isn't it? */

     return VINF_SUCCESS;
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r54196 → r54308

  *          intercepts.
  *
+ * @param   pVM         The shared VM handle.
  * @param   pVCpu       Pointer to the VMCPU.
  *
  * @remarks No-long-jump zone!!!
  */
-static void hmR0SvmUpdateTscOffsetting(PVMCPU pVCpu)
-{
-    bool fParavirtTsc;
-    bool fCanUseRealTsc;
+static void hmR0SvmUpdateTscOffsetting(PVM pVM, PVMCPU pVCpu)
+{
+    bool fParavirtTsc;
     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
-    fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVmcb->ctrl.u64TSCOffset, &fParavirtTsc);
+    bool fCanUseRealTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &pVmcb->ctrl.u64TSCOffset, &fParavirtTsc);
     if (fCanUseRealTsc)
     {
…
     if (fParavirtTsc)
     {
-        int rc = GIMR0UpdateParavirtTsc(pVCpu->CTX_SUFF(pVM), 0 /* u64Offset */);
+        int rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
         AssertRC(rc);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatTscParavirt);
…
         || idCurrentCpu != pVCpu->hm.s.idLastCpu)
     {
-        hmR0SvmUpdateTscOffsetting(pVCpu);
+        hmR0SvmUpdateTscOffsetting(pVM, pVCpu);
         pSvmTransient->fUpdateTscOffsetting = false;
     }
…
     if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC))
-        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset); /** @todo use SUPReadTSC() eventually. */
+        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset);

     STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
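For context on why the VM-exit path records ASMReadTSC() + u64TSCOffset as the last-seen guest TSC: with the RDTSC intercept disabled, AMD-V hardware returns the host TSC plus the VMCB offset to the guest, so that sum bounds any value the guest can have observed. A toy model of that relationship follows; it is an illustrative sketch with made-up numbers, not VirtualBox code:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t u64TSCOffset; } VMCBCTRL; /* hypothetical stand-in for the VMCB control area */

static uint64_t g_hostTsc = 5000000; /* stand-in for ASMReadTSC() */

/* What SVM hardware hands the guest on RDTSC when offsetting is active. */
static uint64_t guestRdtsc(const VMCBCTRL *pCtrl)
{
    return g_hostTsc + pCtrl->u64TSCOffset;
}

int main(void)
{
    VMCBCTRL ctrl = { (uint64_t)0 - 4000000 };   /* negative offset: guest runs "behind" the host */
    uint64_t uSeenInGuest = guestRdtsc(&ctrl);   /* guest executes RDTSC */
    g_hostTsc += 123;                            /* time passes until the VM-exit */
    uint64_t uLastSeen = g_hostTsc + ctrl.u64TSCOffset; /* what the exit path records */
    printf("guest saw %llu, recorded last-seen %llu\n",
           (unsigned long long)uSeenInGuest, (unsigned long long)uLastSeen);
    return 0; /* uLastSeen >= uSeenInGuest, so the TSC never appears to go backwards */
}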
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r54196 → r54308

  *
  * @returns VBox status code.
+ * @param   pVM         Pointer to the cross context VM structure.
  * @param   pVCpu       Pointer to the VMCPU.
  *
  * @remarks No-long-jump zone!!!
  */
-static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu)
-{
-    int rc = VERR_INTERNAL_ERROR_5;
-    bool fOffsettedTsc = false;
-    bool fParavirtTsc  = false;
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
+static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVM pVM, PVMCPU pVCpu)
+{
+    int rc;
+    bool fOffsettedTsc;
+    bool fParavirtTsc;
     if (pVM->hm.s.vmx.fUsePreemptTimer)
     {
-        uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fOffsettedTsc,
-                                                                     &fParavirtTsc);
+        uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset,
+                                                                     &fOffsettedTsc, &fParavirtTsc);

         /* Make sure the returned values have sane upper and lower boundaries. */
-        uint64_t u64CpuHz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
+        uint64_t u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
         cTicksToDeadline  = RT_MIN(cTicksToDeadline, u64CpuHz / 64);   /* 1/64th of a second */
         cTicksToDeadline  = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
…
     }
     else
-        fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc);
+        fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc);

     /** @todo later optimize this to be done elsewhere and not before every
…
         || idCurrentCpu != pVCpu->hm.s.idLastCpu)
     {
-        hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu);
+        hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVM, pVCpu);
         pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
     }
…
     if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
-        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset); /** @todo use SUPReadTSC() eventually. */
+        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset);

     STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
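The deadline clamp above is plain integer arithmetic: the VMX preemption timer is forced to fire no later than 1/64th and no earlier than 1/2048th of a second from now. A small standalone example makes the window concrete; the 2.8 GHz host frequency is hypothetical and RT_MIN/RT_MAX are redefined locally for self-containment:

#include <stdint.h>
#include <stdio.h>

#define RT_MIN(a, b) ((a) < (b) ? (a) : (b))
#define RT_MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    uint64_t u64CpuHz   = 2800000000ull; /* as if returned by SUPGetCpuHzFromGipBySetIndex() */
    uint64_t examples[] = { 0, 500000, 10000000000ull }; /* imminent, short, and far-off deadlines */
    for (unsigned i = 0; i < 3; i++)
    {
        uint64_t cTicksToDeadline = examples[i];
        cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64);   /* upper bound: ~43.75M ticks */
        cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* lower bound: ~1.37M ticks */
        printf("deadline %llu -> clamped to %llu ticks\n",
               (unsigned long long)examples[i], (unsigned long long)cTicksToDeadline);
    }
    return 0;
}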
trunk/src/VBox/VMM/VMMR3/TM.cpp
r54292 → r54308

 static DECLCALLBACK(void) tmR3TimerInfoActive(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
 static DECLCALLBACK(void) tmR3InfoClocks(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
-static DECLCALLBACK(VBOXSTRICTRC) tmR3CpuTickParavirtToggle(PVM pVM, PVMCPU pVCpu, void *pvData);
+static DECLCALLBACK(VBOXSTRICTRC) tmR3CpuTickParavirtDisable(PVM pVM, PVMCPU pVCpu, void *pvData);
…
      * Use GIP when available.
      */
-    uint64_t u64Hz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
+    uint64_t u64Hz = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
     if (g_pSUPGlobalInfoPage->u32Mode == SUPGIPMODE_INVARIANT_TSC)
     {
…
     }

-    u64Hz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
+    u64Hz = SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage);
     if (u64Hz != UINT64_MAX)
         return u64Hz;
…
     {
         bool fParavirtTSC = false;
-        tmR3CpuTickParavirtToggle(pVM, NULL /* pVCpuEmt */, &fParavirtTSC);
+        tmR3CpuTickParavirtDisable(pVM, NULL, NULL);
     }
     Assert(!GIMIsParavirtTscEnabled(pVM));
…
 /**
- * Switch TM TSC mode to the most appropriate/efficient one.
- *
- * @returns strict VBox status code.
- * @param   pVM         Pointer to the VM.
- * @param   pVCpuEmt    Pointer to the VMCPU it's called on, can be NULL.
- * @param   pvData      Opaque pointer to whether usage of paravirt. TSC is
- *                      enabled or disabled by the guest OS.
- *
- * @thread  EMT.
- * @remarks Must only be called during an EMTs rendezvous.
- */
-static DECLCALLBACK(VBOXSTRICTRC) tmR3CpuTickParavirtToggle(PVM pVM, PVMCPU pVCpuEmt, void *pvData)
-{
-    Assert(pVM);
-    Assert(pvData);
-    Assert(pVM->tm.s.fTSCModeSwitchAllowed);
-    NOREF(pVCpuEmt);
-
-    bool *pfEnable = (bool *)pvData;
-    if (*pfEnable)
-    {
-        if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET)
-        {
-            if (tmR3HasFixedTSC(pVM))
-            {
-                uint64_t u64NowVirtSync = TMVirtualSyncGetNoCheck(pVM);
-                uint64_t u64Now = ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL);
-                uint32_t cCpus = pVM->cCpus;
-                uint64_t u64RealTSC = ASMReadTSC(); /** @todo should use SUPReadTsc() */
-                for (uint32_t i = 0; i < cCpus; i++)
-                {
-                    PVMCPU pVCpu = &pVM->aCpus[i];
-                    uint64_t u64TickOld = u64Now - pVCpu->tm.s.offTSCRawSrc;
-
-                    /*
-                     * The return value of TMCpuTickGet() and the guest's TSC value (u64Tick) must
-                     * remain constant across the TM TSC mode-switch.
-                     *   OldTick = VrSync - CurOff
-                     *   NewTick = RealTsc - NewOff
-                     *   NewTick = OldTick
-                     *   => RealTsc - NewOff = VrSync - CurOff
-                     *   => NewOff = CurOff + RealTsc - VrSync
-                     */
-                    pVCpu->tm.s.offTSCRawSrc = pVCpu->tm.s.offTSCRawSrc + u64RealTSC - u64Now;
-
-                    /* If the new offset results in the TSC going backwards, re-adjust the offset. */
-                    if (u64RealTSC - pVCpu->tm.s.offTSCRawSrc < u64TickOld)
-                        pVCpu->tm.s.offTSCRawSrc += u64TickOld - u64RealTSC;
-                    Assert(u64RealTSC - pVCpu->tm.s.offTSCRawSrc >= u64TickOld);
-                }
-                pVM->tm.s.enmTSCMode = TMTSCMODE_REAL_TSC_OFFSET;
-                LogRel(("TM: Switched TSC mode. New enmTSCMode=%d (%s)\n", pVM->tm.s.enmTSCMode, tmR3GetTSCModeName(pVM)));
-            }
-            else
-                LogRel(("TM: Host is not suitable for using TSC mode (%d - %s). Request to change TSC mode ignored.\n",
-                        TMTSCMODE_REAL_TSC_OFFSET, tmR3GetTSCModeNameEx(TMTSCMODE_REAL_TSC_OFFSET)));
-        }
-    }
-    else
-    {
-        if (   pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
-            && pVM->tm.s.enmTSCMode != pVM->tm.s.enmOriginalTSCMode)
-        {
-            uint64_t u64NowVirtSync = TMVirtualSyncGetNoCheck(pVM);
-            uint64_t u64Now = ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL);
-            uint64_t u64RealTSC = ASMReadTSC(); /** @todo replace with SUPReadTSC() eventually. */
-            uint32_t cCpus = pVM->cCpus;
-            for (uint32_t i = 0; i < cCpus; i++)
-            {
-                PVMCPU pVCpu = &pVM->aCpus[i];
-                uint64_t u64TickOld = u64RealTSC - pVCpu->tm.s.offTSCRawSrc;
-
-                /* Update the last-seen tick here as we havent't been updating it (as we don't
-                   need it) while in pure TSC-offsetting mode. */
-                pVCpu->tm.s.u64TSCLastSeen = pVCpu->tm.s.u64TSC;
-
-                /*
-                 * The return value of TMCpuTickGet() and the guest's TSC value (u64Tick) must
-                 * remain constant across the TM TSC mode-switch.
-                 *   OldTick = RealTsc - CurOff
-                 *   NewTick = VrSync - NewOff
-                 *   NewTick = OldTick
-                 *   => VrSync - NewOff = RealTsc - CurOff
-                 *   => NewOff = CurOff + VrSync - RealTsc
-                 */
-                pVCpu->tm.s.offTSCRawSrc = pVCpu->tm.s.offTSCRawSrc + u64Now - u64RealTSC;
-
-                /* If the new offset results in the TSC going backwards, re-adjust the offset. */
-                if (u64Now - pVCpu->tm.s.offTSCRawSrc < u64TickOld)
-                    pVCpu->tm.s.offTSCRawSrc += u64TickOld - u64Now;
-                Assert(u64Now - pVCpu->tm.s.offTSCRawSrc >= u64TickOld);
-            }
-            pVM->tm.s.enmTSCMode = pVM->tm.s.enmOriginalTSCMode;
-            LogRel(("TM: Switched TSC mode. New enmTSCMode=%d (%s)\n", pVM->tm.s.enmTSCMode, tmR3GetTSCModeName(pVM)));
-        }
-    }
-    return VINF_SUCCESS;
-}
+ * @callback_method_impl{PFNVMMEMTRENDEZVOUS,
+ *      Worker for TMR3CpuTickParavirtEnable}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) tmR3CpuTickParavirtEnable(PVM pVM, PVMCPU pVCpuEmt, void *pvData)
+{
+    AssertPtr(pVM); Assert(pVM->tm.s.fTSCModeSwitchAllowed); NOREF(pVCpuEmt); NOREF(pvData);
+    Assert(pVCpuEmt->tm.s.fTSCTicking);
+
+    if (pVM->tm.s.enmTSCMode != TMTSCMODE_REAL_TSC_OFFSET)
+    {
+        if (tmR3HasFixedTSC(pVM))
+        {
+            /*
+             * The return value of TMCpuTickGet() and the guest's TSC value for each
+             * CPU must remain constant across the TM TSC mode-switch.  Thus we have
+             * the following equation (new/old signifies the new/old tsc modes):
+             *      uNewTsc = uOldTsc
+             *
+             * Where (see tmCpuTickGetInternal):
+             *      uOldTsc = uRawOldTsc - offTscRawSrcOld
+             *      uNewTsc = uRawNewTsc - offTscRawSrcNew
+             *
+             * Solve it for offTscRawSrcNew without replacing uOldTsc:
+             *      uRawNewTsc - offTscRawSrcNew = uOldTsc
+             *      => -offTscRawSrcNew = uOldTsc - uRawNewTsc
+             *      => offTscRawSrcNew  = uRawNewTsc - uOldTsc
+             */
+            uint64_t uRawOldTsc = tmR3CpuTickGetRawVirtualNoCheck(pVM);
+            uint64_t uRawNewTsc = SUPReadTsc();
+            uint32_t cCpus = pVM->cCpus;
+            for (uint32_t i = 0; i < cCpus; i++)
+            {
+                PVMCPU   pVCpu   = &pVM->aCpus[i];
+                uint64_t uOldTsc = uRawOldTsc - pVCpu->tm.s.offTSCRawSrc;
+                pVCpu->tm.s.offTSCRawSrc = uRawNewTsc - uOldTsc;
+                Assert(uRawNewTsc - pVCpu->tm.s.offTSCRawSrc >= uOldTsc); /* paranoia^256 */
+            }
+
+            pVM->tm.s.enmTSCMode = TMTSCMODE_REAL_TSC_OFFSET;
+            LogRel(("TM: Switched TSC mode. New enmTSCMode=%d (%s)\n", pVM->tm.s.enmTSCMode, tmR3GetTSCModeName(pVM)));
+        }
+        else
+            LogRel(("TM: Host is not suitable for using TSC mode (%d - %s). Request to change TSC mode ignored.\n",
+                    TMTSCMODE_REAL_TSC_OFFSET, tmR3GetTSCModeNameEx(TMTSCMODE_REAL_TSC_OFFSET)));
+    }
+    return VINF_SUCCESS;
+}
…
 /**
  * Notify TM that the guest has enabled usage of a paravirtualized TSC.
+ *
+ * This may perform a EMT rendezvous and change the TSC virtualization mode.
  *
  * @returns VBox status code.
…
     int rc = VINF_SUCCESS;
     if (pVM->tm.s.fTSCModeSwitchAllowed)
-    {
-        bool fEnable = true;
-        rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, tmR3CpuTickParavirtToggle, (void *)&fEnable);
-    }
+        rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, tmR3CpuTickParavirtEnable, NULL);
     pVM->tm.s.fParavirtTscEnabled = true;
     return rc;
…
 /**
+ * @callback_method_impl{PFNVMMEMTRENDEZVOUS,
+ *      Worker for TMR3CpuTickParavirtDisable}
+ */
+static DECLCALLBACK(VBOXSTRICTRC) tmR3CpuTickParavirtDisable(PVM pVM, PVMCPU pVCpuEmt, void *pvData)
+{
+    AssertPtr(pVM); Assert(pVM->tm.s.fTSCModeSwitchAllowed); NOREF(pVCpuEmt);
+    Assert(pVCpuEmt->tm.s.fTSCTicking);
+
+    if (   pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET
+        && pVM->tm.s.enmTSCMode != pVM->tm.s.enmOriginalTSCMode)
+    {
+        /*
+         * See tmR3CpuTickParavirtEnable for an explanation of the conversion math.
+         */
+        uint64_t uRawOldTsc = SUPReadTsc();
+        uint64_t uRawNewTsc = tmR3CpuTickGetRawVirtualNoCheck(pVM);
+        uint32_t cCpus = pVM->cCpus;
+        for (uint32_t i = 0; i < cCpus; i++)
+        {
+            PVMCPU   pVCpu   = &pVM->aCpus[i];
+            uint64_t uOldTsc = uRawOldTsc - pVCpu->tm.s.offTSCRawSrc;
+            pVCpu->tm.s.offTSCRawSrc = uRawNewTsc - uOldTsc;
+            Assert(uRawNewTsc - pVCpu->tm.s.offTSCRawSrc >= uOldTsc); /* paranoia^256 */
+
+            /* Update the last-seen tick here as we havent't been updating it (as we don't
+               need it) while in pure TSC-offsetting mode. */
+#if 0 /** @todo r=bird: Why use the TSC value from the last time we paused the TSC?  Makes more sense to use uOldTsc doesn't it? */
+            pVCpu->tm.s.u64TSCLastSeen = pVCpu->tm.s.u64TSC;
+#else
+            pVCpu->tm.s.u64TSCLastSeen = uOldTsc;
+#endif
+        }
+        pVM->tm.s.enmTSCMode = pVM->tm.s.enmOriginalTSCMode;
+        LogRel(("TM: Switched TSC mode. New enmTSCMode=%d (%s)\n", pVM->tm.s.enmTSCMode, tmR3GetTSCModeName(pVM)));
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
  * Notify TM that the guest has disabled usage of a paravirtualized TSC.
+ *
+ * If TMR3CpuTickParavirtEnable changed the TSC virtualization mode, this will
+ * perform an EMT rendezvous to revert those changes.
  *
  * @returns VBox status code.
…
     int rc = VINF_SUCCESS;
     if (pVM->tm.s.fTSCModeSwitchAllowed)
-    {
-        bool fEnable = false;
-        rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, tmR3CpuTickParavirtToggle, (void *)&fEnable);
-    }
+        rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, tmR3CpuTickParavirtDisable, NULL);
     pVM->tm.s.fParavirtTscEnabled = false;
     return rc;
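The rebasing rule offTscRawSrcNew = uRawNewTsc - uOldTsc that both rendezvous workers now share can be checked with made-up numbers. This standalone sketch (not VirtualBox code) asserts that the guest-visible TSC is unchanged across the mode switch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t uRawOldTsc   = 900000;  /* raw source in the old mode (e.g. virtual-sync based) */
    uint64_t offTscRawSrc = 250000;  /* per-VCPU offset in the old mode */
    uint64_t uRawNewTsc   = 7000000; /* raw source sampled at the switch (e.g. SUPReadTsc()) */

    uint64_t uOldTsc = uRawOldTsc - offTscRawSrc; /* guest TSC before the switch */
    uint64_t offNew  = uRawNewTsc - uOldTsc;      /* rebased per-VCPU offset */
    assert(uRawNewTsc - offNew == uOldTsc);       /* guest TSC after == guest TSC before */
    return 0;
}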
trunk/src/VBox/VMM/VMMR3/VMMTests.cpp
r50115 → r54308

     }
     uint64_t Ticks = ASMReadTSC() - StartTick;
-    if (Ticks < (SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage) / 10000))
-        RTPrintf("Warning: Ticks=%RU64 (< %RU64)\n", Ticks, SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage) / 10000);
+    if (Ticks < (SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage) / 10000))
+        RTPrintf("Warning: Ticks=%RU64 (< %RU64)\n", Ticks, SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage) / 10000);
 }
trunk/src/VBox/VMM/VMMRC/VMMRC.cpp
r49893 → r54308

         case VMMGC_DO_TESTCASE_INTERRUPT_MASKING:
         {
-            uint64_t u64MaxTicks = (SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage) != ~(uint64_t)0
-                                    ? SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage)
+            uint64_t u64MaxTicks = (SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage) != ~(uint64_t)0
+                                    ? SUPGetCpuHzFromGip(g_pSUPGlobalInfoPage)
                                     : _2G)
                                    / 10000;
trunk/src/VBox/VMM/include/TMInternal.h
r54270 → r54308

 #endif

+uint64_t            tmR3CpuTickGetRawVirtualNoCheck(PVM pVM);
 int                 tmCpuTickPause(PVMCPU pVCpu);
 int                 tmCpuTickPauseLocked(PVM pVM, PVMCPU pVCpu);