Changeset 87633 in vbox for trunk/src/VBox/VMM
Timestamp:
    Feb 5, 2021 9:37:09 PM
Location:
    trunk/src/VBox/VMM
Files:
    6 edited
Legend:
    Unmodified: plain context lines
    Added: lines prefixed with '+'
    Removed: lines prefixed with '-'
trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp
r87292 → r87633

@@ -272 +272 @@
 
 
+#ifdef IN_RING0 /* Only used in ring-0 at present (AMD-V and VT-x). */
 /**
  * Checks if AMD-V / VT-x can use an offsetted hardware TSC or not.
@@ -308 +309 @@
         /** @todo We should negate both deltas!  It's soo weird that we do the
          *        exact opposite of what the hardware implements. */
-#ifdef IN_RING3
+# ifdef IN_RING3
         *poffRealTsc = (uint64_t)0 - pVCpu->tm.s.offTSCRawSrc - (uint64_t)SUPGetTscDelta();
-#else
+# else
         *poffRealTsc = (uint64_t)0 - pVCpu->tm.s.offTSCRawSrc - (uint64_t)SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
-#endif
+# endif
         return true;
     }
@@ -331 +332 @@
     {
         /* The source is the timer synchronous virtual clock. */
-        uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
+        uint64_t uTscNow;
+        uint64_t u64Now = tmCpuTickCalcFromVirtual(pVM, TMVirtualSyncGetNoCheckWithTsc(pVM, &uTscNow))
                         - pVCpu->tm.s.offTSCRawSrc;
         /** @todo When we start collecting statistics on how much time we spend executing
…
         if (u64Now >= pVCpu->tm.s.u64TSCLastSeen)
         {
-            *poffRealTsc = u64Now - ASMReadTSC();
+# ifdef IN_RING3
+            *poffRealTsc = u64Now - (uTscNow + (uint64_t)SUPGetTscDelta());
+# else
+            *poffRealTsc = u64Now - (uTscNow + (uint64_t)SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet));
+# endif
             return true;    /** @todo count this? */
         }
     }
 
-#ifdef VBOX_WITH_STATISTICS
+# ifdef VBOX_WITH_STATISTICS
     tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
-#endif
+# endif
     return false;
 }
+#endif /* IN_RING0 - at the moment */
 
 
 /**
@@ -383 +389 @@
 
 
+#ifdef IN_RING0 /* Only used in ring-0 from VT-x code at the moment. */
 /**
  * Gets the next deadline in host CPU clock ticks and the TSC offset if we can
  *
  * @returns The number of host CPU clock ticks to the next timer deadline.
- * @param   pVM             The cross context VM structure.
- * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
- * @param   poffRealTsc     The offset against the TSC of the current host CPU,
- *                          if pfOffsettedTsc is set to true.
- * @param   pfOffsettedTsc  Where to return whether TSC offsetting can be used.
- * @param   pfParavirtTsc   Where to return whether paravirt TSC is enabled.
+ * @param   pVM                 The cross context VM structure.
+ * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
+ * @param   poffRealTsc         The offset against the TSC of the current host CPU,
+ *                              if pfOffsettedTsc is set to true.
+ * @param   pfOffsettedTsc      Where to return whether TSC offsetting can be used.
+ * @param   pfParavirtTsc       Where to return whether paravirt TSC is enabled.
+ * @param   puTscNow            Where to return the TSC value that the return
+ *                              value is relative to.  This is delta adjusted.
+ * @param   puDeadlineVersion   Where to return the deadline "version" number.
+ *                              Use with TMVirtualSyncIsCurrentDeadlineVersion()
+ *                              to check if the absolute deadline is still up to
+ *                              date and the caller can skip calling this
+ *                              function.
 *
 * @thread  EMT(pVCpu).
 */
 VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCC pVM, PVMCPUCC pVCpu, uint64_t *poffRealTsc,
-                                                        bool *pfOffsettedTsc, bool *pfParavirtTsc)
+                                                        bool *pfOffsettedTsc, bool *pfParavirtTsc,
+                                                        uint64_t *puTscNow, uint64_t *puDeadlineVersion)
 {
     Assert(pVCpu->tm.s.fTSCTicking || DBGFIsStepping(pVCpu));
@@ -412 +427 @@
         /** @todo We should negate both deltas!  It's soo weird that we do the
          *        exact opposite of what the hardware implements. */
-#ifdef IN_RING3
+# ifdef IN_RING3
         *poffRealTsc = (uint64_t)0 - pVCpu->tm.s.offTSCRawSrc - (uint64_t)SUPGetTscDelta();
-#else
+# else
         *poffRealTsc = (uint64_t)0 - pVCpu->tm.s.offTSCRawSrc - (uint64_t)SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet);
-#endif
+# endif
         *pfOffsettedTsc = true;
-        return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM));
+        return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM, puDeadlineVersion, puTscNow));
     }
@@ -431 +446 @@
         /* The source is the timer synchronous virtual clock. */
         uint64_t cNsToDeadline;
-        uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
+        uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline, puDeadlineVersion, puTscNow);
         uint64_t u64Now = tmCpuTickCalcFromVirtual(pVM, u64NowVirtSync);
         u64Now -= pVCpu->tm.s.offTSCRawSrc;
-        *poffRealTsc = u64Now - ASMReadTSC();
+
+# ifdef IN_RING3
+        *poffRealTsc = u64Now - (*puTscNow + (uint64_t)SUPGetTscDelta());  /* undoing delta */
+# else
+        *poffRealTsc = u64Now - (*puTscNow + (uint64_t)SUPGetTscDeltaByCpuSetIndex(pVCpu->iHostCpuSet)); /* undoing delta */
+# endif
         *pfOffsettedTsc = u64Now >= pVCpu->tm.s.u64TSCLastSeen;
         return tmCpuCalcTicksToDeadline(pVCpu, cNsToDeadline);
     }
 
-#ifdef VBOX_WITH_STATISTICS
+# ifdef VBOX_WITH_STATISTICS
     tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
-#endif
+# endif
     *pfOffsettedTsc = false;
     *poffRealTsc    = 0;
-    return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM));
-}
+    return tmCpuCalcTicksToDeadline(pVCpu, TMVirtualSyncGetNsToDeadline(pVM, puDeadlineVersion, puTscNow));
+}
+#endif /* IN_RING0 - at the moment */
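The offsetting change above replaces the fresh ASMReadTSC() read with the delta-adjusted TSC sample (uTscNow) captured during the virtual-sync clock read, which is why the per-CPU delta reported by SUPGetTscDelta() has to be added back ("undone") before computing the offset. Below is a minimal standalone sketch of that arithmetic only; the REFSAMPLE struct, the g_iTscDelta value and the function names are illustrative stand-ins, not VMM APIs.

#include <stdint.h>
#include <stdio.h>

typedef struct REFSAMPLE
{
    uint64_t uGuestTsc;   /* virtual TSC value the guest should see "now" */
    uint64_t uHostTscAdj; /* delta-adjusted host TSC sample it was derived from */
} REFSAMPLE;

/* Hypothetical per-CPU TSC delta (what SUPGetTscDelta() would report). */
static int64_t const g_iTscDelta = -42;

/* The hardware applies: guest = raw + offset.  Since raw = adjusted + delta
   (per the "undoing delta" convention in the diff above), the offset is: */
static uint64_t calcTscOffset(REFSAMPLE const *pRef, int64_t iTscDelta)
{
    uint64_t const uHostTscRaw = pRef->uHostTscAdj + (uint64_t)iTscDelta;
    return pRef->uGuestTsc - uHostTscRaw; /* unsigned wrap-around is intentional */
}

int main(void)
{
    REFSAMPLE const Ref = { 1000000 /*uGuestTsc*/, 5000000 /*uHostTscAdj*/ };
    uint64_t const off = calcTscOffset(&Ref, g_iTscDelta);

    /* Check: applying the offset to the raw host TSC reproduces the guest value. */
    uint64_t const uHostTscRaw = Ref.uHostTscAdj + (uint64_t)g_iTscDelta;
    printf("guest TSC reconstructed: %llu (expected %llu)\n",
           (unsigned long long)(uHostTscRaw + off), (unsigned long long)Ref.uGuestTsc);
    return 0;
}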
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
r87626 → r87633

@@ -180 +180 @@
 
 
 /**
+ * Wrapper around the IPRT GIP time methods, extended version.
+ */
+DECLINLINE(uint64_t) tmVirtualGetRawNanoTSEx(PVMCC pVM, uint64_t *puTscNow)
+{
+    RTTIMENANOTSEXTRA Extra;
+# ifdef IN_RING3
+    uint64_t u64 = CTXALLSUFF(pVM->tm.s.pfnVirtualGetRaw)(&CTXALLSUFF(pVM->tm.s.VirtualGetRawData), &Extra);
+# else  /* !IN_RING3 */
+    uint32_t cPrevSteps = pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps;
+    uint64_t u64 = pVM->tm.s.CTX_SUFF(pfnVirtualGetRaw)(&pVM->tm.s.CTX_SUFF(VirtualGetRawData), &Extra);
+    if (cPrevSteps != pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps)
+        VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
+# endif /* !IN_RING3 */
+    if (puTscNow)
+        *puTscNow = Extra.uTSCValue;
+    /*DBGFTRACE_POS_U64(pVM, u64);*/
+    return u64;
+}
+
+
+/**
  * Get the time when we're not running at 100%
  *
  * @returns The timestamp.
- * @param   pVM     The cross context VM structure.
- */
-static uint64_t tmVirtualGetRawNonNormal(PVMCC pVM)
+ * @param   pVM         The cross context VM structure.
+ * @param   puTscNow    Where to return the TSC corresponding to the returned
+ *                      timestamp (delta adjusted).  Optional.
+ */
+static uint64_t tmVirtualGetRawNonNormal(PVMCC pVM, uint64_t *puTscNow)
 {
     /*
…
      * warp drive has been enabled.
      */
-    uint64_t u64 = tmVirtualGetRawNanoTS(pVM);
+    uint64_t u64 = tmVirtualGetRawNanoTSEx(pVM, puTscNow);
     u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
     u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
@@ -211 +234 @@
  *
  * @returns The current time stamp.
- * @param   pVM     The cross context VM structure.
+ * @param   pVM         The cross context VM structure.
  */
 DECLINLINE(uint64_t) tmVirtualGetRaw(PVMCC pVM)
 {
     if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
         return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
-    return tmVirtualGetRawNonNormal(pVM);
+    return tmVirtualGetRawNonNormal(pVM, NULL /*puTscNow*/);
+}
+
+
+/**
+ * Get the raw virtual time, extended version.
+ *
+ * @returns The current time stamp.
+ * @param   pVM         The cross context VM structure.
+ * @param   puTscNow    Where to return the TSC corresponding to the returned
+ *                      timestamp (delta adjusted).  Optional.
+ */
+DECLINLINE(uint64_t) tmVirtualGetRawEx(PVMCC pVM, uint64_t *puTscNow)
+{
+    if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
+        return tmVirtualGetRawNanoTSEx(pVM, puTscNow) - pVM->tm.s.u64VirtualOffset;
+    return tmVirtualGetRawNonNormal(pVM, puTscNow);
 }
@@ -322 +361 @@
  *                          the next virtual sync timer deadline.  Can be
  *                          NULL.
- */
-DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVMCC pVM, uint64_t u64, uint64_t off, uint64_t *pcNsToDeadline)
+ * @param   pnsAbsDeadline  Where to return the absolute deadline.
+ *                          Optional.
+ */
+DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVMCC pVM, uint64_t u64, uint64_t off,
+                                                         uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
 {
     STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
@@ -375 +417 @@
 
     uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
+    if (pnsAbsDeadline)
+        *pnsAbsDeadline = u64Expire; /* Always return the unadjusted absolute deadline, or HM will waste time going
+                                        thru this code over and over again even if there aren't any timer changes. */
     if (u64 < u64Expire)
     {
@@ -432 +477 @@
  *                          the next virtual sync timer deadline.  Can be
  *                          NULL.
- */
-DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVMCC pVM, uint64_t u64, uint64_t *pcNsToDeadline)
+ * @param   pnsAbsDeadline  Where to return the absolute deadline.
+ *                          Optional.
+ */
+DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVMCC pVM, uint64_t u64, uint64_t *pcNsToDeadline, uint64_t *pnsAbsDeadline)
 {
     /*
@@ -444 +491 @@
         if (pcNsToDeadline)
             *pcNsToDeadline = 0;
+        if (pnsAbsDeadline)
+            *pnsAbsDeadline = u64;
         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
         Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
@@ -455 +504 @@
     uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
     if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
-        return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off, pcNsToDeadline);
+        return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off, pcNsToDeadline, pnsAbsDeadline);
 
     /*
@@ -472 +521 @@
 
     uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
+    if (pnsAbsDeadline)
+        *pnsAbsDeadline = u64Expire;
     if (u64 < u64Expire)
     {
@@ -516 +567 @@
  *                          the next virtual sync timer deadline.  Can be
  *                          NULL.
+ * @param   pnsAbsDeadline  Where to return the absolute deadline.
+ *                          Optional.
+ * @param   puTscNow        Where to return the TSC corresponding to the
+ *                          returned timestamp (delta adjusted).  Optional.
  * @thread  EMT.
  */
-DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers, uint64_t *pcNsToDeadline)
+DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers, uint64_t *pcNsToDeadline,
+                                        uint64_t *pnsAbsDeadline, uint64_t *puTscNow)
 {
     STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);
@@ -536 +592 @@
      */
     Assert(pVM->tm.s.cVirtualTicking);
-    u64 = tmVirtualGetRaw(pVM);
+    u64 = tmVirtualGetRawEx(pVM, puTscNow);
     if (fCheckTimers)
     {
@@ -559 +615 @@
      * which is less picky or hasn't been adjusted yet
      */
+    /** @todo switch this around, have the tmVirtualSyncGetLocked code inlined
+     *        here and the remainder of this function in a static worker. */
     if (PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
-        return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline);
+        return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);
 
     /*
@@ -580 +638 @@
     if (off < u64Expire)
     {
+        if (pnsAbsDeadline)
+            *pnsAbsDeadline = u64Expire;
         if (pcNsToDeadline)
             *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - off);
@@ -597 +657 @@
         if (pcNsToDeadline)
             *pcNsToDeadline = 0;
+        if (pnsAbsDeadline)
+            *pnsAbsDeadline = off;
         STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
         Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
@@ -629 +691 @@
             int rcLock = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
             if (RT_SUCCESS_NP(rcLock))
-                return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline);
+                return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline, pnsAbsDeadline);
 
             /* Re-check the ticking flag. */
@@ -640 +702 @@
                 if (pcNsToDeadline)
                     *pcNsToDeadline = 0;
+                if (pnsAbsDeadline)
+                    *pnsAbsDeadline = off;
                 Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
                 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped3");
@@ -704 +768 @@
             /** @todo u64VirtualSyncLast */
             uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
+            if (pnsAbsDeadline)
+                *pnsAbsDeadline = u64Expire;
             if (u64 >= u64Expire)
             {
@@ -749 +815 @@
 VMM_INT_DECL(uint64_t) TMVirtualSyncGet(PVMCC pVM)
 {
-    return tmVirtualSyncGetEx(pVM, true /*fCheckTimers*/, NULL /*pcNsToDeadline*/);
+    return tmVirtualSyncGetEx(pVM, true /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
 }
@@ -764 +830 @@
 VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheck(PVMCC pVM)
 {
-    return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/);
+    return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
+}
+
+
+/**
+ * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
+ * TMCLOCK_VIRTUAL, also returning the corresponding TSC value.
+ *
+ * @returns The timestamp.
+ * @param   pVM         The cross context VM structure.
+ * @param   puTscNow    Where to return the TSC value that the return
+ *                      value is relative to.  This is delta adjusted.
+ * @thread  EMT.
+ * @remarks May set the timer and virtual sync FFs.
+ */
+VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheckWithTsc(PVMCC pVM, uint64_t *puTscNow)
+{
+    return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, puTscNow);
 }
@@ -779 +862 @@
 VMM_INT_DECL(uint64_t) TMVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers)
 {
-    return tmVirtualSyncGetEx(pVM, fCheckTimers, NULL /*pcNsToDeadline*/);
+    return tmVirtualSyncGetEx(pVM, fCheckTimers, NULL /*pcNsToDeadline*/, NULL /*pnsAbsDeadline*/, NULL /*puTscNow*/);
 }
@@ -791 +874 @@
  * @param   pcNsToDeadline      Where to return the number of nano seconds to
  *                              the next virtual sync timer deadline.
+ * @param   puTscNow            Where to return the TSC value that the return
+ *                              value is relative to.  This is delta adjusted.
+ * @param   puDeadlineVersion   Where to return the deadline "version" number.
+ *                              Use with TMVirtualSyncIsCurrentDeadlineVersion()
+ *                              to check if the absolute deadline is still up to
+ *                              date and the caller can skip calling this
+ *                              function.
  * @thread  EMT.
  * @remarks May set the timer and virtual sync FFs.
  */
-VMM_INT_DECL(uint64_t) TMVirtualSyncGetWithDeadlineNoCheck(PVMCC pVM, uint64_t *pcNsToDeadline)
+VMM_INT_DECL(uint64_t) TMVirtualSyncGetWithDeadlineNoCheck(PVMCC pVM, uint64_t *pcNsToDeadline,
+                                                           uint64_t *puDeadlineVersion, uint64_t *puTscNow)
 {
     uint64_t cNsToDeadlineTmp; /* try to convince the compiler to skip the if tests. */
-    uint64_t u64Now = tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadlineTmp);
+    uint64_t u64Now = tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadlineTmp, puDeadlineVersion, puTscNow);
     *pcNsToDeadline = cNsToDeadlineTmp;
     return u64Now;
@@ -808 +899 @@
  * @returns The number of TMCLOCK_VIRTUAL ticks.
  * @param   pVM                 The cross context VM structure.
+ * @param   puTscNow            Where to return the TSC value that the return
+ *                              value is relative to.  This is delta adjusted.
+ * @param   puDeadlineVersion   Where to return the deadline "version" number.
+ *                              Use with TMVirtualSyncIsCurrentDeadlineVersion()
+ *                              to check if the absolute deadline is still up to
+ *                              date and the caller can skip calling this
+ *                              function.
  * @thread  EMT.
  * @remarks May set the timer and virtual sync FFs.
  */
-VMMDECL(uint64_t) TMVirtualSyncGetNsToDeadline(PVMCC pVM)
+VMMDECL(uint64_t) TMVirtualSyncGetNsToDeadline(PVMCC pVM, uint64_t *puDeadlineVersion, uint64_t *puTscNow)
 {
     uint64_t cNsToDeadline;
-    tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadline);
+    tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadline, puDeadlineVersion, puTscNow);
     return cNsToDeadline;
+}
+
+
+/**
+ * Checks if the given deadline is still current.
+ *
+ * @retval  true if the deadline is still current.
+ * @retval  false if the deadline is outdated.
+ * @param   pVM                 The cross context VM structure.
+ * @param   uDeadlineVersion    The deadline version to check.
+ */
+VMM_INT_DECL(bool) TMVirtualSyncIsCurrentDeadlineVersion(PVMCC pVM, uint64_t uDeadlineVersion)
+{
+    /** @todo Try use ASMAtomicUoReadU64 instead. */
+    uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
+    return u64Expire == uDeadlineVersion;
 }
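TMVirtualSyncIsCurrentDeadlineVersion() works because the "version" handed back through puDeadlineVersion is simply the unadjusted u64Expire of the virtual-sync timer queue head: as long as no timer has been scheduled or stopped, that value is unchanged and a previously derived deadline is still valid. The following standalone sketch shows the same pattern with hypothetical names, using C11 atomics in place of ASMAtomicReadU64.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Head expiry of a hypothetical timer queue; it doubles as the "version". */
static _Atomic uint64_t g_u64NextExpire = 1000;

/* Expensive derivation of a deadline, also returning the version it used. */
static uint64_t computeDeadline(uint64_t *puVersion)
{
    uint64_t const uExpire = atomic_load(&g_u64NextExpire);
    *puVersion = uExpire;       /* the version is the raw expiry value itself */
    return uExpire * 3 + 7;     /* stand-in for a costly unit conversion */
}

static bool isCurrentDeadlineVersion(uint64_t uVersion)
{
    return atomic_load(&g_u64NextExpire) == uVersion;
}

int main(void)
{
    uint64_t uVersion;
    uint64_t const uDeadline = computeDeadline(&uVersion);
    printf("deadline %llu still valid: %d\n",
           (unsigned long long)uDeadline, isCurrentDeadlineVersion(uVersion));

    atomic_store(&g_u64NextExpire, 2000);  /* someone armed or stopped a timer */
    printf("deadline %llu still valid: %d\n",
           (unsigned long long)uDeadline, isCurrentDeadlineVersion(uVersion));
    return 0;
}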
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r87625 → r87633

@@ -2284 +2284 @@
     Assert(pGuestMsrLoad);
 
+#ifndef DEBUG_bird
     LogFlowFunc(("pVCpu=%p idMsr=%#RX32 uGuestMsrValue=%#RX64\n", pVCpu, idMsr, uGuestMsrValue));
+#endif
 
     /* Check if the MSR already exists in the VM-entry MSR-load area. */
@@ -2369 +2371 @@
     uint32_t cMsrs = pVmcsInfo->cEntryMsrLoad;
 
+#ifndef DEBUG_bird
     LogFlowFunc(("pVCpu=%p idMsr=%#RX32\n", pVCpu, idMsr));
+#endif
 
     for (uint32_t i = 0; i < cMsrs; i++)
@@ -7114 +7118 @@
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pVmxTransient   The VMX-transient structure.
+ * @param   idCurrentCpu    The current CPU number.
  *
  * @remarks No-long-jump zone!!!
  */
-static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
+static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, RTCPUID idCurrentCpu)
 {
     bool         fOffsettedTsc;
     bool         fParavirtTsc;
     uint64_t     uTscOffset;
-    PVMCC        pVM = pVCpu->CTX_SUFF(pVM);
+    PVMCC        pVM       = pVCpu->CTX_SUFF(pVM);
     PVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
 
     if (pVM->hmr0.s.vmx.fUsePreemptTimer)
     {
-        uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &uTscOffset, &fOffsettedTsc, &fParavirtTsc);
+
+        /* The TMCpuTickGetDeadlineAndTscOffset function is expensive (calling it on
+           every entry slowed down the bs2-test1 CPUID testcase by ~33% on a 10980xe). */
+        uint64_t cTicksToDeadline;
+        if (   idCurrentCpu == pVCpu->hmr0.s.idLastCpu
+            && TMVirtualSyncIsCurrentDeadlineVersion(pVM, pVCpu->hmr0.s.vmx.uTscDeadlineVersion))
+        {
+            STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatVmxPreemptionReusingDeadline);
+            fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
+            cTicksToDeadline = pVCpu->hmr0.s.vmx.uTscDeadline - SUPReadTsc();
+            if ((int64_t)cTicksToDeadline > 0)
+            { /* hopefully */ }
+            else
+            {
+                STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatVmxPreemptionReusingDeadlineExpired);
+                cTicksToDeadline = 0;
+            }
+        }
+        else
+        {
+            STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatVmxPreemptionRecalcingDeadline);
+            cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &uTscOffset, &fOffsettedTsc, &fParavirtTsc,
+                                                                &pVCpu->hmr0.s.vmx.uTscDeadline,
+                                                                &pVCpu->hmr0.s.vmx.uTscDeadlineVersion);
+            pVCpu->hmr0.s.vmx.uTscDeadline += cTicksToDeadline;
+            if (cTicksToDeadline >= 128)
+            { /* hopefully */ }
+            else
+                STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatVmxPreemptionRecalcingDeadlineExpired);
+        }
 
         /* Make sure the returned values have sane upper and lower boundaries. */
-        uint64_t u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
+        uint64_t const u64CpuHz = SUPGetCpuHzFromGipBySetIndex(g_pSUPGlobalInfoPage, pVCpu->iHostCpuSet);
         cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64);   /* 1/64th of a second */  /** @todo r=bird: Once real+virtual timers move to separate thread, we can raise the upper limit (16ms isn't much).  ASSUMES working poke cpu function. */
         cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
@@ -11042 +11076 @@
         || idCurrentCpu != pVCpu->hmr0.s.idLastCpu)
     {
-        hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pVmxTransient);
+        hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pVmxTransient, idCurrentCpu);
         pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = true;
     }
@@ -15001 +15035 @@
     /* If the VMX-preemption timer has expired, reinitialize the preemption timer on next VM-entry. */
     pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;
+    Log12(("hmR0VmxExitPreemptTimer:\n"));
 
     /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
     bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
-    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
+    STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
     return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
 }
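The hmR0VmxUpdateTscOffsettingAndPreemptTimer() change caches the absolute TSC deadline per vCPU and only falls back to the expensive recalculation when the host CPU changed or the deadline version moved. The runnable sketch below condenses that arming pattern; the helper functions are trivial stand-ins for the real TM/SUP calls, and all names are hypothetical.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* --- Stand-ins so the sketch runs; the real code reads the GIP and TSC. --- */
static uint64_t g_uFakeTsc     = 100000;
static uint64_t g_uQueueExpire = 7777;   /* acts as the deadline version */

static uint64_t readTsc(void) { return g_uFakeTsc += 16; }
static bool     deadlineVersionIsCurrent(uint64_t uVersion) { return uVersion == g_uQueueExpire; }
static uint64_t recomputeDeadlineTicks(uint64_t *puTscNow, uint64_t *puVersion)
{
    *puTscNow  = readTsc();
    *puVersion = g_uQueueExpire;
    return 5000;                         /* pretend: 5000 ticks to the next timer */
}

typedef struct DEADLINECACHE
{
    uint64_t uAbsDeadline;               /* absolute TSC deadline */
    uint64_t uVersion;                   /* version it was computed for */
    uint32_t idLastCpu;                  /* host CPU it was computed on */
} DEADLINECACHE;

static uint64_t armPreemptTimer(DEADLINECACHE *pCache, uint32_t idCurrentCpu)
{
    uint64_t cTicks;
    if (   idCurrentCpu == pCache->idLastCpu
        && deadlineVersionIsCurrent(pCache->uVersion))
    {
        /* Cheap path: ticks left until the cached absolute deadline. */
        cTicks = pCache->uAbsDeadline - readTsc();
        if ((int64_t)cTicks < 0)
            cTicks = 0;                  /* already expired; fire immediately */
    }
    else
    {
        /* Expensive path: recompute and remember the absolute deadline. */
        uint64_t uTscNow;
        cTicks = recomputeDeadlineTicks(&uTscNow, &pCache->uVersion);
        pCache->uAbsDeadline = uTscNow + cTicks;   /* make it absolute */
        pCache->idLastCpu    = idCurrentCpu;
    }
    return cTicks;
}

int main(void)
{
    DEADLINECACHE Cache = { 0, 0, UINT32_MAX };
    printf("first arm:  %llu ticks\n", (unsigned long long)armPreemptTimer(&Cache, 3));
    printf("second arm: %llu ticks\n", (unsigned long long)armPreemptTimer(&Cache, 3));
    return 0;
}

The second call takes the cheap path because neither the CPU nor the version changed, mirroring the StatVmxPreemptionReusingDeadline case in the diff.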
trunk/src/VBox/VMM/VMMR3/HM.cpp
r87563 → r87633

@@ -704 +704 @@
 #endif
 
-#ifdef VBOX_WITH_STATISTICS
     bool const fCpuSupportsVmx = ASMIsIntelCpu() || ASMIsViaCentaurCpu() || ASMIsShanghaiCpu();
-#endif
     for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
     {
@@ -792 +790 @@
         HM_REG_COUNTER(&pHmCpu->StatExitHostNmiInGC,    "/HM/CPU%u/Exit/HostNmiInGC",    "Host NMI received while in guest context.");
         HM_REG_COUNTER(&pHmCpu->StatExitHostNmiInGCIpi, "/HM/CPU%u/Exit/HostNmiInGCIpi", "Host NMI received while in guest context dispatched using IPIs.");
+        HM_REG_COUNTER(&pHmCpu->StatExitPreemptTimer,   "/HM/CPU%u/Exit/PreemptTimer",   "VMX-preemption timer expired.");
 #ifdef VBOX_WITH_STATISTICS
-        HM_REG_COUNTER(&pHmCpu->StatExitPreemptTimer,   "/HM/CPU%u/Exit/PreemptTimer",   "VMX-preemption timer expired.");
         HM_REG_COUNTER(&pHmCpu->StatExitTprBelowThreshold, "/HM/CPU%u/Exit/TprBelowThreshold", "TPR lowered below threshold by the guest.");
         HM_REG_COUNTER(&pHmCpu->StatExitTaskSwitch,     "/HM/CPU%u/Exit/TaskSwitch",     "Task switch caused through task gate in IDT.");
@@ -871 +869 @@
         HM_REG_COUNTER(&pHmCpu->StatVmxCheckBadRpl,     "/HM/CPU%u/VMXCheck/RPL",        "Could not use VMX due to unsuitable RPL.");
         HM_REG_COUNTER(&pHmCpu->StatVmxCheckPmOk,       "/HM/CPU%u/VMXCheck/VMX_PM",     "VMX execution in protected mode OK.");
 #endif
+        if (fCpuSupportsVmx)
+        {
+            HM_REG_COUNTER(&pHmCpu->StatExitPreemptTimer,                    "/HM/CPU%u/PreemptTimer",                          "VMX-preemption timer fired.");
+            HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionReusingDeadline,        "/HM/CPU%u/PreemptTimer/ReusingDeadline",          "VMX-preemption timer arming logic using previously calculated deadline");
+            HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionReusingDeadlineExpired, "/HM/CPU%u/PreemptTimer/ReusingDeadlineExpired",   "VMX-preemption timer arming logic found previous deadline already expired (ignored)");
+            HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionRecalcingDeadline,      "/HM/CPU%u/PreemptTimer/RecalcingDeadline",        "VMX-preemption timer arming logic recalculating the deadline (slightly expensive)");
+            HM_REG_COUNTER(&pHmCpu->StatVmxPreemptionRecalcingDeadlineExpired, "/HM/CPU%u/PreemptTimer/RecalcingDeadlineExpired", "VMX-preemption timer arming logic found recalculated deadline expired (ignored)");
+        }
+#ifdef VBOX_WITH_STATISTICS
         /*
          * Guest Exit reason stats.
trunk/src/VBox/VMM/include/HMInternal.h
r87606 → r87633

@@ -1321 +1321 @@
     STAMCOUNTER StatVmxCheckPmOk;
 
+    STAMCOUNTER StatVmxPreemptionRecalcingDeadline;
+    STAMCOUNTER StatVmxPreemptionRecalcingDeadlineExpired;
+    STAMCOUNTER StatVmxPreemptionReusingDeadline;
+    STAMCOUNTER StatVmxPreemptionReusingDeadlineExpired;
+
 #ifdef VBOX_WITH_STATISTICS
     R3PTRTYPE(PSTAMCOUNTER) paStatExitReason;
@@ -1389 +1394 @@
     /** Ring-0 pointer to the hardware-assisted VMX execution function. */
     PFNHMVMXSTARTVM             pfnStartVm;
+    /** Absolute TSC deadline. */
+    uint64_t                    uTscDeadline;
+    /** The deadline version number. */
+    uint64_t                    uTscDeadlineVersion;
 
     /** @name Guest information.
trunk/src/VBox/VMM/include/HMInternal.mac
r87522 → r87633

@@ -182 +182 @@
 struc HMR0CPUVMX
     .pfnStartVm             RTR0PTR_RES 1
+    .uTscDeadline           resq 1
+    .uTscDeadlineVersion    resq 1
 
     .VmcsInfo               resb VMXVMCSINFO_size
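The HMInternal.mac addition mirrors the two new HMR0CPUVMX fields so that assembly code sees the same layout as the C struct in HMInternal.h. A small sketch of how such a mirror is typically guarded against drift follows, using C11 _Static_assert where VBox would use its AssertCompile* macros; the struct here is illustrative, not the real HMR0CPUVMX.

#include <stddef.h>
#include <stdint.h>

/* Illustrative C-side struct; the real HMR0CPUVMX has many more members. */
typedef struct MIRRORDEMO
{
    uint64_t pfnStartVm;          /* .pfnStartVm          RTR0PTR_RES 1 (64-bit host) */
    uint64_t uTscDeadline;        /* .uTscDeadline        resq 1 */
    uint64_t uTscDeadlineVersion; /* .uTscDeadlineVersion resq 1 */
} MIRRORDEMO;

/* If the C struct and the NASM struc drift apart, the build fails here
   rather than at runtime with corrupt state. */
_Static_assert(offsetof(MIRRORDEMO, uTscDeadline)        ==  8, "mirror drift");
_Static_assert(offsetof(MIRRORDEMO, uTscDeadlineVersion) == 16, "mirror drift");

int main(void) { return 0; }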