Changeset 87812 in vbox
- Timestamp:
- Feb 19, 2021 8:54:49 PM (4 years ago)
- Location:
- trunk
- Files:
-
- 7 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/include/VBox/vmm/vm.h
r87792 r87812 1364 1364 struct TM s; 1365 1365 #endif 1366 uint8_t padding[9 152]; /* multiple of 64 */1366 uint8_t padding[9920]; /* multiple of 64 */ 1367 1367 } tm; 1368 1368 … … 1463 1463 1464 1464 /** Padding for aligning the structure size on a page boundrary. */ 1465 uint8_t abAlignment2[ 2392- sizeof(PVMCPUR3) * VMM_MAX_CPU_COUNT];1465 uint8_t abAlignment2[1624 - sizeof(PVMCPUR3) * VMM_MAX_CPU_COUNT]; 1466 1466 1467 1467 /* ---- end small stuff ---- */ -
trunk/include/VBox/vmm/vm.mac
r87792 r87812 63 63 alignb 64 64 64 .hm resb 5888 65 alignb 64 65 66 .nem resb 512 67 alignb 64 66 68 .trpm resb 128 69 alignb 64 67 70 .tm resb 5760 71 alignb 64 68 72 .vmm resb 896 73 alignb 64 69 74 .pdm resb 256 75 alignb 64 70 76 .iom resb 512 77 alignb 64 71 78 .dbgf resb 512 79 alignb 64 72 80 .gim resb 512 81 alignb 64 73 82 .apic resb 3840 74 83 84 alignb 64 75 85 .fTraceGroups resd 1 76 86 .uAdHoc resd 1 … … 121 131 alignb 64 122 132 .cpum resb 1536 133 alignb 64 123 134 .vmm resb 1600 135 alignb 64 124 136 .pgm resb 21120 137 alignb 64 125 138 .hm resb 5504 139 alignb 64 126 140 .trpm resb 2048 141 alignb 64 127 142 .selm resb 768 143 alignb 64 128 144 .mm resb 192 145 alignb 64 129 146 .pdm resb 8128 147 alignb 64 130 148 .iom resb 1152 149 alignb 64 131 150 .em resb 256 151 alignb 64 132 152 .nem resb 128 133 .tm resb 9152 153 alignb 64 154 .tm resb 9920 155 alignb 64 134 156 .dbgf resb 2432 157 alignb 64 135 158 .ssm resb 128 159 alignb 64 136 160 .gim resb 448 161 alignb 64 137 162 .apic resb 128 163 alignb 64 138 164 .vm resb 32 139 165 .cfgm resb 8 -
trunk/src/VBox/VMM/VMMAll/TMAll.cpp
r87792 r87812 365 365 PTMTIMERQUEUECC const pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, enmClock, pQueue); 366 366 367 if ( VM_IS_EMT(pVM) 368 && RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM))) 369 { 370 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a); 371 Log3(("tmSchedule: tmTimerQueueSchedule\n")); 372 tmTimerQueueSchedule(pVM, pQueueCC, pQueue); 367 if (VM_IS_EMT(pVM)) /** @todo drop EMT requirement here. */ 368 { 369 int rc = PDMCritSectTryEnter(&pQueue->TimerLock); 370 if (RT_SUCCESS_NP(rc)) 371 { 372 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a); 373 Log3(("tmSchedule: tmTimerQueueSchedule\n")); 374 tmTimerQueueSchedule(pVM, pQueueCC, pQueue); 373 375 #ifdef VBOX_STRICT 374 tmTimerQueuesSanityChecks(pVM, "tmSchedule");376 tmTimerQueuesSanityChecks(pVM, "tmSchedule"); 375 377 #endif 376 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);377 TM_UNLOCK_TIMERS(pVM);378 }379 else380 {381 TMTIMERSTATE enmState = pTimer->enmState; 382 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))383 tmScheduleNotify(pVM);384 }378 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a); 379 PDMCritSectLeave(&pQueue->TimerLock); 380 return; 381 } 382 } 383 384 TMTIMERSTATE enmState = pTimer->enmState; 385 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState)) 386 tmScheduleNotify(pVM); 385 387 } 386 388 … … 624 626 void tmTimerQueueSchedule(PVMCC pVM, PTMTIMERQUEUECC pQueueCC, PTMTIMERQUEUE pQueue) 625 627 { 626 TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM); 627 NOREF(pVM); 628 Assert(PDMCritSectIsOwner(&pQueue->TimerLock)); 628 629 629 630 /* … … 661 662 * @param pVM The cross context VM structure. 662 663 * @param pszWhere Caller location clue. 
663 *664 * @remarks Called while owning the lock.665 664 */ 666 665 void tmTimerQueuesSanityChecks(PVMCC pVM, const char *pszWhere) 667 666 { 668 TM_ASSERT_TIMER_LOCK_OWNERSHIP(pVM);669 670 667 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++) 671 668 { … … 674 671 Assert(pQueue->enmClock == (TMCLOCK)idxQueue); 675 672 676 int rc = PDMCritSect RwTryEnterShared(&pQueue->AllocLock);673 int rc = PDMCritSectTryEnter(&pQueue->TimerLock); 677 674 if (RT_SUCCESS(rc)) 678 675 { … … 780 777 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock); 781 778 } 782 PDMCritSect RwLeaveShared(&pQueue->AllocLock);779 PDMCritSectLeave(&pQueue->TimerLock); 783 780 } 784 781 } … … 1198 1195 * @param pTimer The timer handle. 1199 1196 * @param u64Expire The new expire time. 1200 */ 1201 static int tmTimerSetOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire) 1197 * @param pQueue Pointer to the shared timer queue data. 1198 * @param enmClock The sanitized clock. 1199 */ 1200 static int tmTimerSetOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t u64Expire, PTMTIMERQUEUE pQueue, TMCLOCK enmClock) 1202 1201 { 1203 1202 Assert(pTimer->idxPrev == UINT32_MAX); 1204 1203 Assert(pTimer->idxNext == UINT32_MAX); 1205 1204 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE); 1206 1207 TMCLOCK const enmClock = pTimer->enmClock;1208 AssertReturn((unsigned)enmClock < TMCLOCK_MAX, VERR_TM_IPE_2);1209 1205 1210 1206 /* … … 1224 1220 * Link the timer into the active list. 1225 1221 */ 1226 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[enmClock];1227 1222 tmTimerQueueLinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, enmClock, pQueue), pQueue, pTimer, u64Expire); 1228 1223 1229 1224 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt); 1230 TM_UNLOCK_TIMERS(pVM);1231 1225 return VINF_SUCCESS; 1232 1226 } … … 1358 1352 { 1359 1353 /* Try take the TM lock and check the state again. 
*/ 1360 if (RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM))) 1354 TMCLOCK const enmClock = pTimer->enmClock; 1355 AssertReturn((unsigned)enmClock < TMCLOCK_MAX, VERR_TM_IPE_2); 1356 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[enmClock]; 1357 1358 int rc = PDMCritSectTryEnter(&pQueue->TimerLock); 1359 if (RT_SUCCESS_NP(rc)) 1361 1360 { 1362 1361 if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1))) 1363 1362 { 1364 tmTimerSetOptimizedStart(pVM, pTimer, u64Expire );1363 tmTimerSetOptimizedStart(pVM, pTimer, u64Expire, pQueue, enmClock); 1365 1364 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a); 1365 PDMCritSectLeave(&pQueue->TimerLock); 1366 1366 return VINF_SUCCESS; 1367 1367 } 1368 TM_UNLOCK_TIMERS(pVM);1368 PDMCritSectLeave(&pQueue->TimerLock); 1369 1369 } 1370 1370 } … … 1508 1508 * @param pu64Now Where to return the current time stamp used. 1509 1509 * Optional. 1510 */ 1511 static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now) 1510 * @param pQueue Pointer to the shared timer queue data. 1511 * @param enmClock The sanitized clock. 1512 */ 1513 static int tmTimerSetRelativeOptimizedStart(PVMCC pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now, 1514 PTMTIMERQUEUE pQueue, TMCLOCK enmClock) 1512 1515 { 1513 1516 Assert(pTimer->idxPrev == UINT32_MAX); … … 1518 1521 * Calculate and set the expiration time. 
1519 1522 */ 1520 TMCLOCK const enmClock = pTimer->enmClock;1521 AssertReturn((unsigned)enmClock < (unsigned)TMCLOCK_MAX, VERR_TM_IPE_2);1522 1523 uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now); 1523 1524 pTimer->u64Expire = u64Expire; … … 1528 1529 */ 1529 1530 DBGFTRACE_U64_TAG2(pVM, u64Expire, "tmTimerSetRelativeOptimizedStart", pTimer->szName); 1530 PTMTIMERQUEUE const pQueue = &pVM->tm.s.aTimerQueues[enmClock];1531 1531 tmTimerQueueLinkActive(pVM, TM_GET_TIMER_QUEUE_CC(pVM, enmClock, pQueue), pQueue, pTimer, u64Expire); 1532 1532 1533 1533 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt); 1534 TM_UNLOCK_TIMERS(pVM);1535 1534 return VINF_SUCCESS; 1536 1535 } … … 1630 1629 1631 1630 /* Treat virtual sync timers specially. */ 1632 if (pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC) 1631 TMCLOCK enmClock = pTimer->enmClock; 1632 if (enmClock == TMCLOCK_VIRTUAL_SYNC) 1633 1633 return tmTimerVirtualSyncSetRelative(pVM, pTimer, cTicksToNext, pu64Now); 1634 AssertReturn((unsigned)enmClock < (unsigned)TMCLOCK_MAX, VERR_TM_IPE_2); 1634 1635 1635 1636 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a); … … 1670 1671 * get the innermost locks. 1671 1672 */ 1672 bool fOwnTMLock = RT_SUCCESS_NP(TM_TRY_LOCK_TIMERS(pVM)); 1673 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[enmClock]; 1674 bool fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(&pQueue->TimerLock)); 1673 1675 #if 1 1674 1676 if ( fOwnTMLock … … 1680 1682 && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState))) 1681 1683 { 1682 tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now );1684 tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now, pQueue, enmClock); 1683 1685 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a); 1686 PDMCritSectLeave(&pQueue->TimerLock); 1684 1687 return VINF_SUCCESS; 1685 1688 } … … 1692 1695 * Unoptimized path. 
1693 1696 */ 1694 int rc; 1695 TMCLOCK const enmClock = pTimer->enmClock; 1697 int rc; 1696 1698 for (int cRetries = 1000; ; cRetries--) 1697 1699 { … … 1822 1824 */ 1823 1825 if (!fOwnTMLock) 1824 fOwnTMLock = RT_SUCCESS_NP( TM_TRY_LOCK_TIMERS(pVM));1826 fOwnTMLock = RT_SUCCESS_NP(PDMCritSectTryEnter(&pQueue->TimerLock)); 1825 1827 1826 1828 } /* for (;;) */ … … 1830 1832 */ 1831 1833 if (fOwnTMLock) 1832 TM_UNLOCK_TIMERS(pVM);1834 PDMCritSectLeave(&pQueue->TimerLock); 1833 1835 1834 1836 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a); … … 1872 1874 VMMDECL(int) TMTimerSetFrequencyHint(PVMCC pVM, TMTIMERHANDLE hTimer, uint32_t uHzHint) 1873 1875 { 1874 PTMTIMER pTimer; 1875 TMTIMER_HANDLE_TO_PTR_RETURN(pVM, hTimer, pTimer); 1876 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */ 1876 1877 TMTIMER_ASSERT_CRITSECT(pVM, pTimer); 1877 1878 … … 1879 1880 pTimer->uHzHint = uHzHint; 1880 1881 1881 uint32_t const uMaxHzHint = p VM->tm.s.uMaxHzHint;1882 uint32_t const uMaxHzHint = pQueue->uMaxHzHint; 1882 1883 if ( uHzHint > uMaxHzHint 1883 1884 || uHzOldHint >= uMaxHzHint) 1884 ASMAtomic WriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);1885 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)); 1885 1886 1886 1887 return VINF_SUCCESS; … … 1907 1908 1908 1909 /* Reset the HZ hint. */ 1909 if (pTimer->uHzHint) 1910 { 1911 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint) 1912 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true); 1910 uint32_t uOldHzHint = pTimer->uHzHint; 1911 if (uOldHzHint) 1912 { 1913 if (uOldHzHint >= pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].uMaxHzHint) 1914 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(TMCLOCK_VIRTUAL_SYNC) | RT_BIT_32(TMCLOCK_VIRTUAL_SYNC + 16)); 1913 1915 pTimer->uHzHint = 0; 1914 1916 } 1915 1917 1916 1918 /* Update the timer state. 
*/ 1917 TMTIMERSTATE const 1919 TMTIMERSTATE const enmState = pTimer->enmState; 1918 1920 switch (enmState) 1919 1921 { … … 1971 1973 VMMDECL(int) TMTimerStop(PVMCC pVM, TMTIMERHANDLE hTimer) 1972 1974 { 1973 PTMTIMER pTimer; 1974 TMTIMER_HANDLE_TO_PTR_RETURN(pVM, hTimer, pTimer); 1975 TMTIMER_HANDLE_TO_VARS_RETURN(pVM, hTimer); /* => pTimer, pQueueCC, pQueue, idxTimer, idxQueue */ 1975 1976 STAM_COUNTER_INC(&pTimer->StatStop); 1976 1977 1977 1978 /* Treat virtual sync timers specially. */ 1978 if ( pTimer->enmClock== TMCLOCK_VIRTUAL_SYNC)1979 if (idxQueue == TMCLOCK_VIRTUAL_SYNC) 1979 1980 return tmTimerVirtualSyncStop(pVM, pTimer); 1980 1981 … … 1985 1986 * Reset the HZ hint. 1986 1987 */ 1987 if (pTimer->uHzHint) 1988 { 1989 if (pTimer->uHzHint >= pVM->tm.s.uMaxHzHint) 1990 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true); 1988 uint32_t const uOldHzHint = pTimer->uHzHint; 1989 if (uOldHzHint) 1990 { 1991 if (uOldHzHint >= pQueue->uMaxHzHint) 1992 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16)); 1991 1993 pTimer->uHzHint = 0; 1992 1994 } … … 2614 2616 2615 2617 /** 2616 * Gets the highest frequency hint for all the important timers.2618 * The slow path of tmGetFrequencyHint() where we try to recalculate the value. 2617 2619 * 2618 2620 * @returns The highest frequency. 0 if no timers care. 2619 * @param pVM The cross context VM structure. 2620 */ 2621 static uint32_t tmGetFrequencyHint(PVMCC pVM) 2622 { 2621 * @param pVM The cross context VM structure. 2622 * @param uOldMaxHzHint The old global hint. 2623 */ 2624 DECL_NO_INLINE(static, uint32_t) tmGetFrequencyHintSlow(PVMCC pVM, uint32_t uOldMaxHzHint) 2625 { 2626 /* Set two bits, though not entirely sure it's needed (too exhaused to think clearly) 2627 but it should force other callers thru the slow path while we're recalculating and 2628 help us detect changes while we're recalculating. 
*/ 2629 AssertCompile(RT_ELEMENTS(pVM->tm.s.aTimerQueues) <= 16); 2630 2623 2631 /* 2624 * Query the value, recalculate it if necessary.2625 *2626 2632 * The "right" highest frequency value isn't so important that we'll block 2627 * waiting on the timer semaphore .2633 * waiting on the timer semaphores. 2628 2634 */ 2629 uint32_t uMaxHzHint = ASMAtomicUoReadU32(&pVM->tm.s.uMaxHzHint); 2630 if (RT_UNLIKELY(ASMAtomicReadBool(&pVM->tm.s.fHzHintNeedsUpdating))) 2631 { 2632 if (RT_SUCCESS(TM_TRY_LOCK_TIMERS(pVM))) 2635 uint32_t uMaxHzHint = 0; 2636 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++) 2637 { 2638 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue]; 2639 2640 /* Get the max Hz hint for the queue. */ 2641 uint32_t uMaxHzHintQueue; 2642 if ( !(ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16))) 2643 || RT_FAILURE_NP(PDMCritSectTryEnter(&pQueue->TimerLock))) 2644 uMaxHzHintQueue = ASMAtomicReadU32(&pQueue->uMaxHzHint); 2645 else 2633 2646 { 2634 ASMAtomicWriteBool(&pVM->tm.s.fHzHintNeedsUpdating, false); 2635 2636 /* 2637 * Loop over the timers associated with each clock. 2638 */ 2639 uMaxHzHint = 0; 2640 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++) 2647 /* Is it still necessary to do updating? 
*/ 2648 if (ASMAtomicUoReadU64(&pVM->tm.s.HzHint.u64Combined) & (RT_BIT_32(idxQueue) | RT_BIT_32(idxQueue + 16))) 2641 2649 { 2642 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue]; 2650 ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue + 16)); /* clear one flag up front */ 2651 2643 2652 PTMTIMERQUEUECC pQueueCC = TM_GET_TIMER_QUEUE_CC(pVM, idxQueue, pQueue); 2644 for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue); pCur; pCur = tmTimerGetNext(pQueueCC, pCur)) 2653 uMaxHzHintQueue = 0; 2654 for (PTMTIMER pCur = tmTimerQueueGetHead(pQueueCC, pQueue); 2655 pCur; 2656 pCur = tmTimerGetNext(pQueueCC, pCur)) 2645 2657 { 2646 2658 uint32_t uHzHint = ASMAtomicUoReadU32(&pCur->uHzHint); 2647 if (uHzHint > uMaxHzHint )2659 if (uHzHint > uMaxHzHintQueue) 2648 2660 { 2649 switch (pCur->enmState) 2661 TMTIMERSTATE enmState = pCur->enmState; 2662 switch (enmState) 2650 2663 { 2651 2664 case TMTIMERSTATE_ACTIVE: … … 2656 2669 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE: 2657 2670 case TMTIMERSTATE_PENDING_RESCHEDULE: 2658 uMaxHzHint = uHzHint;2671 uMaxHzHintQueue = uHzHint; 2659 2672 break; 2660 2673 … … 2670 2683 } 2671 2684 } 2685 2686 /* Write the new Hz hint for the quest and clear the other update flag. */ 2687 ASMAtomicUoWriteU32(&pQueue->uMaxHzHint, uMaxHzHintQueue); 2688 ASMAtomicAndU64(&pVM->tm.s.HzHint.u64Combined, ~RT_BIT_64(idxQueue)); 2672 2689 } 2673 ASMAtomicWriteU32(&pVM->tm.s.uMaxHzHint, uMaxHzHint); 2674 Log(("tmGetFrequencyHint: New value %u Hz\n", uMaxHzHint)); 2675 TM_UNLOCK_TIMERS(pVM); 2690 else 2691 uMaxHzHintQueue = ASMAtomicUoReadU32(&pQueue->uMaxHzHint); 2692 2693 PDMCritSectLeave(&pQueue->TimerLock); 2676 2694 } 2677 } 2695 2696 /* Update the global max Hz hint. */ 2697 if (uMaxHzHint < uMaxHzHintQueue) 2698 uMaxHzHint = uMaxHzHintQueue; 2699 } 2700 2701 /* 2702 * Update the frequency hint if no pending frequency changes and we didn't race anyone thru here. 
2703 */ 2704 uint64_t u64Actual = RT_MAKE_U64(0 /*no pending updates*/, uOldMaxHzHint); 2705 if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual)) 2706 Log(("tmGetFrequencyHintSlow: New value %u Hz\n", uMaxHzHint)); 2707 else 2708 for (uint32_t iTry = 1;; iTry++) 2709 { 2710 if (RT_LO_U32(u64Actual) != 0) 2711 Log(("tmGetFrequencyHintSlow: Outdated value %u Hz (%#x, try %u)\n", uMaxHzHint, RT_LO_U32(u64Actual), iTry)); 2712 else if (iTry >= 4) 2713 Log(("tmGetFrequencyHintSlow: Unable to set %u Hz (try %u)\n", uMaxHzHint, iTry)); 2714 else if (ASMAtomicCmpXchgExU64(&pVM->tm.s.HzHint.u64Combined, RT_MAKE_U64(0, uMaxHzHint), u64Actual, &u64Actual)) 2715 Log(("tmGetFrequencyHintSlow: New value %u Hz (try %u)\n", uMaxHzHint, iTry)); 2716 else 2717 continue; 2718 break; 2719 } 2678 2720 return uMaxHzHint; 2721 } 2722 2723 2724 /** 2725 * Gets the highest frequency hint for all the important timers. 2726 * 2727 * @returns The highest frequency. 0 if no timers care. 2728 * @param pVM The cross context VM structure. 2729 */ 2730 DECLINLINE(uint32_t) tmGetFrequencyHint(PVMCC pVM) 2731 { 2732 /* 2733 * Query the value, recalculate it if necessary. 2734 */ 2735 uint64_t u64Combined = ASMAtomicReadU64(&pVM->tm.s.HzHint.u64Combined); 2736 if (RT_HI_U32(u64Combined) == 0) 2737 return RT_LO_U32(u64Combined); /* hopefully somewhat likely */ 2738 return tmGetFrequencyHintSlow(pVM, RT_LO_U32(u64Combined)); 2679 2739 } 2680 2740 -
trunk/src/VBox/VMM/VMMR3/TM.cpp
r87796 r87812 210 210 AssertCompileMemberAlignment(VM, tm.s, 32); 211 211 AssertCompile(sizeof(pVM->tm.s) <= sizeof(pVM->tm.padding)); 212 AssertCompileMemberAlignment(TM, TimerCritSect, 8);213 212 AssertCompileMemberAlignment(TM, VirtualSyncLock, 8); 214 213 … … 217 216 */ 218 217 pVM->tm.s.idTimerCpu = pVM->cCpus - 1; /* The last CPU. */ 218 219 int rc = PDMR3CritSectInit(pVM, &pVM->tm.s.VirtualSyncLock, RT_SRC_POS, "TM VirtualSync Lock"); 220 AssertLogRelRCReturn(rc, rc); 219 221 220 222 strcpy(pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].szName, "virtual"); … … 231 233 pVM->tm.s.aTimerQueues[i].idxSchedule = UINT32_MAX; 232 234 pVM->tm.s.aTimerQueues[i].idxFreeHint = 1; 233 int rc = PDMR3CritSectRwInit(pVM, &pVM->tm.s.aTimerQueues[i].AllocLock, RT_SRC_POS, 234 "TM queue %s", pVM->tm.s.aTimerQueues[i].szName); 235 rc = PDMR3CritSectInit(pVM, &pVM->tm.s.aTimerQueues[i].TimerLock, RT_SRC_POS, 236 "TM %s queue timer lock", pVM->tm.s.aTimerQueues[i].szName); 237 AssertLogRelRCReturn(rc, rc); 238 239 rc = PDMR3CritSectRwInit(pVM, &pVM->tm.s.aTimerQueues[i].AllocLock, RT_SRC_POS, 240 "TM %s queue alloc lock", pVM->tm.s.aTimerQueues[i].szName); 235 241 AssertLogRelRCReturn(rc, rc); 236 242 } … … 249 255 250 256 RTHCPHYS HCPhysGIP; 251 intrc = SUPR3GipGetPhys(&HCPhysGIP);257 rc = SUPR3GipGetPhys(&HCPhysGIP); 252 258 AssertMsgRCReturn(rc, ("Failed to get GIP physical address!\n"), rc); 253 259 … … 305 311 AssertRelease(pVM->tm.s.VirtualGetRawDataR0.pu64Prev); 306 312 /* The rest is done in TMR3InitFinalize() since it's too early to call PDM. 
*/ 307 308 /*309 * Init the locks.310 */311 rc = PDMR3CritSectInit(pVM, &pVM->tm.s.TimerCritSect, RT_SRC_POS, "TM Timer Lock");312 if (RT_FAILURE(rc))313 return rc;314 rc = PDMR3CritSectInit(pVM, &pVM->tm.s.VirtualSyncLock, RT_SRC_POS, "TM VirtualSync Lock");315 if (RT_FAILURE(rc))316 return rc;317 313 318 314 /* … … 699 695 STAM_REL_REG( pVM,(void*)&pVM->tm.s.offVirtualSync, STAMTYPE_U64, "/TM/VirtualSync/CurrentOffset", STAMUNIT_NS, "The current offset. (subtract GivenUp to get the lag)"); 700 696 STAM_REL_REG_USED(pVM,(void*)&pVM->tm.s.offVirtualSyncGivenUp, STAMTYPE_U64, "/TM/VirtualSync/GivenUp", STAMUNIT_NS, "Nanoseconds of the 'CurrentOffset' that's been given up and won't ever be attempted caught up with."); 701 STAM_REL_REG( pVM,(void*)&pVM->tm.s.uMaxHzHint, STAMTYPE_U32, "/TM/MaxHzHint", STAMUNIT_HZ, "Max guest timer frequency hint."); 697 STAM_REL_REG( pVM,(void*)&pVM->tm.s.HzHint.s.uMax, STAMTYPE_U32, "/TM/MaxHzHint", STAMUNIT_HZ, "Max guest timer frequency hint."); 698 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->tm.s.aTimerQueues); i++) 699 { 700 rc = STAMR3RegisterF(pVM, (void *)&pVM->tm.s.aTimerQueues[i].uMaxHzHint, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_HZ, 701 "", "/TM/MaxHzHint/%s", pVM->tm.s.aTimerQueues[i].szName); 702 AssertRC(rc); 703 } 702 704 703 705 #ifdef VBOX_WITH_STATISTICS … … 709 711 STAM_REG_USED(pVM,(void *)&pVM->tm.s.VirtualGetRawDataRC.cUpdateRaces,STAMTYPE_U32, "/TM/RC/cUpdateRaces", STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp."); 710 712 STAM_REG(pVM, &pVM->tm.s.StatDoQueues, STAMTYPE_PROFILE, "/TM/DoQueues", STAMUNIT_TICKS_PER_CALL, "Profiling timer TMR3TimerQueuesDo."); 711 STAM_REG(pVM, &pVM->tm.s.a StatDoQueues[TMCLOCK_VIRTUAL], STAMTYPE_PROFILE_ADV, "/TM/DoQueues/Virtual", STAMUNIT_TICKS_PER_CALL, "Time spent on the virtual clock queue.");712 STAM_REG(pVM, &pVM->tm.s.a StatDoQueues[TMCLOCK_VIRTUAL_SYNC], STAMTYPE_PROFILE_ADV,"/TM/DoQueues/VirtualSync", STAMUNIT_TICKS_PER_CALL, "Time spent on 
the virtual sync clock queue.");713 STAM_REG(pVM, &pVM->tm.s.a StatDoQueues[TMCLOCK_REAL], STAMTYPE_PROFILE_ADV, "/TM/DoQueues/Real", STAMUNIT_TICKS_PER_CALL, "Time spent on the real clock queue.");713 STAM_REG(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].StatDo, STAMTYPE_PROFILE, "/TM/DoQueues/Virtual", STAMUNIT_TICKS_PER_CALL, "Time spent on the virtual clock queue."); 714 STAM_REG(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].StatDo,STAMTYPE_PROFILE,"/TM/DoQueues/VirtualSync", STAMUNIT_TICKS_PER_CALL, "Time spent on the virtual sync clock queue."); 715 STAM_REG(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_REAL].StatDo, STAMTYPE_PROFILE, "/TM/DoQueues/Real", STAMUNIT_TICKS_PER_CALL, "Time spent on the real clock queue."); 714 716 715 717 STAM_REG(pVM, &pVM->tm.s.StatPoll, STAMTYPE_COUNTER, "/TM/Poll", STAMUNIT_OCCURENCES, "TMTimerPoll calls."); … … 1212 1214 LogFlow(("TMR3Reset:\n")); 1213 1215 VM_ASSERT_EMT(pVM); 1214 TM_LOCK_TIMERS(pVM);1215 1216 1216 1217 /* … … 1239 1240 * Process the queues. 
1240 1241 */ 1241 for (int i = 0; i < TMCLOCK_MAX; i++) 1242 tmTimerQueueSchedule(pVM, &pVM->tm.s.aTimerQueues[i], &pVM->tm.s.aTimerQueues[i]); 1242 for (uint32_t idxQueue = 0; idxQueue < RT_ELEMENTS(pVM->tm.s.aTimerQueues); idxQueue++) 1243 { 1244 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue]; 1245 PDMCritSectEnter(&pQueue->TimerLock, VERR_IGNORED); 1246 tmTimerQueueSchedule(pVM, pQueue, pQueue); 1247 PDMCritSectLeave(&pQueue->TimerLock); 1248 } 1243 1249 #ifdef VBOX_STRICT 1244 1250 tmTimerQueuesSanityChecks(pVM, "TMR3Reset"); … … 1292 1298 pVCpu->tm.s.u64TSCLastSeen = 0; 1293 1299 } 1294 1295 TM_UNLOCK_TIMERS(pVM);1296 1300 } 1297 1301 … … 1693 1697 1694 1698 #ifdef VBOX_STRICT 1695 TM_LOCK_TIMERS(pVM);1696 1699 tmTimerQueuesSanityChecks(pVM, "tmR3TimerCreate"); 1697 TM_UNLOCK_TIMERS(pVM);1698 1700 #endif 1699 1701 … … 1900 1902 */ 1901 1903 PDMCritSectRwEnterExcl(&pQueue->AllocLock, VERR_IGNORED); 1902 TM_LOCK_TIMERS(pVM); 1904 PDMCritSectEnter(&pQueue->TimerLock, VERR_IGNORED); 1905 1903 1906 for (int cRetries = 1000;; cRetries--) 1904 1907 { … … 1937 1940 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE: 1938 1941 AssertMsgFailed(("%p:.enmState=%s %s\n", pTimer, tmTimerState(enmState), pTimer->szName)); 1939 TM_UNLOCK_TIMERS(pVM);1942 PDMCritSectLeave(&pQueue->TimerLock); 1940 1943 PDMCritSectRwLeaveExcl(&pQueue->AllocLock); 1941 1944 … … 1946 1949 1947 1950 PDMCritSectRwEnterExcl(&pQueue->AllocLock, VERR_IGNORED); 1948 TM_LOCK_TIMERS(pVM);1951 PDMCritSectEnter(&pQueue->TimerLock, VERR_IGNORED); 1949 1952 continue; 1950 1953 … … 1954 1957 case TMTIMERSTATE_FREE: 1955 1958 case TMTIMERSTATE_DESTROY: 1956 TM_UNLOCK_TIMERS(pVM);1959 PDMCritSectLeave(&pQueue->TimerLock); 1957 1960 PDMCritSectRwLeaveExcl(&pQueue->AllocLock); 1958 1961 AssertLogRelMsgFailedReturn(("pTimer=%p %s\n", pTimer, tmTimerState(enmState)), VERR_TM_INVALID_STATE); … … 1960 1963 default: 1961 1964 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, pTimer->szName)); 1962 
TM_UNLOCK_TIMERS(pVM);1965 PDMCritSectLeave(&pQueue->TimerLock); 1963 1966 PDMCritSectRwLeaveExcl(&pQueue->AllocLock); 1964 1967 return VERR_TM_UNKNOWN_STATE; … … 1974 1977 break; 1975 1978 AssertMsgFailed(("%p:.enmState=%s %s\n", pTimer, tmTimerState(enmState), pTimer->szName)); 1976 TM_UNLOCK_TIMERS(pVM);1979 PDMCritSectLeave(&pQueue->TimerLock); 1977 1980 PDMCritSectRwLeaveExcl(&pQueue->AllocLock); 1978 1981 … … 1981 1984 1982 1985 PDMCritSectRwEnterExcl(&pQueue->AllocLock, VERR_IGNORED); 1983 TM_LOCK_TIMERS(pVM);1986 PDMCritSectEnter(&pQueue->TimerLock, VERR_IGNORED); 1984 1987 } 1985 1988 … … 2038 2041 tmTimerQueuesSanityChecks(pVM, "TMR3TimerDestroy"); 2039 2042 #endif 2040 TM_UNLOCK_TIMERS(pVM);2043 PDMCritSectLeave(&pQueue->TimerLock); 2041 2044 PDMCritSectRwLeaveExcl(&pQueue->AllocLock); 2042 2045 return VINF_SUCCESS; … … 2324 2327 Assert(!pVM->tm.s.fRunningQueues); 2325 2328 ASMAtomicWriteBool(&pVM->tm.s.fRunningQueues, true); 2326 TM_LOCK_TIMERS(pVM);2327 2329 2328 2330 /* … … 2331 2333 AssertCompile(TMCLOCK_MAX == 4); 2332 2334 2333 /* TMCLOCK_VIRTUAL_SYNC (see also TMR3VirtualSyncFF) */ 2334 STAM_PROFILE_ADV_START(&pVM->tm.s.aStatDoQueues[TMCLOCK_VIRTUAL_SYNC], s1); 2335 /* 2336 * TMCLOCK_VIRTUAL_SYNC (see also TMR3VirtualSyncFF) 2337 */ 2338 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC]; 2339 STAM_PROFILE_START(&pQueue->StatDo, s1); 2340 PDMCritSectEnter(&pQueue->TimerLock, VERR_IGNORED); 2335 2341 PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_IGNORED); 2336 2342 ASMAtomicWriteBool(&pVM->tm.s.fRunningVirtualSyncQueue, true); 2337 2343 VMCPU_FF_CLEAR(pVCpuDst, VMCPU_FF_TIMER); /* Clear the FF once we started working for real. 
*/ 2338 2344 2339 Assert(p VM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL_SYNC].idxSchedule == UINT32_MAX);2345 Assert(pQueue->idxSchedule == UINT32_MAX); 2340 2346 tmR3TimerQueueRunVirtualSync(pVM); 2341 2347 if (pVM->tm.s.fVirtualSyncTicking) /** @todo move into tmR3TimerQueueRunVirtualSync - FIXME */ … … 2344 2350 ASMAtomicWriteBool(&pVM->tm.s.fRunningVirtualSyncQueue, false); 2345 2351 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock); 2346 STAM_PROFILE_ADV_STOP(&pVM->tm.s.aStatDoQueues[TMCLOCK_VIRTUAL_SYNC], s1); 2347 2348 /* TMCLOCK_VIRTUAL */ 2349 STAM_PROFILE_ADV_START(&pVM->tm.s.aStatDoQueues[TMCLOCK_VIRTUAL], s2); 2350 if (pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].idxSchedule != UINT32_MAX) 2351 tmTimerQueueSchedule(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL], &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL]); 2352 tmR3TimerQueueRun(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL]); 2353 STAM_PROFILE_ADV_STOP(&pVM->tm.s.aStatDoQueues[TMCLOCK_VIRTUAL], s2); 2354 2355 /* TMCLOCK_TSC */ 2352 PDMCritSectLeave(&pQueue->TimerLock); 2353 STAM_PROFILE_STOP(&pQueue->StatDo, s1); 2354 2355 /* 2356 * TMCLOCK_VIRTUAL 2357 */ 2358 pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL]; 2359 STAM_PROFILE_START(&pQueue->StatDo, s2); 2360 PDMCritSectEnter(&pQueue->TimerLock, VERR_IGNORED); 2361 if (pQueue->idxSchedule != UINT32_MAX) 2362 tmTimerQueueSchedule(pVM, pQueue, pQueue); 2363 tmR3TimerQueueRun(pVM, pQueue); 2364 PDMCritSectLeave(&pQueue->TimerLock); 2365 STAM_PROFILE_STOP(&pQueue->StatDo, s2); 2366 2367 /* 2368 * TMCLOCK_TSC 2369 */ 2356 2370 Assert(pVM->tm.s.aTimerQueues[TMCLOCK_TSC].idxActive == UINT32_MAX); /* not used */ 2357 2371 2358 /* TMCLOCK_REAL */ 2359 STAM_PROFILE_ADV_START(&pVM->tm.s.aStatDoQueues[TMCLOCK_REAL], s3); 2360 if (pVM->tm.s.aTimerQueues[TMCLOCK_REAL].idxSchedule != UINT32_MAX) 2361 tmTimerQueueSchedule(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_REAL], &pVM->tm.s.aTimerQueues[TMCLOCK_REAL]); 2362 tmR3TimerQueueRun(pVM, &pVM->tm.s.aTimerQueues[TMCLOCK_REAL]); 2363 
STAM_PROFILE_ADV_STOP(&pVM->tm.s.aStatDoQueues[TMCLOCK_REAL], s3); 2372 /* 2373 * TMCLOCK_REAL 2374 */ 2375 pQueue = &pVM->tm.s.aTimerQueues[TMCLOCK_REAL]; 2376 STAM_PROFILE_START(&pQueue->StatDo, s3); 2377 PDMCritSectEnter(&pQueue->TimerLock, VERR_IGNORED); 2378 if (pQueue->idxSchedule != UINT32_MAX) 2379 tmTimerQueueSchedule(pVM, pQueue, pQueue); 2380 tmR3TimerQueueRun(pVM, pQueue); 2381 PDMCritSectLeave(&pQueue->TimerLock); 2382 STAM_PROFILE_STOP(&pQueue->StatDo, s3); 2364 2383 2365 2384 #ifdef VBOX_STRICT 2366 2385 /* check that we didn't screw up. */ 2367 TM_LOCK_TIMERS(pVM);2368 2386 tmTimerQueuesSanityChecks(pVM, "TMR3TimerQueuesDo"); 2369 TM_UNLOCK_TIMERS(pVM);2370 2387 #endif 2371 2388 … … 2373 2390 Log2(("TMR3TimerQueuesDo: returns void\n")); 2374 2391 ASMAtomicWriteBool(&pVM->tm.s.fRunningQueues, false); 2375 TM_UNLOCK_TIMERS(pVM);2376 2392 STAM_PROFILE_STOP(&pVM->tm.s.StatDoQueues, a); 2377 2393 } … … 2649 2665 if (fRc && pTimer->uHzHint) 2650 2666 { 2651 if (pTimer->uHzHint >= p VM->tm.s.uMaxHzHint)2652 ASMAtomic WriteBool(&pVM->tm.s.fHzHintNeedsUpdating, true);2667 if (pTimer->uHzHint >= pQueue->uMaxHzHint) 2668 ASMAtomicOrU64(&pVM->tm.s.HzHint.u64Combined, RT_BIT_32(TMCLOCK_VIRTUAL_SYNC) | RT_BIT_32(TMCLOCK_VIRTUAL_SYNC + 16)); 2653 2669 pTimer->uHzHint = 0; 2654 2670 } … … 2825 2841 2826 2842 /* try run it. */ 2827 TM_LOCK_TIMERS(pVM);2843 PDMCritSectEnter(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].TimerLock, VERR_IGNORED); 2828 2844 PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_IGNORED); 2829 2845 if (pVM->tm.s.fVirtualSyncTicking) … … 2841 2857 ASMAtomicWriteBool(&pVM->tm.s.fRunningVirtualSyncQueue, false); 2842 2858 } 2859 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock); 2843 2860 STAM_PROFILE_STOP(&pVM->tm.s.StatVirtualSyncFF, a); /* before the unlock! 
*/ 2844 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock); 2845 TM_UNLOCK_TIMERS(pVM); 2861 PDMCritSectLeave(&pVM->tm.s.aTimerQueues[TMCLOCK_VIRTUAL].TimerLock); 2846 2862 } 2847 2863 } … … 3152 3168 { 3153 3169 VMCPU_ASSERT_EMT(pVCpu); 3170 PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_IGNORED); /* Paranoia: Exploiting the virtual sync lock here. */ 3154 3171 3155 3172 /* 3156 3173 * The shared virtual clock (includes virtual sync which is tied to it). 3157 3174 */ 3158 TM_LOCK_TIMERS(pVM); /* Paranoia: Exploiting the timer lock here. */3159 3175 int rc = tmVirtualPauseLocked(pVM); 3160 TM_UNLOCK_TIMERS(pVM); 3161 if (RT_FAILURE(rc)) 3162 return rc; 3176 AssertRCReturnStmt(rc, PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock), rc); 3163 3177 3164 3178 /* … … 3168 3182 if (!pVM->tm.s.fTSCTiedToExecution) 3169 3183 { 3170 TM_LOCK_TIMERS(pVM); /* Exploit the timer lock for synchronization. */3171 3184 rc = tmCpuTickPauseLocked(pVM, pVCpu); 3172 TM_UNLOCK_TIMERS(pVM); 3173 if (RT_FAILURE(rc)) 3174 return rc; 3185 AssertRCReturnStmt(rc, PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock), rc); 3175 3186 } 3176 3187 … … 3198 3209 #endif 3199 3210 3211 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock); 3200 3212 return VINF_SUCCESS; 3201 3213 } … … 3213 3225 { 3214 3226 VMCPU_ASSERT_EMT(pVCpu); 3215 int rc;3227 PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_IGNORED); /* Paranoia: Exploiting the virtual sync lock here. */ 3216 3228 3217 3229 #ifndef VBOX_WITHOUT_NS_ACCOUNTING … … 3233 3245 if (!pVM->tm.s.fTSCTiedToExecution) 3234 3246 { 3235 TM_LOCK_TIMERS(pVM); /* Exploit the timer lock for synchronization. */ 3236 rc = tmCpuTickResumeLocked(pVM, pVCpu); 3237 TM_UNLOCK_TIMERS(pVM); 3238 if (RT_FAILURE(rc)) 3239 return rc; 3247 int rc = tmCpuTickResumeLocked(pVM, pVCpu); 3248 AssertRCReturnStmt(rc, PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock), rc); 3240 3249 } 3241 3250 … … 3243 3252 * The shared virtual clock (includes virtual sync which is tied to it). 
3244 3253 */ 3245 TM_LOCK_TIMERS(pVM); /* Paranoia: Exploiting the timer lock here. */ 3246 rc = tmVirtualResumeLocked(pVM); 3247 TM_UNLOCK_TIMERS(pVM); 3248 3254 int rc = tmVirtualResumeLocked(pVM); 3255 3256 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock); 3249 3257 return rc; 3250 3258 } … … 3288 3296 * TM level and make it affect TMR3UTCNow as well! */ 3289 3297 3298 PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_IGNORED); /* Paranoia: Exploiting the virtual sync lock here. */ 3299 3290 3300 /* 3291 3301 * If the time is running we'll have to pause it before we can change 3292 3302 * the warp drive settings. 3293 3303 */ 3294 TM_LOCK_TIMERS(pVM); /* Paranoia: Exploiting the timer lock here. */3295 3304 bool fPaused = !!pVM->tm.s.cVirtualTicking; 3296 3305 if (fPaused) /** @todo this isn't really working, but wtf. */ … … 3305 3314 if (fPaused) 3306 3315 TMR3NotifyResume(pVM, pVCpu); 3307 TM_UNLOCK_TIMERS(pVM); 3316 3317 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock); 3308 3318 return VINF_SUCCESS; 3309 3319 } … … 3896 3906 PTMTIMERQUEUE pQueue = &pVM->tm.s.aTimerQueues[idxQueue]; 3897 3907 PDMCritSectRwEnterShared(&pQueue->AllocLock, VERR_IGNORED); 3898 TM_LOCK_TIMERS(pVM); 3908 PDMCritSectEnter(&pQueue->TimerLock, VERR_IGNORED); 3909 3899 3910 for (PTMTIMERR3 pTimer = tmTimerQueueGetHead(pQueue, pQueue); 3900 3911 pTimer; … … 3914 3925 pTimer->szName); 3915 3926 } 3916 TM_UNLOCK_TIMERS(pVM); 3927 3928 PDMCritSectLeave(&pQueue->TimerLock); 3917 3929 PDMCritSectRwLeaveShared(&pQueue->AllocLock); 3918 3930 } -
trunk/src/VBox/VMM/include/TMInline.h
r87792 r87812 163 163 } 164 164 165 /** @def TMTIMER_HANDLE_TO_VARS_RETURN_EX 166 * Converts a timer handle to a timer pointer, returning @a a_rcRet if the 167 * handle is invalid. 168 * 169 * This defines the following variables: 170 * - idxQueue: The queue index. 171 * - pQueueCC: Pointer to the context specific queue data. 172 * - pTimer: The timer pointer. 173 * - idxTimer: The timer index. 174 * 175 * @param a_pVM The cross context VM structure. 176 * @param a_hTimer The timer handle to translate. 177 * @param a_rcRet What to return on failure. 178 * 179 * @note This macro has no scoping, so careful when using it around 180 * conditional statements! 181 */ 182 #ifdef IN_RING3 183 # define TMTIMER_HANDLE_TO_VARS_RETURN_EX(a_pVM, a_hTimer, a_rcRet) \ 184 uintptr_t const idxQueue = (uintptr_t)((a_hTimer) >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) \ 185 & (uintptr_t)TMTIMERHANDLE_QUEUE_IDX_SMASK; \ 186 AssertReturn(idxQueue < RT_ELEMENTS((a_pVM)->tm.s.aTimerQueues), a_rcRet); \ 187 PTMTIMERQUEUE const pQueue = &(a_pVM)->tm.s.aTimerQueues[idxQueue]; \ 188 PTMTIMERQUEUE const pQueueCC = pQueue; RT_NOREF(pQueueCC); \ 189 \ 190 uintptr_t const idxTimer = (uintptr_t)((a_hTimer) & TMTIMERHANDLE_TIMER_IDX_MASK); \ 191 AssertReturn(idxTimer < pQueue->cTimersAlloc, a_rcRet); \ 192 \ 193 PTMTIMER const pTimer = &pQueue->paTimers[idxTimer]; \ 194 AssertReturn(pTimer->hSelf == a_hTimer, a_rcRet) 195 #else 196 # define TMTIMER_HANDLE_TO_VARS_RETURN_EX(a_pVM, a_hTimer, a_rcRet) \ 197 uintptr_t const idxQueue = (uintptr_t)((a_hTimer) >> TMTIMERHANDLE_QUEUE_IDX_SHIFT) \ 198 & (uintptr_t)TMTIMERHANDLE_QUEUE_IDX_SMASK; \ 199 AssertReturn(idxQueue < RT_ELEMENTS((a_pVM)->tm.s.aTimerQueues), a_rcRet); \ 200 AssertCompile(RT_ELEMENTS((a_pVM)->tm.s.aTimerQueues) == RT_ELEMENTS((a_pVM)->tmr0.s.aTimerQueues)); \ 201 PTMTIMERQUEUE const pQueue = &(a_pVM)->tm.s.aTimerQueues[idxQueue]; RT_NOREF(pQueue); \ 202 PTMTIMERQUEUER0 const pQueueCC = &(a_pVM)->tmr0.s.aTimerQueues[idxQueue]; \ 203 \ 204 
uintptr_t const idxTimer = (uintptr_t)((a_hTimer) & TMTIMERHANDLE_TIMER_IDX_MASK); \ 205 AssertReturn(idxTimer < pQueueCC->cTimersAlloc, a_rcRet); \ 206 \ 207 PTMTIMER const pTimer = &pQueueCC->paTimers[idxTimer]; \ 208 AssertReturn(pTimer->hSelf == a_hTimer, a_rcRet); \ 209 Assert(pTimer->fFlags & TMTIMER_FLAGS_RING0); \ 210 Assert(VM_IS_EMT(pVM)) 211 #endif 212 213 214 /** @def TMTIMER_HANDLE_TO_VARS_RETURN 215 * Converts a timer handle to a timer pointer, returning VERR_INVALID_HANDLE if 216 * the handle is invalid. 217 * 218 * This defines the following variables: 219 * - idxQueue: The queue index. 220 * - pQueueCC: Pointer to the context specific queue data. 221 * - pTimer: The timer pointer. 222 * - idxTimer: The timer index. 223 * 224 * @param a_pVM The cross context VM structure. 225 * @param a_hTimer The timer handle to translate. 226 * 227 * @note This macro has no scoping, so careful when using it around 228 * conditional statements! 229 */ 230 #define TMTIMER_HANDLE_TO_VARS_RETURN(a_pVM, a_hTimer) TMTIMER_HANDLE_TO_VARS_RETURN_EX(a_pVM, a_hTimer, VERR_INVALID_HANDLE) 231 165 232 166 233 /** @def TMTIMER_HANDLE_TO_PTR_RETURN_EX -
trunk/src/VBox/VMM/include/TMInternal.h
r87794 r87812 284 284 bool fCannotGrow; 285 285 /** Align on 64-byte boundrary. */ 286 bool afAlignment[7]; 287 /** Lock serializing timer allocation and deallocation. */ 286 bool afAlignment1[7]; 287 /** Time spent doing scheduling and timer callbacks. */ 288 STAMPROFILE StatDo; 289 /** The current max timer Hz hint. */ 290 uint32_t volatile uMaxHzHint; 291 uint32_t u64Alignment2[7]; 292 /** Lock serializing the active timer list and associated work. */ 293 PDMCRITSECT TimerLock; 294 /** Lock serializing timer allocation and deallocation. 295 * @note This may be used in read-mode all over the place if we later 296 * implement runtime array growing. */ 288 297 PDMCRITSECTRW AllocLock; 289 298 } TMTIMERQUEUE; … … 391 400 typedef struct TM 392 401 { 402 /** Timer queues for the different clock types. 403 * @note is first in the structure to ensure cache-line alignment. */ 404 TMTIMERQUEUE aTimerQueues[TMCLOCK_MAX]; 405 393 406 /** The current TSC mode of the VM. 394 407 * Config variable: Mode (string). */ … … 496 509 } aVirtualSyncCatchUpPeriods[TM_MAX_CATCHUP_PERIODS]; 497 510 498 /** The current max timer Hz hint. */ 499 uint32_t volatile uMaxHzHint; 500 /** Whether to recalulate the HzHint next time its queried. */ 501 bool volatile fHzHintNeedsUpdating; 502 /** Alignment */ 503 bool afAlignment2[3]; 511 union 512 { 513 /** Combined value for updating. */ 514 uint64_t volatile u64Combined; 515 struct 516 { 517 /** Bitmap indicating which timer queues needs their uMaxHzHint updated. */ 518 uint32_t volatile bmNeedsUpdating; 519 /** The current max timer Hz hint. */ 520 uint32_t volatile uMax; 521 } s; 522 } HzHint; 504 523 /** @cfgm{/TM/HostHzMax, uint32_t, Hz, 0, UINT32_MAX, 20000} 505 524 * The max host Hz frequency hint returned by TMCalcHostTimerFrequency. */ … … 537 556 R3PTRTYPE(char *) pszAlignment2b; 538 557 539 /** Timer queues for the different clock types. */540 TMTIMERQUEUE aTimerQueues[TMCLOCK_MAX];541 542 558 /** Pointer to our RC mapping of the GIP. 
*/ 543 559 RCPTRTYPE(void *) pvGIPRC; 544 560 /** Pointer to our R3 mapping of the GIP. */ 545 561 R3PTRTYPE(void *) pvGIPR3; 562 546 563 547 564 /** The schedule timer timer handle (runtime timer). … … 560 577 bool afAlignment3[2]; 561 578 562 /** Lock serializing access to the timer lists. */563 PDMCRITSECT TimerCritSect;564 579 /** Lock serializing access to the VirtualSync clock and the associated 565 580 * timer queue. */ … … 572 587 * @{ */ 573 588 STAMPROFILE StatDoQueues; 574 STAMPROFILEADV aStatDoQueues[TMCLOCK_MAX];575 589 /** @} */ 576 590 /** tmSchedule … … 836 850 DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra); 837 851 DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSBadCpuIndex(PRTTIMENANOTSDATA pData, PRTITMENANOTSEXTRA pExtra, 838 uint16_t idApic, uint16_t iCpuSet, uint16_t iGipCpu); 839 840 /** 841 * Try take the timer lock, wait in ring-3 return VERR_SEM_BUSY in R0/RC. 842 * 843 * @retval VINF_SUCCESS on success (always in ring-3). 844 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy. 845 * 846 * @param a_pVM Pointer to the VM. 847 * 848 * @remarks The virtual sync timer queue requires the virtual sync lock. 849 */ 850 #define TM_LOCK_TIMERS(a_pVM) PDMCritSectEnter(&(a_pVM)->tm.s.TimerCritSect, VERR_SEM_BUSY) 851 852 /** 853 * Try take the timer lock, no waiting. 854 * 855 * @retval VINF_SUCCESS on success. 856 * @retval VERR_SEM_BUSY if busy. 857 * 858 * @param a_pVM Pointer to the VM. 859 * 860 * @remarks The virtual sync timer queue requires the virtual sync lock. 861 */ 862 #define TM_TRY_LOCK_TIMERS(a_pVM) PDMCritSectTryEnter(&(a_pVM)->tm.s.TimerCritSect) 863 864 /** Lock the timers (sans the virtual sync queue). */ 865 #define TM_UNLOCK_TIMERS(a_pVM) do { PDMCritSectLeave(&(a_pVM)->tm.s.TimerCritSect); } while (0) 866 867 /** Checks that the caller owns the timer lock. 
*/ 868 #define TM_ASSERT_TIMER_LOCK_OWNERSHIP(a_pVM) \ 869 Assert(PDMCritSectIsOwner(&(a_pVM)->tm.s.TimerCritSect)) 870 852 uint16_t idApic, uint16_t iCpuSet, uint16_t iGipCpu); 871 853 /** @} */ 872 854 … … 874 856 875 857 #endif /* !VMM_INCLUDED_SRC_include_TMInternal_h */ 876 -
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r87563 r87812 348 348 349 349 /* TM */ 350 CHECK_MEMBER_ALIGNMENT(TM, TimerCritSect, sizeof(uintptr_t));350 CHECK_MEMBER_ALIGNMENT(TM, aTimerQueues, 64); 351 351 CHECK_MEMBER_ALIGNMENT(TM, VirtualSyncLock, sizeof(uintptr_t)); 352 352
Note:
See TracChangeset
for help on using the changeset viewer.