- Timestamp: Dec 3, 2014 1:18:41 PM
- Location: trunk/src/VBox
- Files: 10 edited
trunk/src/VBox/HostDrivers/Support/SUPDrv.c
r53396 r53430 121 121 * master with a timeout. */ 122 122 #define GIP_TSC_DELTA_SYNC_PRESTART_WORKER 5 123 /** The TSC-refinement interval in seconds. */ 124 #define GIP_TSC_REFINE_INTERVAL 5 123 125 124 126 AssertCompile(GIP_TSC_DELTA_PRIMER_LOOPS < GIP_TSC_DELTA_READ_TIME_LOOPS); … … 134 136 # define DO_NOT_START_GIP 135 137 #endif 138 139 /** Whether the application of TSC-deltas is required. */ 140 #define GIP_ARE_TSC_DELTAS_APPLICABLE(a_pGip) ((a_pGip)->u32Mode == SUPGIPMODE_INVARIANT_TSC && !g_fOsTscDeltasInSync) 136 141 137 142 … … 148 153 static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq); 149 154 static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq); 150 static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, 155 static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt,void *pvVMMR0EntryFast, void *pvVMMR0EntryEx); 151 156 static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt); 152 157 static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage); … … 164 169 static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick); 165 170 static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser); 166 static bool supdrvIsInvariantTsc(void); 167 static void supdrvGipInit(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, RTHCPHYS HCPhys, 168 uint64_t u64NanoTS, unsigned uUpdateHz, unsigned uUpdateIntervalNS, unsigned cCpus); 171 static void supdrvGipInit(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, RTHCPHYS HCPhys, uint64_t u64NanoTS, 172 unsigned uUpdateHz, unsigned uUpdateIntervalNS, unsigned cCpus); 169 173 static DECLCALLBACK(void) supdrvGipInitOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2); 170 174 static void supdrvGipTerm(PSUPGLOBALINFOPAGE pGip); … … 206 210 static volatile uint32_t g_cMpOnOffEvents; 207 211 /** TSC reading during start of TSC frequency refinement phase. */ 208 static uint64_t g_u64T SCAnchor;212 static uint64_t g_u64TscAnchor; 209 213 /** Timestamp (in nanosec) during start of TSC frequency refinement phase. */ 210 214 static uint64_t g_u64NanoTSAnchor; 215 /** Pointer to the timer used to refine the TSC frequency. */ 216 static PRTTIMER g_pTscRefineTimer; 211 217 /** Whether the host OS has already normalized the hardware TSC deltas across 212 218 * CPUs. */ … … 3927 3933 * updating. 3928 3934 * 3935 * @param pGip Pointer to the GIP. 3929 3936 * @param pGipCpu The per CPU structure for this CPU. 3930 3937 * @param u64NanoTS The current time. 3931 3938 */ 3932 static void supdrvGipReInitCpu(PSUPG IPCPU pGipCpu, uint64_t u64NanoTS)3933 { 3934 pGipCpu->u64TSC = ASMReadTSC() - pGipCpu->u32UpdateIntervalTSC;3939 static void supdrvGipReInitCpu(PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pGipCpu, uint64_t u64NanoTS) 3940 { 3941 pGipCpu->u64TSC = SUPReadTsc() - pGipCpu->u32UpdateIntervalTSC; 3935 3942 pGipCpu->u64NanoTS = u64NanoTS; 3936 3943 } … … 3950 3957 3951 3958 if (RT_LIKELY(iCpu < pGip->cCpus && pGip->aCPUs[iCpu].idCpu == idCpu)) 3952 supdrvGipReInitCpu( &pGip->aCPUs[iCpu], *(uint64_t *)pvUser2);3959 supdrvGipReInitCpu(pGip, &pGip->aCPUs[iCpu], *(uint64_t *)pvUser2); 3953 3960 3954 3961 NOREF(pvUser2); … … 4030 4037 * The more interrupts the better... 4031 4038 */ 4039 /** @todo On Windows, RTTimerRequestSystemGranularity() always succeeds, so 4040 * whats the point of the remaining calls? 
*/ 4032 4041 if ( RT_SUCCESS_NP(RTTimerRequestSystemGranularity( 976563 /* 1024 HZ */, &u32SystemResolution)) 4033 4042 || RT_SUCCESS_NP(RTTimerRequestSystemGranularity( 1000000 /* 1000 HZ */, &u32SystemResolution)) … … 4050 4059 4051 4060 u64NanoTS = RTTimeSystemNanoTS() - pGipR0->u32UpdateIntervalNS; 4052 if ( pGipR0->u32Mode == SUPGIPMODE_SYNC_TSC 4061 if ( pGipR0->u32Mode == SUPGIPMODE_INVARIANT_TSC 4062 || pGipR0->u32Mode == SUPGIPMODE_SYNC_TSC 4053 4063 || RTMpGetOnlineCount() == 1) 4054 supdrvGipReInitCpu( &pGipR0->aCPUs[0], u64NanoTS);4064 supdrvGipReInitCpu(pGipR0, &pGipR0->aCPUs[0], u64NanoTS); 4055 4065 else 4056 4066 RTMpOnAll(supdrvGipReInitCpuCallback, pGipR0, &u64NanoTS); 4057 4067 4058 4068 #ifndef DO_NOT_START_GIP 4059 rc = RTTimerStart(pDevExt->pGipTimer, 0 ); AssertRC(rc);4069 rc = RTTimerStart(pDevExt->pGipTimer, 0 /* fire ASAP */); AssertRC(rc); 4060 4070 #endif 4061 4071 rc = VINF_SUCCESS; … … 5610 5620 5611 5621 5622 /** 5623 * Returns whether the host CPU sports an invariant TSC or not. 5624 * 5625 * @returns true if invariant TSC is supported, false otherwise. 5626 */ 5627 static bool supdrvIsInvariantTsc(void) 5628 { 5629 static bool s_fQueried = false; 5630 static bool s_fIsInvariantTsc = false; 5631 if (!s_fQueried) 5632 { 5633 uint32_t uEax, uEbx, uEcx, uEdx; 5634 ASMCpuId(0x80000000, &uEax, &uEbx, &uEcx, &uEdx); 5635 if (uEax >= 0x80000007) 5636 { 5637 ASMCpuId(0x80000007, &uEax, &uEbx, &uEcx, &uEdx); 5638 if (uEdx & X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR) 5639 s_fIsInvariantTsc = true; 5640 } 5641 s_fQueried = true; 5642 } 5643 5644 return s_fIsInvariantTsc; 5645 } 5646 5647 5612 5648 #ifdef SUPDRV_USE_TSC_DELTA_THREAD 5613 5649 /** … … 5702 5738 cConsecutiveTimeouts = 0; 5703 5739 if (!cTimesMeasured++) 5740 { 5704 5741 rc = supdrvMeasureTscDeltas(pDevExt, NULL /* pidxMaster */); 5742 RTCpuSetCopy(&pDevExt->TscDeltaObtainedCpuSet, &pDevExt->pGip->OnlineCpuSet); 5743 } 5705 5744 else 5706 5745 { … … 5721 5760 rc |= supdrvMeasureTscDeltaOne(pDevExt, iCpu); 5722 5761 RTCpuSetDel(&pDevExt->TscDeltaCpuSet, pGipCpuWorker->idCpu); 5762 if (pGipCpuWorker->i64TSCDelta != INT64_MAX) 5763 RTCpuSetAdd(&pDevExt->TscDeltaObtainedCpuSet, pGipCpuWorker->idCpu); 5723 5764 } 5724 5765 } … … 5831 5872 { 5832 5873 /* Signal a few more times before giving up. */ 5833 int cTries = 5;5834 while (--cTries > 0)5874 int cTriesLeft = 5; 5875 while (--cTriesLeft > 0) 5835 5876 { 5836 5877 RTThreadUserSignal(pDevExt->hTscDeltaThread); … … 5856 5897 * notifications! 5857 5898 */ 5858 static int supdrvTscDeltaInit(PSUPDRVDEVEXT pDevExt) 5859 { 5860 Assert(!g_fOsTscDeltasInSync); 5899 static int supdrvTscDeltaThreadInit(PSUPDRVDEVEXT pDevExt) 5900 { 5901 Assert(GIP_ARE_TSC_DELTAS_APPLICABLE(pDevExt->pGip)); 5902 5861 5903 int rc = RTSpinlockCreate(&pDevExt->hTscDeltaSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "VBoxTscSpnLck"); 5862 5904 if (RT_SUCCESS(rc)) … … 5868 5910 pDevExt->cMsTscDeltaTimeout = 1; 5869 5911 RTCpuSetEmpty(&pDevExt->TscDeltaCpuSet); 5912 RTCpuSetEmpty(&pDevExt->TscDeltaObtainedCpuSet); 5870 5913 rc = RTThreadCreate(&pDevExt->hTscDeltaThread, supdrvTscDeltaThread, pDevExt, 0 /* cbStack */, 5871 5914 RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "VBoxTscThread"); … … 5926 5969 pDevExt->rcTscDelta = VERR_NOT_AVAILABLE; 5927 5970 } 5971 5972 5973 /** 5974 * Waits for TSC-delta measurements to be completed for all online CPUs. 5975 * 5976 * @returns VBox status code. 5977 * @param pDevExt Pointer to the device instance data. 
5978 */ 5979 static int supdrvTscDeltaThreadWaitForOnlineCpus(PSUPDRVDEVEXT pDevExt) 5980 { 5981 int cTriesLeft = 5; 5982 int cMsTotalWait; 5983 int cMsWaited = 0; 5984 int cMsWaitGranularity = 1; 5985 5986 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip; 5987 AssertReturn(pGip, VERR_INVALID_POINTER); 5988 5989 cMsTotalWait = RT_MIN(pGip->cPresentCpus + 2, 150); 5990 while (cTriesLeft-- > 0) 5991 { 5992 if (RTCpuSetIsEqual(&pDevExt->TscDeltaObtainedCpuSet, &pGip->OnlineCpuSet)) 5993 return VINF_SUCCESS; 5994 RTThreadSleep(cMsWaitGranularity); 5995 cMsWaited += cMsWaitGranularity; 5996 if (cMsWaited >= cMsTotalWait) 5997 break; 5998 } 5999 6000 return VERR_TIMEOUT; 6001 } 5928 6002 #endif /* SUPDRV_USE_TSC_DELTA_THREAD */ 5929 6003 … … 5935 6009 * the CPU frequency up, while for the invariant cases using a sleeping method. 5936 6010 * 5937 * The TSC frequency can vary on systems thatare not reported as invariant.5938 * However, on such systems the object of this function is to find out what the5939 * nominal, maximum TSC frequency under normalCPU operation.6011 * The TSC frequency can vary on systems which are not reported as invariant. 6012 * On such systems the object of this function is to find out what the nominal, 6013 * maximum TSC frequency under 'normal' CPU operation. 5940 6014 * 5941 6015 * @returns VBox status code. 5942 * @param p Gip Pointer to the GIP.5943 * 5944 * @remarks Must be called only aftermeasuring the TSC deltas.5945 */ 5946 static int supdrvGipMeasureTscFreq(PSUP GLOBALINFOPAGE pGip)6016 * @param pDevExt Pointer to the device instance. 6017 * 6018 * @remarks Must be called only -after- measuring the TSC deltas. 6019 */ 6020 static int supdrvGipMeasureTscFreq(PSUPDRVDEVEXT pDevExt) 5947 6021 { 5948 6022 int cTriesLeft = 4; 6023 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip; 5949 6024 5950 6025 /* Assert order. */ … … 5976 6051 ASMSetFlags(uFlags); 5977 6052 5978 /* Activate this when implemented invariant TSC GIP mode. Otherwise systems that are really invariant 5979 which get detected as async will break. */ 5980 #if 0 5981 if (supdrvIsInvariantTsc()) 6053 if (pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC) 5982 6054 { 5983 6055 /* 5984 * Sleep wait since the TSC frequency is constant,eases host load.6056 * Sleep-wait since the TSC frequency is constant, it eases host load. 5985 6057 * Shorter interval produces more variance in the frequency (esp. Windows). 5986 6058 */ … … 5992 6064 } 5993 6065 else 5994 #endif5995 6066 { 5996 6067 /* Busy-wait keeping the frequency up and measure. */ … … 6010 6081 ASMSetFlags(uFlags); 6011 6082 6012 /* Activate this when implemented invariant TSC GIP mode. Otherwise systems that are really invariant 6013 which get detected as async will break. */ 6014 #if 0 6015 if (supdrvIsInvariantTsc()) /** @todo replace with enum check. */ 6083 if (GIP_ARE_TSC_DELTAS_APPLICABLE(pGip)) 6016 6084 { 6017 6085 int rc; … … 6024 6092 || !fAppliedAfter) 6025 6093 { 6094 #ifdef SUPDRV_USE_TSC_DELTA_THREAD 6095 /* 6096 * The TSC-delta measurements are kicked-off asynchronously as each host CPU is initialized. 6097 * Therefore, if we failed to have a delta for the CPU(s) we were scheduled on (idApicBefore 6098 * and idApicAfter) then wait until we have TSC-delta measurements for all online CPUs and 6099 * proceed. This should be triggered just once if we're rather unlucky. 
6100 */ 6101 rc = supdrvTscDeltaThreadWaitForOnlineCpus(pDevExt); 6102 if (rc == VERR_TIMEOUT) 6103 { 6104 SUPR0Printf("vboxdrv: supdrvGipMeasureTscFreq: timedout waiting for TSC-delta measurements.\n"); 6105 return VERR_SUPDRV_TSC_FREQ_MEASUREMENT_FAILED; 6106 } 6107 #else 6026 6108 SUPR0Printf("vboxdrv: supdrvGipMeasureTscFreq: idApicBefore=%u idApicAfter=%u cTriesLeft=%u\n", 6027 6109 idApicBefore, idApicAfter, cTriesLeft); 6110 #endif 6028 6111 continue; 6029 6112 } 6030 6113 } 6031 #endif6032 6114 6033 6115 /* … … 6035 6117 */ 6036 6118 pGip->u64CpuHz = ((u64TscAfter - u64TscBefore) * RT_NS_1SEC_64) / (u64NanoTsAfter - u64NanoTsBefore); 6119 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC) 6120 pGip->aCPUs[0].u64CpuHz = pGip->u64CpuHz; 6037 6121 return VINF_SUCCESS; 6038 6122 } 6039 6123 6040 6124 return VERR_SUPDRV_TSC_FREQ_MEASUREMENT_FAILED; 6125 } 6126 6127 6128 /** 6129 * Timer callback function for TSC frequency refinement in invariant GIP mode. 6130 * 6131 * @param pTimer The timer. 6132 * @param pvUser Opaque pointer to the GIP. 6133 * @param iTick The timer tick. 6134 */ 6135 static DECLCALLBACK(void) supdrvRefineTscTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick) 6136 { 6137 uint8_t idApic; 6138 uint64_t u64DeltaNanoTS; 6139 uint64_t u64DeltaTsc; 6140 uint64_t u64NanoTS; 6141 uint64_t u64Tsc; 6142 RTCCUINTREG uFlags; 6143 bool fDeltaApplied = false; 6144 PSUPGLOBALINFOPAGE pGip = (PSUPGLOBALINFOPAGE)pvUser; 6145 6146 /* Paranoia. */ 6147 Assert(pGip); 6148 Assert(pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC); 6149 6150 u64NanoTS = RTTimeSystemNanoTS(); 6151 while (RTTimeSystemNanoTS() == u64NanoTS) 6152 ASMNopPause(); 6153 uFlags = ASMIntDisableFlags(); 6154 idApic = ASMGetApicId(); 6155 u64Tsc = ASMReadTSC(); 6156 u64NanoTS = RTTimeSystemNanoTS(); 6157 ASMSetFlags(uFlags); 6158 SUPTscDeltaApply(pGip, &u64Tsc, idApic, &fDeltaApplied); 6159 u64DeltaNanoTS = u64NanoTS - g_u64NanoTSAnchor; 6160 u64DeltaTsc = u64Tsc - g_u64TscAnchor; 6161 6162 if ( pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC 6163 && !fDeltaApplied) 6164 { 6165 SUPR0Printf("vboxdrv: failed to refine TSC frequency as TSC-deltas unavailable after %d seconds!\n", 6166 GIP_TSC_REFINE_INTERVAL); 6167 return; 6168 } 6169 6170 /* Calculate the TSC frequency. */ 6171 if ( u64DeltaTsc < UINT64_MAX / RT_NS_1SEC 6172 && u64DeltaNanoTS < UINT32_MAX) 6173 pGip->u64CpuHz = ASMMultU64ByU32DivByU32(u64DeltaTsc, RT_NS_1SEC, u64DeltaNanoTS); 6174 else 6175 { 6176 /* Try not to lose precision, the larger the interval the more likely we overflow. */ 6177 if ( u64DeltaTsc < UINT64_MAX / RT_NS_100MS 6178 && u64DeltaNanoTS / 10 < UINT32_MAX) 6179 pGip->u64CpuHz = ASMMultU64ByU32DivByU32(u64DeltaTsc, RT_NS_100MS, u64DeltaNanoTS / 10); 6180 else if ( u64DeltaTsc < UINT64_MAX / RT_NS_10MS 6181 && u64DeltaNanoTS / 100 < UINT32_MAX) 6182 pGip->u64CpuHz = ASMMultU64ByU32DivByU32(u64DeltaTsc, RT_NS_10MS, u64DeltaNanoTS / 100); 6183 else if ( u64DeltaTsc < UINT64_MAX / RT_NS_1MS 6184 && u64DeltaNanoTS / 1000 < UINT32_MAX) 6185 pGip->u64CpuHz = ASMMultU64ByU32DivByU32(u64DeltaTsc, RT_NS_1MS, u64DeltaNanoTS / 1000); 6186 else /* Screw it. */ 6187 pGip->u64CpuHz = u64DeltaTsc / (u64DeltaNanoTS / RT_NS_1SEC_64); 6188 } 6189 6190 /* Update rest of GIP. */ 6191 Assert(pGip->u32Mode != SUPGIPMODE_ASYNC_TSC); /* See SUPGetCpuHzFromGIP().*/ 6192 pGip->aCPUs[0].u64CpuHz = pGip->u64CpuHz; 6193 } 6194 6195 6196 /** 6197 * Starts the TSC-frequency refinement phase asynchronously. 6198 * 6199 * @param pDevExt Pointer to the device instance data. 
6200 */ 6201 static void supdrvRefineTscFreq(PSUPDRVDEVEXT pDevExt) 6202 { 6203 uint64_t u64NanoTS; 6204 RTCCUINTREG uFlags; 6205 uint8_t idApic; 6206 int rc; 6207 bool fDeltaApplied = false; 6208 PSUPGLOBALINFOPAGE pGip; 6209 6210 /* Validate. */ 6211 Assert(pDevExt); 6212 Assert(pDevExt->pGip); 6213 6214 pGip = pDevExt->pGip; 6215 u64NanoTS = RTTimeSystemNanoTS(); 6216 while (RTTimeSystemNanoTS() == u64NanoTS) 6217 ASMNopPause(); 6218 uFlags = ASMIntDisableFlags(); 6219 idApic = ASMGetApicId(); 6220 g_u64TscAnchor = ASMReadTSC(); 6221 g_u64NanoTSAnchor = RTTimeSystemNanoTS(); 6222 ASMSetFlags(uFlags); 6223 SUPTscDeltaApply(pGip, &g_u64TscAnchor, idApic, &fDeltaApplied); 6224 6225 #ifdef SUPDRV_USE_TSC_DELTA_THREAD 6226 if ( pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC 6227 && !fDeltaApplied) 6228 { 6229 rc = supdrvTscDeltaThreadWaitForOnlineCpus(pDevExt); 6230 if (rc == VERR_TIMEOUT) 6231 { 6232 SUPR0Printf("vboxdrv: Skipping refinement of TSC frequency as TSC-delta measurement timed out!\n"); 6233 return; 6234 } 6235 } 6236 #endif 6237 6238 rc = RTTimerCreateEx(&g_pTscRefineTimer, 0 /* one-shot */, RTTIMER_FLAGS_CPU_ANY, supdrvRefineTscTimer, pGip); 6239 if (RT_SUCCESS(rc)) 6240 { 6241 /* 6242 * Refine the TSC frequency measurement over a long interval. Ideally, we want to keep the 6243 * interval as small as possible while gaining the most consistent and accurate frequency 6244 * (compared to what the host OS might have measured). 6245 * 6246 * In theory, we gain more accuracy with longer intervals, but we want VMs to startup with the 6247 * same TSC frequency whenever possible so we need to keep the interval short. 6248 */ 6249 rc = RTTimerStart(g_pTscRefineTimer, GIP_TSC_REFINE_INTERVAL * RT_NS_1SEC_64); 6250 AssertRC(rc); 6251 } 6252 else 6253 OSDBGPRINT(("RTTimerCreateEx failed to create one-shot timer. rc=%Rrc\n", rc)); 6041 6254 } 6042 6255 … … 6107 6320 && !supdrvOSGetForcedAsyncTscMode(pDevExt))) 6108 6321 { 6322 /* Basically invariant Windows boxes, should never be detected as async. */ 6109 6323 OSDBGPRINT(("supdrvGipCreate: The TSC-deltas should be normalized by the host OS, but verifying shows it's not!\n")); 6110 6324 return VERR_INTERNAL_ERROR_2; … … 6112 6326 6113 6327 #ifdef SUPDRV_USE_TSC_DELTA_THREAD 6114 if ( !g_fOsTscDeltasInSync)6328 if (GIP_ARE_TSC_DELTAS_APPLICABLE(pGip)) 6115 6329 { 6116 6330 /* Initialize TSC-delta measurement thread before executing any Mp event callbacks. */ 6117 rc = supdrvTscDelta Init(pDevExt);6331 rc = supdrvTscDeltaThreadInit(pDevExt); 6118 6332 } 6119 6333 #endif … … 6126 6340 if (RT_SUCCESS(rc)) 6127 6341 { 6342 uint16_t iCpu; 6128 6343 #ifndef SUPDRV_USE_TSC_DELTA_THREAD 6129 uint16_t iCpu; 6130 if (!g_fOsTscDeltasInSync) 6344 if (GIP_ARE_TSC_DELTAS_APPLICABLE(pGip)) 6131 6345 { 6132 6346 /* … … 6151 6365 if (RT_SUCCESS(rc)) 6152 6366 { 6153 rc = supdrvGipMeasureTscFreq(p Gip);6367 rc = supdrvGipMeasureTscFreq(pDevExt); 6154 6368 if (RT_SUCCESS(rc)) 6155 6369 { 6156 if (supdrvIsInvariantTsc())6157 pGip->aCPUs[0].u64CpuHz = pGip->u64CpuHz;6158 6159 6370 /* 6160 6371 * Create the timer. … … 6180 6391 Log(("supdrvGipCreate: %u ns interval.\n", u32Interval)); 6181 6392 g_pSUPGlobalInfoPage = pGip; 6393 if (pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC) 6394 supdrvRefineTscFreq(pDevExt); 6182 6395 return VINF_SUCCESS; 6183 6396 } 6184 6397 else 6185 6398 { 6186 OSDBGPRINT(("supdrvGipCreate: RTTimerCreateEx failed (%u ns interval). rc=%Rrc\n", u32Interval, rc));6399 OSDBGPRINT(("supdrvGipCreate: failed create GIP timer at %u ns interval. 
rc=%Rrc\n", u32Interval, rc)); 6187 6400 Assert(!pDevExt->pGipTimer); 6188 6401 } … … 6235 6448 6236 6449 /* 6450 * Destroy the TSC-refinement one-shot timer. 6451 */ 6452 if (g_pTscRefineTimer) 6453 { 6454 RTTimerDestroy(g_pTscRefineTimer); 6455 g_pTscRefineTimer = NULL; 6456 } 6457 6458 /* 6237 6459 * Invalid the GIP data. 6238 6460 */ … … 6274 6496 * Timer callback function sync GIP mode. 6275 6497 * @param pTimer The timer. 6276 * @param pvUser The device extension. 6498 * @param pvUser Opaque pointer to the device extension. 6499 * @param iTick The timer tick. 6277 6500 */ 6278 6501 static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick) 6279 6502 { 6280 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */ 6281 uint64_t u64TSC = ASMReadTSC(); 6282 uint64_t NanoTS = RTTimeSystemNanoTS(); 6283 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser; 6284 6285 if (supdrvIsInvariantTsc()) 6286 { 6287 PSUPGIPCPU pGipCpu; 6288 unsigned iCpu; 6289 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip; 6290 uint8_t idApic = ASMGetApicId(); 6291 6292 iCpu = pGip->aiCpuFromApicId[idApic]; 6293 Assert(iCpu < pGip->cCpus); 6294 pGipCpu = &pGip->aCPUs[iCpu]; 6295 Assert(pGipCpu->idCpu == RTMpCpuId()); 6296 6503 RTCCUINTREG uFlags; 6504 uint64_t u64TSC; 6505 uint64_t u64NanoTS; 6506 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser; 6507 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip; 6508 6509 /* 6510 * Synchronize with the host OS clock tick before reading the TSC. 6511 * Especially important on Windows where the granularity is terrible. 6512 */ 6513 u64NanoTS = RTTimeSystemNanoTS(); 6514 while (u64NanoTS == RTTimeSystemNanoTS()) 6515 ASMNopPause(); 6516 6517 uFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */ 6518 u64TSC = ASMReadTSC(); 6519 u64NanoTS = RTTimeSystemNanoTS(); 6520 6521 if (GIP_ARE_TSC_DELTAS_APPLICABLE(pGip)) 6522 { 6297 6523 /* 6298 6524 * The calculations in supdrvGipUpdate() is very timing sensitive and doesn't handle … … 6304 6530 * fire on the CPU they were registered/started on. Darwin, Solaris need verification. 6305 6531 */ 6306 if (pGipCpu->i64TSCDelta != INT64_MAX) 6307 u64TSC -= pGipCpu->i64TSCDelta; 6308 } 6309 6310 supdrvGipUpdate(pDevExt, NanoTS, u64TSC, NIL_RTCPUID, iTick); 6311 6312 ASMSetFlags(fOldFlags); 6313 6314 if (supdrvIsInvariantTsc()) 6315 { 6316 /* 6317 * Refine the TSC frequency measurement over a longer interval. Ideally, we want to keep the 6318 * interval as small as possible while gaining the most consistent and accurate frequency 6319 * (compared to what the host OS might have measured). 6320 * 6321 * In theory, we gain more accuracy with longer intervals, but we want VMs to startup with the 6322 * same TSC frequency whenever possible so we need to keep the interval short. 6323 */ 6324 uint8_t idApic; 6325 uint64_t u64NanoTS; 6326 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip; 6327 const int cSeconds = 3; 6328 if (RT_UNLIKELY(iTick == 3)) /* Helps with more consistent values across multiple runs (esp. Windows). 
*/ 6329 { 6330 u64NanoTS = RTTimeSystemNanoTS(); 6331 while (RTTimeSystemNanoTS() == u64NanoTS) 6332 ASMNopPause(); 6333 fOldFlags = ASMIntDisableFlags(); 6334 idApic = ASMGetApicId(); 6335 g_u64TSCAnchor = ASMReadTSC(); 6336 g_u64NanoTSAnchor = RTTimeSystemNanoTS(); 6337 ASMSetFlags(fOldFlags); 6338 SUPTscDeltaApply(pGip, &g_u64TSCAnchor, idApic, NULL /* pfDeltaApplied */); 6339 ++g_u64TSCAnchor; 6340 } 6341 else if (g_u64TSCAnchor) 6342 { 6343 uint64_t u64DeltaNanoTS; 6344 u64NanoTS = RTTimeSystemNanoTS(); 6345 while (RTTimeSystemNanoTS() == u64NanoTS) 6346 ASMNopPause(); 6347 fOldFlags = ASMIntDisableFlags(); 6348 idApic = ASMGetApicId(); 6349 u64TSC = ASMReadTSC(); 6350 u64NanoTS = RTTimeSystemNanoTS(); 6351 ASMSetFlags(fOldFlags); 6352 SUPTscDeltaApply(pGip, &u64TSC, idApic, NULL /* pfDeltaApplied */); 6353 u64DeltaNanoTS = u64NanoTS - g_u64NanoTSAnchor; 6354 if (u64DeltaNanoTS >= cSeconds * RT_NS_1SEC_64) 6355 { 6356 uint16_t iCpu; 6357 if (u64DeltaNanoTS < UINT32_MAX) 6358 pGip->u64CpuHz = ASMMultU64ByU32DivByU32(u64TSC - g_u64TSCAnchor, RT_NS_1SEC, u64DeltaNanoTS); 6359 else 6360 pGip->u64CpuHz = (u64TSC - g_u64TSCAnchor) / (u64DeltaNanoTS / RT_NS_1SEC); 6361 6362 pGip->aCPUs[0].u64CpuHz = pGip->u64CpuHz; 6363 g_u64TSCAnchor = 0; 6364 } 6365 } 6366 } 6532 Assert(!ASMIntAreEnabled()); 6533 SUPTscDeltaApply(pGip, &u64TSC, ASMGetApicId(), NULL /* pfDeltaApplied */); 6534 } 6535 6536 supdrvGipUpdate(pDevExt, u64NanoTS, u64TSC, NIL_RTCPUID, iTick); 6537 6538 ASMSetFlags(uFlags); 6367 6539 } 6368 6540 … … 6371 6543 * Timer callback function for async GIP mode. 6372 6544 * @param pTimer The timer. 6373 * @param pvUser The device extension. 6545 * @param pvUser Opaque pointer to the device extension. 6546 * @param iTick The timer tick. 6374 6547 */ 6375 6548 static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick) … … 6491 6664 * update the state and it'll get serviced when the thread's listening interval times out. 6492 6665 */ 6493 if ( !g_fOsTscDeltasInSync 6494 && supdrvIsInvariantTsc()) 6495 { 6496 RTCpuSetAdd(&pDevExt->TscDeltaCpuSet, idCpu); 6666 if (GIP_ARE_TSC_DELTAS_APPLICABLE(pGip)) 6667 { 6497 6668 RTSpinlockAcquire(pDevExt->hTscDeltaSpinlock); 6498 6669 if ( pDevExt->enmTscDeltaState == kSupDrvTscDeltaState_Listening 6499 6670 || pDevExt->enmTscDeltaState == kSupDrvTscDeltaState_Measuring) 6500 6671 { 6672 RTCpuSetAdd(&pDevExt->TscDeltaCpuSet, idCpu); 6501 6673 pDevExt->enmTscDeltaState = kSupDrvTscDeltaState_WaitAndMeasure; 6502 6674 } … … 6550 6722 } 6551 6723 6552 /* Reset the TSC delta (if required), we will recalculate it lazily. */6553 if ( !g_fOsTscDeltasInSync)6724 /* Reset the TSC delta, we will recalculate it lazily. */ 6725 if (GIP_ARE_TSC_DELTAS_APPLICABLE(pGip)) 6554 6726 ASMAtomicWriteS64(&pGip->aCPUs[i].i64TSCDelta, INT64_MAX); 6727 6728 #ifdef SUPDRV_USE_TSC_DELTA_THREAD 6729 /* Remove this CPU from the set of CPUs that we have obtained the TSC deltas. 
*/ 6730 if (supdrvIsInvariantTsc()) 6731 RTCpuSetDel(&pDevExt->TscDeltaObtainedCpuSet, idCpu); 6732 #endif 6555 6733 6556 6734 /* commit it */ … … 6653 6831 } 6654 6832 } 6655 }6656 6657 6658 /**6659 * Returns whether the host CPU sports an invariant TSC or not.6660 *6661 * @returns true if invariant TSC is supported, false otherwise.6662 */6663 static bool supdrvIsInvariantTsc(void)6664 {6665 static bool s_fQueried = false;6666 static bool s_fIsInvariantTsc = false;6667 if (!s_fQueried)6668 {6669 uint32_t uEax, uEbx, uEcx, uEdx;6670 ASMCpuId(0x80000000, &uEax, &uEbx, &uEcx, &uEdx);6671 if (uEax >= 0x80000007)6672 {6673 ASMCpuId(0x80000007, &uEax, &uEbx, &uEcx, &uEdx);6674 if (uEdx & X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR)6675 s_fIsInvariantTsc = true;6676 }6677 s_fQueried = true;6678 }6679 6680 return s_fIsInvariantTsc;6681 6833 } 6682 6834 … … 6927 7079 AssertReturn(pDevExt, VERR_INVALID_PARAMETER); 6928 7080 AssertReturn(pDevExt->pGip, VERR_INVALID_PARAMETER); 6929 Assert(!g_fOsTscDeltasInSync);6930 7081 6931 7082 pGip = pDevExt->pGip; 6932 7083 idMaster = pDevExt->idGipMaster; 6933 7084 pGipCpuWorker = &pGip->aCPUs[idxWorker]; 7085 7086 Assert(GIP_ARE_TSC_DELTAS_APPLICABLE(pGip)); 6934 7087 6935 7088 if (pGipCpuWorker->idCpu == idMaster) … … 6992 7145 uint32_t cOnlineCpus = pGip->cOnlineCpus; 6993 7146 6994 Assert(!g_fOsTscDeltasInSync); 6995 6996 /* 6997 * If we determined the TSC is async., don't bother with measuring deltas. 6998 */ 6999 if (RT_UNLIKELY(pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)) 7000 return VINF_SUCCESS; 7147 Assert(GIP_ARE_TSC_DELTAS_APPLICABLE(pGip)); 7001 7148 7002 7149 /* … … 7170 7317 static SUPGIPMODE supdrvGipDetermineTscMode(PSUPDRVDEVEXT pDevExt) 7171 7318 { 7172 #if 0 7319 /* Trust CPUs that declare their TSC to be invariant. */ 7173 7320 if (supdrvIsInvariantTsc()) 7174 return SUPGIPMODE_SYNC_TSC; /** @todo Switch to SUPGIPMODE_INVARIANT_TSC later. */ 7175 #endif 7176 7177 /* 7178 * On SMP we're faced with two problems: 7321 return SUPGIPMODE_INVARIANT_TSC; 7322 7323 /* 7324 * Without invariant CPU ID bit - On SMP we're faced with two problems: 7179 7325 * (1) There might be a skew between the CPU, so that cpu0 7180 7326 * returns a TSC that is slightly different from cpu1. … … 7203 7349 * won't trust it unless it has the TscInvariant bit is set. 7204 7350 */ 7351 /** @todo this is now redundant. remove later. */ 7205 7352 /* Check for "AuthenticAMD" */ 7206 7353 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX); … … 7213 7360 { 7214 7361 ASMCpuId(0x80000007, &uEAX, &uEBX, &uECX, &uEDX); 7215 if ( !(uEDX & RT_BIT(8))/* TscInvariant */7362 if ( !(uEDX & X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR) /* TscInvariant */ 7216 7363 && (uEDX & 0x3e)) /* STC|TM|THERMTRIP|VID|FID. Ignore TS. */ 7217 7364 return SUPGIPMODE_ASYNC_TSC; … … 7409 7556 * Calc TSC delta. 7410 7557 */ 7411 /** @todo validate the NanoTS delta, don't trust the OS to call us when it should... */7412 7558 u64TSCDelta = u64TSC - pGipCpu->u64TSC; 7413 7559 ASMAtomicWriteU64(&pGipCpu->u64TSC, u64TSC); 7560 7561 /* We don't need to keep realculating the frequency when it's invariant. */ 7562 if (pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC) 7563 return; 7414 7564 7415 7565 if (u64TSCDelta >> 32) … … 7437 7587 7438 7588 /* 7589 * Validate the NanoTS deltas between timer fires with an arbitrary threshold of 0.5%. 7590 * Wait until we have at least one full history since the above history reset. The 7591 * assumption is that the majority of the previous history values will be tolerable. 7592 * See @bugref{6710} comment #67. 
7593 */ 7594 if ( u32TransactionId > 23 /* 7 + (8 * 2) */ 7595 && pGip->u32Mode != SUPGIPMODE_ASYNC_TSC) 7596 { 7597 uint32_t uNanoTsThreshold = pGip->u32UpdateIntervalNS / 200; 7598 if ( pGipCpu->u32PrevUpdateIntervalNS > pGip->u32UpdateIntervalNS + uNanoTsThreshold 7599 || pGipCpu->u32PrevUpdateIntervalNS < pGip->u32UpdateIntervalNS - uNanoTsThreshold) 7600 { 7601 uint32_t u32; 7602 u32 = pGipCpu->au32TSCHistory[0]; 7603 u32 += pGipCpu->au32TSCHistory[1]; 7604 u32 += pGipCpu->au32TSCHistory[2]; 7605 u32 += pGipCpu->au32TSCHistory[3]; 7606 u32 >>= 2; 7607 u64TSCDelta = pGipCpu->au32TSCHistory[4]; 7608 u64TSCDelta += pGipCpu->au32TSCHistory[5]; 7609 u64TSCDelta += pGipCpu->au32TSCHistory[6]; 7610 u64TSCDelta += pGipCpu->au32TSCHistory[7]; 7611 u64TSCDelta >>= 2; 7612 u64TSCDelta += u32; 7613 u64TSCDelta >>= 1; 7614 } 7615 } 7616 7617 7618 /* 7439 7619 * TSC History. 7440 7620 */ … … 7451 7631 * However, this problem existed before the invariant mode was introduced. 7452 7632 */ 7453 if ( supdrvIsInvariantTsc()7633 if ( pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC 7454 7634 || pGip->u32UpdateHz >= 1000) 7455 7635 { … … 7489 7669 ASMAtomicWriteU32(&pGipCpu->u32UpdateIntervalTSC, u32UpdateIntervalTSC + u32UpdateIntervalTSCSlack); 7490 7670 7491 if (supdrvIsInvariantTsc())7492 return;7493 7494 7671 /* 7495 7672 * CpuHz. … … 7548 7725 * Recalc the update frequency every 0x800th time. 7549 7726 */ 7550 if (!(pGipCpu->u32TransactionId & (GIP_UPDATEHZ_RECALC_FREQ * 2 - 2))) 7727 if ( pGip->u32Mode != SUPGIPMODE_INVARIANT_TSC /* cuz we're not recalculating the frequency on invariants hosts. */ 7728 && !(pGipCpu->u32TransactionId & (GIP_UPDATEHZ_RECALC_FREQ * 2 - 2))) 7551 7729 { 7552 7730 if (pGip->u64NanoTSLastUpdateHz) … … 7566 7744 #endif 7567 7745 } 7568 ASMAtomicWriteU64(&pGip->u64NanoTSLastUpdateHz, u64NanoTS +1);7746 ASMAtomicWriteU64(&pGip->u64NanoTSLastUpdateHz, u64NanoTS | 1); 7569 7747 } 7570 7748 … … 7684 7862 return VERR_INVALID_CPU_ID; 7685 7863 7686 if ( g_fOsTscDeltasInSync)7864 if (!GIP_ARE_TSC_DELTAS_APPLICABLE(pGip)) 7687 7865 return VINF_SUCCESS; 7688 7866 … … 7705 7883 * to pass those options to the thread somehow and implement it in the 7706 7884 * thread. Check if anyone uses/needs fAsync before implementing this. */ 7707 RTCpuSetAdd(&pDevExt->TscDeltaCpuSet, idCpu);7885 RTCpuSetAdd(&pDevExt->TscDeltaCpuSet, pGipCpuWorker->idCpu); 7708 7886 RTSpinlockAcquire(pDevExt->hTscDeltaSpinlock); 7709 7887 if ( pDevExt->enmTscDeltaState == kSupDrvTscDeltaState_Listening … … 7718 7896 #endif 7719 7897 7720 while (cTries-- )7898 while (cTries-- > 0) 7721 7899 { 7722 7900 rc = supdrvMeasureTscDeltaOne(pDevExt, iCpu); … … 7764 7942 while (cTries-- > 0) 7765 7943 { 7766 rc = SUP ReadTsc(&uTsc, &idApic);7944 rc = SUPGetTsc(&uTsc, &idApic); 7767 7945 if (RT_SUCCESS(rc)) 7768 7946 { … … 7773 7951 else 7774 7952 { 7953 /* If we failed to have a TSC-delta, measurement the TSC-delta and retry. */ 7775 7954 int rc2; 7776 7955 uint16_t iCpu; 7777 7778 /* If we failed to have a delta, measurement the delta and retry. */7779 7956 AssertMsgReturn(idApic < RT_ELEMENTS(pGip->aiCpuFromApicId), 7780 7957 ("idApic=%u ArraySize=%u\n", idApic, RT_ELEMENTS(pGip->aiCpuFromApicId)), VERR_INVALID_CPU_INDEX); … … 7782 7959 AssertMsgReturn(iCpu < pGip->cCpus, ("iCpu=%u cCpus=%u\n", iCpu, pGip->cCpus), VERR_INVALID_CPU_INDEX); 7783 7960 7784 Assert( !g_fOsTscDeltasInSync);7961 Assert(GIP_ARE_TSC_DELTAS_APPLICABLE(pGip)); 7785 7962 rc2 = supdrvMeasureTscDeltaOne(pDevExt, iCpu); 7786 7963 if (RT_SUCCESS(rc2)) -
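The refinement path added above (supdrvRefineTscTimer) derives the CPU frequency as elapsed TSC ticks scaled to one second, stepping the multiplier down from RT_NS_1SEC to RT_NS_100MS, RT_NS_10MS and RT_NS_1MS so the 64-bit multiplication cannot overflow over long intervals. Below is a minimal standalone sketch of that scaling idea in plain C with no IPRT dependencies; the helper name tsc_hz_from_interval is made up here, full 64-bit division replaces the 32-bit-divisor helper used in the driver, and the measurement interval is assumed to be at least one second (as with the 5-second GIP_TSC_REFINE_INTERVAL).

    #include <stdint.h>

    /* Compute ticks-per-second from a measured interval without overflowing
     * 64-bit arithmetic: pick the largest nanosecond scale for which
     * cTicks * scale still fits in a uint64_t, and shrink the divisor by the
     * matching factor.  Mirrors the step-down logic in supdrvRefineTscTimer(),
     * but with plain integer operations.  Assumes cNanoSecs >= 1e9. */
    static uint64_t tsc_hz_from_interval(uint64_t cTicks, uint64_t cNanoSecs)
    {
        static const struct { uint64_t uScale; uint64_t uShrink; } s_aSteps[] =
        {
            { UINT64_C(1000000000), UINT64_C(1)    },  /* 1 s   resolution */
            { UINT64_C(100000000),  UINT64_C(10)   },  /* 100 ms           */
            { UINT64_C(10000000),   UINT64_C(100)  },  /* 10 ms            */
            { UINT64_C(1000000),    UINT64_C(1000) },  /* 1 ms             */
        };
        for (unsigned i = 0; i < sizeof(s_aSteps) / sizeof(s_aSteps[0]); i++)
            if (cTicks <= UINT64_MAX / s_aSteps[i].uScale)
                return cTicks * s_aSteps[i].uScale / (cNanoSecs / s_aSteps[i].uShrink);
        /* Give up on precision and fall back to whole seconds. */
        return cTicks / (cNanoSecs / UINT64_C(1000000000));
    }

The longer the interval, the more the divisor has to be shrunk, which is why the comment in the patch notes that precision is gradually traded away as the interval grows.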
trunk/src/VBox/HostDrivers/Support/SUPDrvInternal.h
r53396 → r53430

 /*
- * Copyright (C) 2006-2013 Oracle Corporation
+ * Copyright (C) 2006-2014 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
…
 #if 0
-/** Use a dedicated kernel thread to service TSC-delta measurement requests. */
+/** Use a dedicated kernel thread to service TSC-delta measurement requests.
+ * @todo Test on servers with many CPUs and sockets. */
 #define SUPDRV_USE_TSC_DELTA_THREAD
 #endif
…
     /** The set of CPUs we need to take measurements for. */
     RTCPUSET                TscDeltaCpuSet;
+    /** The set of CPUs we have completed taken measurements for. */
+    RTCPUSET                TscDeltaObtainedCpuSet;
     /** Whether the TSC-delta measurement was successful. */
     int                     rcTscDelta;
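The new TscDeltaObtainedCpuSet member lets the TSC-delta thread tell when every online CPU has a usable delta (compare supdrvTscDeltaThreadWaitForOnlineCpus in SUPDrv.c above). A toy model of that bookkeeping follows, assuming at most 64 CPUs and using a plain bitmask instead of RTCPUSET; all names below are invented for illustration.

    #include <stdint.h>
    #include <stdbool.h>

    /* One bit per CPU, set once a TSC-delta has been measured for that CPU. */
    typedef struct TSCDELTASTATE
    {
        uint64_t fOnlineCpus;    /* bit i set => CPU i is online          */
        uint64_t fMeasuredCpus;  /* bit i set => CPU i has a usable delta */
    } TSCDELTASTATE;

    static void tscDeltaMarkMeasured(TSCDELTASTATE *pState, unsigned iCpu)
    {
        pState->fMeasuredCpus |= UINT64_C(1) << iCpu;
    }

    /* Equivalent of the success check in the wait loop: every online CPU
     * must have been measured before the waiter may proceed. */
    static bool tscDeltaAllOnlineMeasured(const TSCDELTASTATE *pState)
    {
        return (pState->fMeasuredCpus & pState->fOnlineCpus) == pState->fOnlineCpus;
    }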
trunk/src/VBox/HostDrivers/Support/testcase/tstGIP-2.cpp
r53358 → r53430

 #include <iprt/initterm.h>
 #include <iprt/getopt.h>
+#include <iprt/x86.h>
+
+
+/**
+ * Checks whether the CPU advertises an invariant TSC or not.
+ *
+ * @returns true if invariant, false otherwise.
+ */
+bool tstIsInvariantTsc(void)
+{
+    if (ASMHasCpuId())
+    {
+        uint32_t uEax, uEbx, uEcx, uEdx;
+        ASMCpuId(0x80000000, &uEax, &uEbx, &uEcx, &uEdx);
+        if (uEax >= 0x80000007)
+        {
+            ASMCpuId(0x80000007, &uEax, &uEbx, &uEcx, &uEdx);
+            if (uEdx & X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR)
+                return true;
+        }
+    }
+    return false;
+}
…
     uint64_t uCpuHzRef = 0;
     uint64_t uCpuHzOverallDeviation = 0;
+    int64_t  iCpuHzMaxDeviation = 0;
     int32_t  cCpuHzOverallDevCnt = 0;
     RTGETOPTUNION ValueUnion;
…
                 else
                 {
-                    if (pCpu->u32TransactionId > 7)
+                    /* Wait until the history validation code takes effect. */
+                    if (pCpu->u32TransactionId > 23 + (8 * 2) + 1)
                     {
+                        if (RT_ABS(iCpuHzDeviation) > RT_ABS(iCpuHzMaxDeviation))
+                            iCpuHzMaxDeviation = iCpuHzDeviation;
                         uCpuHzOverallDeviation += uCpuHzDeviation;
                         cCpuHzOverallDevCnt++;
…
             RTPrintf("tstGIP-2: offline: %lld\n", g_pSUPGlobalInfoPage->aCPUs[iCpu].i64TSCDelta);

-        if (uCpuHzRef)
+        RTPrintf("CPUID.Invariant-TSC : %RTbool\n", tstIsInvariantTsc());
+        if (   uCpuHzRef
+            && cCpuHzOverallDevCnt)
         {
-            uint32_t uPct = (uint32_t)(uCpuHzOverallDeviation * 100000 / cCpuHzOverallDevCnt / uCpuHzRef + 5);
-            RTPrintf("tstGIP-2: Overall CpuHz deviation: %d.%02d%%\n", uPct / 1000, (uPct % 1000) / 10);
+            uint32_t uPct    = (uint32_t)(uCpuHzOverallDeviation * 100000 / cCpuHzOverallDevCnt / uCpuHzRef + 5);
+            uint32_t uMaxPct = (uint32_t)(RT_ABS(iCpuHzMaxDeviation) * 100000 / uCpuHzRef + 5);
+            RTPrintf("Average CpuHz deviation: %d.%02d%%\n", uPct / 1000, (uPct % 1000) / 10);
+            RTPrintf("Maximum CpuHz deviation: %d.%02d%% (%RI64 ticks)\n", uMaxPct / 1000, (uMaxPct % 1000) / 10, iCpuHzMaxDeviation);
         }
     }
…
     return !!rc;
 }
+
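The deviation figures this testcase prints use pure integer math: scale the ratio to thousandths of a percent, add 5 to round to the nearest hundredth, then split into whole and fractional digits. A self-contained example of the same formatting, with made-up input values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t uCpuHzRef  = UINT64_C(2800000000);   /* reference frequency      */
        uint64_t uDeviation = UINT64_C(1234567);      /* absolute deviation in Hz */

        /* percent * 1000, rounded to the nearest hundredth of a percent */
        uint32_t uPct = (uint32_t)(uDeviation * 100000 / uCpuHzRef + 5);
        printf("deviation: %u.%02u%%\n", (unsigned)(uPct / 1000), (unsigned)((uPct % 1000) / 10));
        /* 1234567 / 2.8e9 = 0.0441%, so this prints "deviation: 0.04%". */
        return 0;
    }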
trunk/src/VBox/Runtime/common/time/timesup.cpp
r48935 → r53430

 /*
- * Copyright (C) 2006-2011 Oracle Corporation
+ * Copyright (C) 2006-2014 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
…
     if (   pGip
         && pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC
-        && (   pGip->u32Mode == SUPGIPMODE_SYNC_TSC
+        && (   pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC
+            || pGip->u32Mode == SUPGIPMODE_SYNC_TSC
             || pGip->u32Mode == SUPGIPMODE_ASYNC_TSC))
         return rtTimeNanoTSInternalRediscover(pData);
…
     if (   pGip
         && pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC
-        && (   pGip->u32Mode == SUPGIPMODE_SYNC_TSC
+        && (   pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC
+            || pGip->u32Mode == SUPGIPMODE_SYNC_TSC
             || pGip->u32Mode == SUPGIPMODE_ASYNC_TSC))
     {
         if (ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2)
-            iWorker = pGip->u32Mode == SUPGIPMODE_SYNC_TSC
+            iWorker = pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC || pGip->u32Mode == SUPGIPMODE_SYNC_TSC
                     ? RTTIMENANO_WORKER_SYNC_LFENCE
                     : RTTIMENANO_WORKER_ASYNC_LFENCE;
         else
-            iWorker = pGip->u32Mode == SUPGIPMODE_SYNC_TSC
+            iWorker = pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC || pGip->u32Mode == SUPGIPMODE_SYNC_TSC
                     ? RTTIMENANO_WORKER_SYNC_CPUID
                     : RTTIMENANO_WORKER_ASYNC_CPUID;
trunk/src/VBox/Runtime/common/time/timesupA.mac
r44528 → r53430

 ;
-; Copyright (C) 2006-2011 Oracle Corporation
+; Copyright (C) 2006-2014 Oracle Corporation
 ;
 ; This file is part of VirtualBox Open Source Edition (OSE), as
…
         mov     u32UpdateIntervalTSC, edx
         rdtsc
+        SupTscDeltaApply edi ; Apply inter-cpu TSC-delta to have the normalized TSC value in edx:eax
         mov     ecx, [edi + SUPGIPCPU.u64NanoTS]
         mov     u64CurNanoTS, ecx
…
         mov     u32UpdateIntervalTSC, [pGipCPU + SUPGIPCPU.u32UpdateIntervalTSC]
         rdtsc
+        SUPTscDeltaApply pGipCPU
         mov     u64PrevNanoTS, [pData + RTTIMENANOTSDATA.pu64Prev]
         mov     u64PrevNanoTS, [u64PrevNanoTS]
trunk/src/VBox/Runtime/common/time/timesupref.h
r44528 → r53430

 /*
- * Copyright (C) 2006-2011 Oracle Corporation
+ * Copyright (C) 2006-2014 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp
r53325 r53430 5 5 6 6 /* 7 * Copyright (C) 2006-201 2Oracle Corporation7 * Copyright (C) 2006-2014 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 64 64 { 65 65 pVCpu->tm.s.fTSCTicking = true; 66 if (pVM->tm.s.fTSCVirtualized) 67 { 68 /** @todo Test that pausing and resuming doesn't cause lag! (I.e. that we're 69 * unpaused before the virtual time and stopped after it. */ 70 if (pVM->tm.s.fTSCUseRealTSC) 71 pVCpu->tm.s.offTSCRawSrc = ASMReadTSC() - pVCpu->tm.s.u64TSC; 72 else 73 pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */) 74 - pVCpu->tm.s.u64TSC; 75 } 66 67 /** @todo Test that pausing and resuming doesn't cause lag! (I.e. that we're 68 * unpaused before the virtual time and stopped after it. */ 69 if (pVM->tm.s.enmMode == TMMODE_REAL_TSC_OFFSET) 70 pVCpu->tm.s.offTSCRawSrc = ASMReadTSC() - pVCpu->tm.s.u64TSC; 71 else 72 pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */) 73 - pVCpu->tm.s.u64TSC; 76 74 return VINF_SUCCESS; 77 75 } … … 94 92 /* TSC must be ticking before calling tmCpuTickGetRawVirtual()! */ 95 93 pVCpu->tm.s.fTSCTicking = true; 96 if (pVM->tm.s.fTSCVirtualized) 97 { 98 uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cTSCsTicking); 99 AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE); 100 if (c == 1) 101 { 102 /* The first VCPU to resume. */ 103 uint64_t offTSCRawSrcOld = pVCpu->tm.s.offTSCRawSrc; 104 105 STAM_COUNTER_INC(&pVM->tm.s.StatTSCResume); 106 107 /* When resuming, use the TSC value of the last stopped VCPU to avoid the TSC going back. */ 108 if (pVM->tm.s.fTSCUseRealTSC) 109 pVCpu->tm.s.offTSCRawSrc = ASMReadTSC() - pVM->tm.s.u64LastPausedTSC; 110 else 111 pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */) 112 - pVM->tm.s.u64LastPausedTSC; 113 114 /* Calculate the offset for other VCPUs to use. */ 115 pVM->tm.s.offTSCPause = pVCpu->tm.s.offTSCRawSrc - offTSCRawSrcOld; 116 } 94 uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cTSCsTicking); 95 AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE); 96 if (c == 1) 97 { 98 /* The first VCPU to resume. */ 99 uint64_t offTSCRawSrcOld = pVCpu->tm.s.offTSCRawSrc; 100 101 STAM_COUNTER_INC(&pVM->tm.s.StatTSCResume); 102 103 /* When resuming, use the TSC value of the last stopped VCPU to avoid the TSC going back. */ 104 if (pVM->tm.s.enmMode == TMMODE_REAL_TSC_OFFSET) 105 pVCpu->tm.s.offTSCRawSrc = ASMReadTSC() - pVM->tm.s.u64LastPausedTSC; 117 106 else 118 { 119 /* All other VCPUs (if any). */ 120 pVCpu->tm.s.offTSCRawSrc += pVM->tm.s.offTSCPause; 121 } 107 pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */) 108 - pVM->tm.s.u64LastPausedTSC; 109 110 /* Calculate the offset for other VCPUs to use. */ 111 pVM->tm.s.offTSCPause = pVCpu->tm.s.offTSCRawSrc - offTSCRawSrcOld; 112 } 113 else 114 { 115 /* All other VCPUs (if any). */ 116 pVCpu->tm.s.offTSCRawSrc += pVM->tm.s.offTSCPause; 122 117 } 123 118 } … … 187 182 { 188 183 /* Sample the reason for refusing. 
*/ 189 if ( !pVM->tm.s.fMaybeUseOffsettedHostTSC)184 if (pVM->tm.s.enmMode != TMMODE_DYNAMIC) 190 185 STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotFixed); 191 186 else if (!pVCpu->tm.s.fTSCTicking) 192 187 STAM_COUNTER_INC(&pVM->tm.s.StatTSCNotTicking); 193 else if ( !pVM->tm.s.fTSCUseRealTSC)188 else if (pVM->tm.s.enmMode != TMMODE_REAL_TSC_OFFSET) 194 189 { 195 190 if (pVM->tm.s.fVirtualSyncCatchUp) … … 240 235 */ 241 236 *pfParavirtTsc = GIMIsParavirtTscEnabled(pVM); 242 if ( pVM->tm.s. fMaybeUseOffsettedHostTSC237 if ( pVM->tm.s.enmMode == TMMODE_DYNAMIC 243 238 && RT_LIKELY(pVCpu->tm.s.fTSCTicking) 244 && ( pVM->tm.s. fTSCUseRealTSC239 && ( pVM->tm.s.enmMode == TMMODE_REAL_TSC_OFFSET 245 240 || ( !pVM->tm.s.fVirtualSyncCatchUp 246 241 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking) 247 242 && !pVM->tm.s.fVirtualWarpDrive))) 248 243 { 249 if ( !pVM->tm.s.fTSCUseRealTSC)244 if (pVM->tm.s.enmMode != TMMODE_REAL_TSC_OFFSET) 250 245 { 251 246 /* The source is the timer synchronous virtual clock. */ 252 Assert(pVM->tm.s.fTSCVirtualized);253 254 247 if (poffRealTSC) 255 248 { … … 266 259 { 267 260 /* The source is the real TSC. */ 268 if (pVM->tm.s.fTSCVirtualized) 269 *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc; 270 else 271 *poffRealTSC = 0; 261 *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc; 272 262 } 273 263 /** @todo count this? */ … … 339 329 */ 340 330 *pfParavirtTsc = GIMIsParavirtTscEnabled(pVM); 341 if ( pVM->tm.s. fMaybeUseOffsettedHostTSC331 if ( pVM->tm.s.enmMode == TMMODE_DYNAMIC 342 332 && RT_LIKELY(pVCpu->tm.s.fTSCTicking) 343 && ( pVM->tm.s. fTSCUseRealTSC333 && ( pVM->tm.s.enmMode == TMMODE_REAL_TSC_OFFSET 344 334 || ( !pVM->tm.s.fVirtualSyncCatchUp 345 335 && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking) … … 347 337 { 348 338 *pfOffsettedTsc = true; 349 if ( !pVM->tm.s.fTSCUseRealTSC)339 if (pVM->tm.s.enmMode != TMMODE_REAL_TSC_OFFSET) 350 340 { 351 341 /* The source is the timer synchronous virtual clock. */ 352 Assert(pVM->tm.s.fTSCVirtualized);353 354 342 uint64_t cNsToDeadline; 355 343 uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline); … … 364 352 { 365 353 /* The source is the real TSC. */ 366 if (pVM->tm.s.fTSCVirtualized) 367 *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc; 368 else 369 *poffRealTSC = 0; 354 *poffRealTSC = 0 - pVCpu->tm.s.offTSCRawSrc; 370 355 cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM)); 371 356 } … … 398 383 { 399 384 PVM pVM = pVCpu->CTX_SUFF(pVM); 400 if (pVM->tm.s.fTSCVirtualized) 401 { 402 if (pVM->tm.s.fTSCUseRealTSC) 403 u64 = ASMReadTSC(); 404 else 405 u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers); 406 u64 -= pVCpu->tm.s.offTSCRawSrc; 407 } 385 if (pVM->tm.s.enmMode == TMMODE_REAL_TSC_OFFSET) 386 u64 = ASMReadTSC(); 408 387 else 409 u64 = ASMReadTSC(); 388 u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers); 389 u64 -= pVCpu->tm.s.offTSCRawSrc; 410 390 411 391 /* Always return a value higher than what the guest has already seen. */ … … 523 503 VMMDECL(uint64_t) TMCpuTicksPerSecond(PVM pVM) 524 504 { 525 if (pVM->tm.s.fTSCUseRealTSC) 505 /** @todo revisit this, not sure why we need to get the rate from GIP for 506 * real-tsc-offset. */ 507 if (pVM->tm.s.enmMode == TMMODE_REAL_TSC_OFFSET) 526 508 { 527 509 uint64_t cTSCTicksPerSecond = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage); -
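The reshuffled TMAllCpu.cpp code above keeps the guest TSC as "source clock minus a per-VCPU offset" and recomputes the offset on resume so the counter never appears to run backwards across a pause. A simplified model of that bookkeeping in plain C is sketched below; the Toy* names are invented, and uSourceNow stands in for either ASMReadTSC() or the virtual sync clock, depending on the TM mode.

    #include <stdint.h>

    typedef struct TOYVCPUTIME
    {
        uint64_t offRawSrc;   /* subtracted from the source clock        */
        uint64_t uLastSeen;   /* last value handed to the guest (u64TSC) */
    } TOYVCPUTIME;

    /* On resume, continue exactly where the guest left off (cf. tmCpuTickResume). */
    static void toyTickResume(TOYVCPUTIME *pVCpu, uint64_t uSourceNow)
    {
        pVCpu->offRawSrc = uSourceNow - pVCpu->uLastSeen;
    }

    /* Reading the tick: offset the source and never go backwards
     * (cf. the "always return a value higher than what the guest has
     * already seen" logic in TMCpuTickGet). */
    static uint64_t toyTickGet(TOYVCPUTIME *pVCpu, uint64_t uSourceNow)
    {
        uint64_t u64 = uSourceNow - pVCpu->offRawSrc;
        if (u64 <= pVCpu->uLastSeen)
            u64 = pVCpu->uLastSeen + 1;
        pVCpu->uLastSeen = u64;
        return u64;
    }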
trunk/src/VBox/VMM/VMMR3/TM.cpp
r53326 r53430 5 5 6 6 /* 7 * Copyright (C) 2006-201 3Oracle Corporation7 * Copyright (C) 2006-2014 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 167 167 *******************************************************************************/ 168 168 static bool tmR3HasFixedTSC(PVM pVM); 169 static const char * tmR3GetModeName(PVM pVM); 169 170 static uint64_t tmR3CalibrateTSC(PVM pVM); 170 171 static DECLCALLBACK(int) tmR3Save(PVM pVM, PSSMHANDLE pSSM); … … 271 272 if (ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2) 272 273 { 273 if (g_pSUPGlobalInfoPage->u32Mode == SUPGIPMODE_SYNC_TSC) 274 if ( g_pSUPGlobalInfoPage->u32Mode == SUPGIPMODE_INVARIANT_TSC 275 || g_pSUPGlobalInfoPage->u32Mode == SUPGIPMODE_SYNC_TSC) 274 276 pVM->tm.s.pfnVirtualGetRawR3 = RTTimeNanoTSLFenceSync; 275 277 else … … 278 280 else 279 281 { 280 if (g_pSUPGlobalInfoPage->u32Mode == SUPGIPMODE_SYNC_TSC) 282 if ( g_pSUPGlobalInfoPage->u32Mode == SUPGIPMODE_INVARIANT_TSC 283 || g_pSUPGlobalInfoPage->u32Mode == SUPGIPMODE_SYNC_TSC) 281 284 pVM->tm.s.pfnVirtualGetRawR3 = RTTimeNanoTSLegacySync; 282 285 else … … 310 313 311 314 /* 315 * Handle deprecated TM settings. 316 */ 317 do 318 { 319 /** @todo make these runtime warnings instead of errors that refuse to start 320 * the VM? */ 321 bool fTSCVirtualized; 322 rc = CFGMR3QueryBool(pCfgHandle, "TSCVirtualized", &fTSCVirtualized); 323 if (RT_SUCCESS(rc)) 324 return VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_VALUE, RT_SRC_POS, 325 N_("Configuration error: TM setting \"TSCVirtualized\" is no longer supported. Use the \"Mode\" setting instead.")); 326 327 bool fForceUseRealTSC; 328 rc = CFGMR3QueryBool(pCfgHandle, "UseRealTSC", &fForceUseRealTSC); 329 if (RT_SUCCESS(rc)) 330 return VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_VALUE, RT_SRC_POS, 331 N_("Configuration error: TM setting \"UseRealTSC\" is no longer supported. Use the \"Mode\" setting instead.")); 332 333 bool fMaybeUseOffsettedHostTSC; 334 rc = CFGMR3QueryBool(pCfgHandle, "MaybeUseOffsettedHostTSC", &fMaybeUseOffsettedHostTSC); 335 if (RT_SUCCESS(rc)) 336 return VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_VALUE, RT_SRC_POS, 337 N_("Configuration error: TM setting \"MaybeUseOffsettedHostTSC\" is no longer supported. Use the \"Mode\" setting instead.")); 338 } while(0); 339 340 /* 341 * Validate the rest of the TM settings. 342 */ 343 if (!CFGMR3AreValuesValid(pCfgHandle, 344 "Mode\0" 345 "TSCTicksPerSecond\0" 346 "TSCTiedToExecution\0" 347 "TSCNotTiedToHalt\0" 348 "ScheduleSlack\0" 349 "CatchUpStopThreshold\0" 350 "CatchUpGiveUpThreshold\0" 351 "CatchUpStartThreshold\0" 352 "CatchUpPrecentage\0" 353 "UTCOffset\0" 354 "WarpDrivePercentage\0" 355 "HostHzMax\0" 356 "HostHzFudgeFactorTimerCpu\0" 357 "HostHzFudgeFactorOtherCpu\0" 358 "HostHzFudgeFactorCatchUp100\0" 359 "HostHzFudgeFactorCatchUp200\0" 360 "HostHzFudgeFactorCatchUp400\0" 361 "TimerMillies\0" 362 )) 363 return VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_NODE, RT_SRC_POS, N_("Configuration error: Invalid config key for TM.")); 364 365 /* 312 366 * Determine the TSC configuration and frequency. 313 367 */ 314 /* mode */ 315 /** @cfgm{/TM/TSCVirtualized,bool,true} 316 * Use a virtualize TSC, i.e. trap all TSC access. */ 317 rc = CFGMR3QueryBool(pCfgHandle, "TSCVirtualized", &pVM->tm.s.fTSCVirtualized); 368 /** @cfgm{/TM/Mode, string} 369 * The name of the time-keeping mode. The default is picked dynamically based 370 * on configuration of the VM. 
*/ 371 char szMode[32]; 372 rc = CFGMR3QueryString(pCfgHandle, "Mode", szMode, sizeof(szMode)); 318 373 if (rc == VERR_CFGM_VALUE_NOT_FOUND) 319 pVM->tm.s. fTSCVirtualized = true; /* trap rdtsc */374 pVM->tm.s.enmMode = tmR3HasFixedTSC(pVM) ? TMMODE_DYNAMIC : TMMODE_VIRT_TSC_EMULATED; 320 375 else if (RT_FAILURE(rc)) 321 return VMSetError(pVM, rc, RT_SRC_POS, 322 N_("Configuration error: Failed to querying bool value \"UseRealTSC\"")); 323 324 /* source */ 325 /** @cfgm{/TM/UseRealTSC,bool,false} 326 * Use the real TSC as time source for the TSC instead of the synchronous 327 * virtual clock (false, default). */ 328 rc = CFGMR3QueryBool(pCfgHandle, "UseRealTSC", &pVM->tm.s.fTSCUseRealTSC); 329 if (rc == VERR_CFGM_VALUE_NOT_FOUND) 330 pVM->tm.s.fTSCUseRealTSC = false; /* use virtual time */ 331 else if (RT_FAILURE(rc)) 332 return VMSetError(pVM, rc, RT_SRC_POS, 333 N_("Configuration error: Failed to querying bool value \"UseRealTSC\"")); 334 if (!pVM->tm.s.fTSCUseRealTSC) 335 pVM->tm.s.fTSCVirtualized = true; 336 337 /* TSC reliability */ 338 /** @cfgm{/TM/MaybeUseOffsettedHostTSC,bool,detect} 339 * Whether the CPU has a fixed TSC rate and may be used in offsetted mode with 340 * VT-x/AMD-V execution. This is autodetected in a very restrictive way by 341 * default. */ 342 rc = CFGMR3QueryBool(pCfgHandle, "MaybeUseOffsettedHostTSC", &pVM->tm.s.fMaybeUseOffsettedHostTSC); 343 if (rc == VERR_CFGM_VALUE_NOT_FOUND) 344 { 345 if (!pVM->tm.s.fTSCUseRealTSC) 346 pVM->tm.s.fMaybeUseOffsettedHostTSC = tmR3HasFixedTSC(pVM); 376 return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to querying string value \"Mode\"")); 377 else 378 { 379 AssertRC(rc); 380 if (!RTStrCmp(szMode, "VirtTSCEmulated")) 381 pVM->tm.s.enmMode = TMMODE_VIRT_TSC_EMULATED; 382 else if (!RTStrCmp(szMode, "RealTSCOffset")) 383 pVM->tm.s.enmMode = TMMODE_REAL_TSC_OFFSET; 384 else if (!RTStrCmp(szMode, "Dynamic")) 385 pVM->tm.s.enmMode = TMMODE_DYNAMIC; 347 386 else 348 pVM->tm.s.fMaybeUseOffsettedHostTSC = true; 349 /** @todo needs a better fix, for now disable offsetted mode for VMs 350 * with more than one VCPU. With the current TSC handling (frequent 351 * switching between offsetted mode and taking VM exits, on all VCPUs 352 * without any kind of coordination) it will lead to inconsistent TSC 353 * behavior with guest SMP, including TSC going backwards. */ 354 if ( pVM->cCpus != 1 355 && !pVM->tm.s.fTSCUseRealTSC) 356 pVM->tm.s.fMaybeUseOffsettedHostTSC = false; 357 } 387 return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Unrecognized TM mode value \"%s\""), szMode); 388 } 389 390 /** @todo needs a better fix, for now disable offsetted mode for VMs 391 * with more than one VCPU. With the current TSC handling (frequent 392 * switching between offsetted mode and taking VM exits, on all VCPUs 393 * without any kind of coordination) it will lead to inconsistent TSC 394 * behavior with guest SMP, including TSC going backwards. */ 395 if ( pVM->cCpus != 1 396 && pVM->tm.s.enmMode != TMMODE_REAL_TSC_OFFSET) 397 pVM->tm.s.enmMode = TMMODE_VIRT_TSC_EMULATED; 358 398 359 399 /** @cfgm{/TM/TSCTicksPerSecond, uint32_t, Current TSC frequency from GIP} 360 400 * The number of TSC ticks per second (i.e. the TSC frequency). This will 361 * override TSC UseRealTSC, TSCVirtualized and MaybeUseOffsettedHostTSC.401 * override TSCtTSC, TSCVirtualized and MaybeUseOffsettedHostTSC. 
362 402 */ 363 403 rc = CFGMR3QueryU64(pCfgHandle, "TSCTicksPerSecond", &pVM->tm.s.cTSCTicksPerSecond); … … 365 405 { 366 406 pVM->tm.s.cTSCTicksPerSecond = tmR3CalibrateTSC(pVM); 367 if ( !pVM->tm.s.fTSCUseRealTSC368 && 407 if ( pVM->tm.s.enmMode != TMMODE_REAL_TSC_OFFSET 408 && pVM->tm.s.cTSCTicksPerSecond >= _4G) 369 409 { 370 410 pVM->tm.s.cTSCTicksPerSecond = _4G - 1; /* (A limitation of our math code) */ 371 pVM->tm.s. fMaybeUseOffsettedHostTSC = false;411 pVM->tm.s.enmMode = TMMODE_VIRT_TSC_EMULATED; 372 412 } 373 413 } … … 382 422 else 383 423 { 384 pVM->tm.s.fTSCUseRealTSC = pVM->tm.s.fMaybeUseOffsettedHostTSC = false; 385 pVM->tm.s.fTSCVirtualized = true; 424 pVM->tm.s.enmMode = TMMODE_VIRT_TSC_EMULATED; 386 425 } 387 426 … … 398 437 N_("Configuration error: Failed to querying bool value \"TSCTiedToExecution\"")); 399 438 if (pVM->tm.s.fTSCTiedToExecution) 400 { 401 /* tied to execution, override all other settings. */ 402 pVM->tm.s.fTSCVirtualized = true; 403 pVM->tm.s.fTSCUseRealTSC = true; 404 pVM->tm.s.fMaybeUseOffsettedHostTSC = false; 405 } 439 pVM->tm.s.enmMode = TMMODE_VIRT_TSC_EMULATED; 406 440 407 441 /** @cfgm{/TM/TSCNotTiedToHalt, bool, true} … … 412 446 return VMSetError(pVM, rc, RT_SRC_POS, 413 447 N_("Configuration error: Failed to querying bool value \"TSCNotTiedToHalt\"")); 414 415 /* setup and report */416 if (pVM->tm.s.fTSCVirtualized)417 CPUMR3SetCR4Feature(pVM, X86_CR4_TSD, ~X86_CR4_TSD);418 else419 CPUMR3SetCR4Feature(pVM, 0, ~X86_CR4_TSD);420 LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%'RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool\n"421 "TM: fMaybeUseOffsettedHostTSC=%RTbool TSCTiedToExecution=%RTbool TSCNotTiedToHalt=%RTbool\n",422 pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.fTSCVirtualized, pVM->tm.s.fTSCUseRealTSC,423 pVM->tm.s.fMaybeUseOffsettedHostTSC, pVM->tm.s.fTSCTiedToExecution, pVM->tm.s.fTSCNotTiedToHalt));424 448 425 449 /* … … 525 549 pVM->tm.s.fVirtualWarpDrive = pVM->tm.s.u32VirtualWarpDrivePercentage != 100; 526 550 if (pVM->tm.s.fVirtualWarpDrive) 527 LogRel(("TM: u32VirtualWarpDrivePercentage=%RI32\n", pVM->tm.s.u32VirtualWarpDrivePercentage)); 551 { 552 pVM->tm.s.enmMode = TMMODE_VIRT_TSC_EMULATED; 553 LogRel(("TM: Warp-drive active. u32VirtualWarpDrivePercentage=%RI32\n", pVM->tm.s.u32VirtualWarpDrivePercentage)); 554 } 555 556 /* Setup and report */ 557 CPUMR3SetCR4Feature(pVM, X86_CR4_TSD, ~X86_CR4_TSD); 558 LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%'RU64) enmMode=%d (%s)\n" 559 "TM: TSCTiedToExecution=%RTbool TSCNotTiedToHalt=%RTbool\n", 560 pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.enmMode, tmR3GetModeName(pVM), 561 pVM->tm.s.fTSCTiedToExecution, pVM->tm.s.fTSCNotTiedToHalt)); 528 562 529 563 /* … … 776 810 static bool tmR3HasFixedTSC(PVM pVM) 777 811 { 812 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage; 813 if (pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC) 814 return true; 815 778 816 if (ASMHasCpuId()) 779 817 { … … 782 820 if (CPUMGetHostCpuVendor(pVM) == CPUMCPUVENDOR_AMD) 783 821 { 822 /** @todo This is redundant as it would get satisified in the invariant case 823 * above. Remove later or keep around for sync mode override? */ 784 824 /* 785 825 * AuthenticAMD - Check for APM support and that TscInvariant is set. 
… … 792 832 if (uEAX >= 0x80000007) 793 833 { 794 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;795 796 834 ASMCpuId(0x80000007, &uEAX, &uEBX, &uECX, &uEDX); 797 835 if ( (uEDX & X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR) /* TscInvariant */ 798 && pGip->u32Mode == SUPGIPMODE_SYNC_TSC /* no fixed tsc if the gip timer is in async mode */) 836 && ( pGip->u32Mode == SUPGIPMODE_SYNC_TSC /* No fixed tsc if the gip timer is in async mode. */ 837 || pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC)) 799 838 return true; 800 839 } … … 852 891 { 853 892 /* 854 * Use GIP when available present.893 * Use GIP when available. 855 894 */ 856 895 uint64_t u64Hz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage); 896 if (g_pSUPGlobalInfoPage->u32Mode == SUPGIPMODE_INVARIANT_TSC) 897 { 898 Assert(u64Hz != UINT64_MAX); 899 return u64Hz; 900 } 901 857 902 if (u64Hz != UINT64_MAX) 858 903 { … … 864 909 /* Spin for 40ms to try push up the CPU frequency and get a more reliable CpuHz value. */ 865 910 const uint64_t u64 = RTTimeMilliTS(); 866 while ((RTTimeMilliTS() - u64) < 40 /* ms*/)911 while ((RTTimeMilliTS() - u64) < 40 /* ms */) 867 912 /* nothing */; 868 913 } … … 873 918 } 874 919 875 /* call this once first to make sure it's initialized. */920 /* Call this once first to make sure it's initialized. */ 876 921 RTTimeNanoTS(); 877 922 … … 1278 1323 pVM->tm.s.u64LastPausedTSC = pVCpu->tm.s.u64TSC; 1279 1324 1280 if (pVM->tm.s. fTSCUseRealTSC)1325 if (pVM->tm.s.enmMode == TMMODE_REAL_TSC_OFFSET) 1281 1326 pVCpu->tm.s.offTSCRawSrc = 0; /** @todo TSC restore stuff and HWACC. */ 1282 1327 } … … 1285 1330 if (RT_FAILURE(rc)) 1286 1331 return rc; 1287 if ( !pVM->tm.s.fTSCUseRealTSC)1332 if (pVM->tm.s.enmMode != TMMODE_REAL_TSC_OFFSET) 1288 1333 pVM->tm.s.cTSCTicksPerSecond = u64Hz; 1289 1290 LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%'RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool (state load)\n", 1291 pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.fTSCVirtualized, pVM->tm.s.fTSCUseRealTSC)); 1334 /** @todo Compare with real TSC rate even when restoring with real-tsc-offset 1335 * mode. */ 1336 1337 LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%'RU64) enmMode=%d (%s) (state load)\n", 1338 pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.enmMode, tmR3GetModeName(pVM))); 1292 1339 1293 1340 /* … … 2803 2850 TMR3NotifySuspend(pVM, pVCpu); 2804 2851 2852 /** @todo should probably switch TM mode to virt-tsc-emulated if it isn't 2853 * already. */ 2805 2854 pVM->tm.s.u32VirtualWarpDrivePercentage = u32Percent; 2806 2855 pVM->tm.s.fVirtualWarpDrive = u32Percent != 100; … … 3148 3197 */ 3149 3198 pHlp->pfnPrintf(pHlp, 3150 "Cpu Tick: %18RU64 (%#016RX64) %RU64Hz %s %s",3199 "Cpu Tick: %18RU64 (%#016RX64) %RU64Hz %s - virtualized", 3151 3200 u64TSC, u64TSC, TMCpuTicksPerSecond(pVM), 3152 pVCpu->tm.s.fTSCTicking ? "ticking" : "paused", 3153 pVM->tm.s.fTSCVirtualized ? " - virtualized" : ""); 3154 if (pVM->tm.s.fTSCUseRealTSC) 3201 pVCpu->tm.s.fTSCTicking ? "ticking" : "paused"); 3202 if (pVM->tm.s.enmMode == TMMODE_REAL_TSC_OFFSET) 3155 3203 { 3156 pHlp->pfnPrintf(pHlp, " - real tsc ");3204 pHlp->pfnPrintf(pHlp, " - real tsc offset"); 3157 3205 if (pVCpu->tm.s.offTSCRawSrc) 3158 3206 pHlp->pfnPrintf(pHlp, "\n offset %RU64", pVCpu->tm.s.offTSCRawSrc); … … 3198 3246 } 3199 3247 3248 3249 /** 3250 * Gets the descriptive TM mode name. 3251 * 3252 * @returns The name. 3253 * @param pVM Pointer to the VM. 
3254 */ 3255 static const char * tmR3GetModeName(PVM pVM) 3256 { 3257 Assert(pVM); 3258 switch (pVM->tm.s.enmMode) 3259 { 3260 case TMMODE_REAL_TSC_OFFSET: return "RealTscOffset"; 3261 case TMMODE_VIRT_TSC_EMULATED: return "VirtTscEmulated"; 3262 case TMMODE_DYNAMIC: return "Dynamic"; 3263 default: return "?????"; 3264 } 3265 } 3266 -
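The new /TM/Mode key replaces the three TSC booleans with a single string matched against "VirtTSCEmulated", "RealTSCOffset" and "Dynamic". A standalone sketch of that mapping follows, using strcmp() instead of CFGMR3QueryString()/RTStrCmp(); the Toy* names are invented here and the enum merely mirrors the shape of the TMMODE type introduced in TMInternal.h.

    #include <string.h>

    typedef enum TOYTMMODE
    {
        TOYTMMODE_INVALID = 0,
        TOYTMMODE_VIRT_TSC_EMULATED,
        TOYTMMODE_REAL_TSC_OFFSET,
        TOYTMMODE_DYNAMIC
    } TOYTMMODE;

    /* Map the config string onto a mode; the caller turns TOYTMMODE_INVALID
     * into a configuration error, as TMR3Init does with VMSetError(). */
    static TOYTMMODE toyTmModeFromString(const char *pszMode)
    {
        if (!strcmp(pszMode, "VirtTSCEmulated"))
            return TOYTMMODE_VIRT_TSC_EMULATED;
        if (!strcmp(pszMode, "RealTSCOffset"))
            return TOYTMMODE_REAL_TSC_OFFSET;
        if (!strcmp(pszMode, "Dynamic"))
            return TOYTMMODE_DYNAMIC;
        return TOYTMMODE_INVALID;
    }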
trunk/src/VBox/VMM/include/TMInternal.h
r52764 → r53430

 /*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2014 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
…
 typedef TMCPULOADSTATE *PTMCPULOADSTATE;

+
+/**
+ * TM mode.
+ * The main modes of how TM abstracts time.
+ */
+typedef enum TMMODE
+{
+    /** The guest TSC is an emulated virtual TSC. */
+    TMMODE_VIRT_TSC_EMULATED = 1,
+    /** The guest TSC is an offset of the real TSC. */
+    TMMODE_REAL_TSC_OFFSET,
+    /** The guest TSC is dynamically derived through emulation or offsetting. */
+    TMMODE_DYNAMIC
+} TMMODE;
+
+
 /**
  * Converts a TM pointer into a VM pointer.
…
     RTUINT                      offVM;

-    /** Set if we fully virtualize the TSC, i.e. intercept all rdtsc instructions.
-     * Config variable: TSCVirtualized (bool) */
-    bool                        fTSCVirtualized;
-    /** Set if we use the real TSC as time source or if we use the virtual clock.
-     * If fTSCVirtualized is set we maintain a offset to the TSC and pausing/resuming the
-     * ticking. fTSCVirtualized = false implies fTSCUseRealTSC = true.
-     * Config variable: TSCUseRealTSC (bool) */
-    bool                        fTSCUseRealTSC;
-    /** Flag indicating that the host TSC is suitable for use in AMD-V and VT-x mode.
-     * Config variable: MaybeUseOffsettedHostTSC (boolean) */
-    bool                        fMaybeUseOffsettedHostTSC;
+    /** The current timekeeping mode of the VM.
+     * Config variable: Mode (string) */
+    TMMODE                      enmMode;
     /** Whether the TSC is tied to the execution of code.
      * Config variable: TSCTiedToExecution (bool) */
…
      * Config variable: TSCNotTiedToHalt (bool) */
     bool                        fTSCNotTiedToHalt;
-    bool                        afAlignment0[2]; /**< alignment padding */
+    /** Alignment. */
+    bool                        afAlignment0[2];
     /** The ID of the virtual CPU that normally runs the timers. */
     VMCPUID                     idTimerCpu;
…
     /** The number of CPU clock ticks per second (TMCLOCK_TSC).
      * Config variable: TSCTicksPerSecond (64-bit unsigned int)
-     * The config variable implies fTSCVirtualized = true and fTSCUseRealTSC = false. */
+     * The config variable implies @c enmMode would be
+     * TMMODE_VIRT_TSC_EMULATED. */
     uint64_t                    cTSCTicksPerSecond;
     /** The TSC difference introduced by pausing the VM. */
…
     /** Virtual timer synchronous time catch-up active. */
     bool volatile               fVirtualSyncCatchUp;
-    bool                        afAlignment1[1]; /**< alignment padding */
+    /** Alignment. */
+    bool                        afAlignment1[1];
     /** WarpDrive percentage.
      * 100% is normal (fVirtualSyncNormal == true). When other than 100% we apply
…
     Assert(PDMCritSectIsOwner(&(a_pVM)->tm.s.TimerCritSect))

-
 /** @} */
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r51301 → r53430

 /*
- * Copyright (C) 2006-2013 Oracle Corporation
+ * Copyright (C) 2006-2014 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
…
     GEN_CHECK_OFF(TM, pvGIPRC);
     GEN_CHECK_OFF(TMCPU, fTSCTicking);
-    GEN_CHECK_OFF(TM, fTSCUseRealTSC);
+    GEN_CHECK_OFF(TM, enmMode);
     GEN_CHECK_OFF(TM, fTSCTiedToExecution);
     GEN_CHECK_OFF(TMCPU, offTSCRawSrc);